initial migration

Remy Moll 2023-10-05 14:34:37 +02:00
parent 5cb41fd5e4
commit 41f0153fd0
145 changed files with 17441 additions and 0 deletions

apps/adguard/configmap.yaml
@@ -0,0 +1,150 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: adguard-home-config
namespace: adguard
data:
AdGuardHome.yaml: |-
bind_host: 0.0.0.0
bind_port: 3000
beta_bind_port: 0
users: []
auth_attempts: 5
block_auth_min: 15
http_proxy: ""
language: ""
debug_pprof: false
web_session_ttl: 720
dns:
bind_hosts:
- 0.0.0.0
port: 53
statistics_interval: 1
querylog_enabled: true
querylog_file_enabled: true
querylog_interval: 2160h
querylog_size_memory: 1000
anonymize_client_ip: false
protection_enabled: true
blocking_mode: default
blocking_ipv4: ""
blocking_ipv6: ""
blocked_response_ttl: 10
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
ratelimit: 20
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- https://dns10.quad9.net/dns-query
upstream_dns_file: ""
bootstrap_dns:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
all_servers: false
fastest_addr: false
fastest_timeout: 1s
allowed_clients: []
disallowed_clients: []
blocked_hosts:
- version.bind
- id.server
- hostname.bind
trusted_proxies:
- 127.0.0.0/8
- ::1/128
cache_size: 4194304
cache_ttl_min: 0
cache_ttl_max: 0
cache_optimistic: false
bogus_nxdomain: []
aaaa_disabled: false
enable_dnssec: false
edns_client_subnet: false
max_goroutines: 300
ipset: []
filtering_enabled: true
filters_update_interval: 24
parental_enabled: false
safesearch_enabled: false
safebrowsing_enabled: false
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
rewrites: []
blocked_services: []
upstream_timeout: 10s
private_networks: []
use_private_ptr_resolvers: true
local_ptr_upstreams:
- 192.168.1.1
tls:
enabled: false
server_name: ""
force_https: false
port_https: 443
port_dns_over_tls: 853
port_dns_over_quic: 853
port_dnscrypt: 0
dnscrypt_config_file: ""
allow_unencrypted_doh: false
strict_sni_check: false
certificate_chain: ""
private_key: ""
certificate_path: ""
private_key_path: ""
filters:
- enabled: true
url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt
name: AdGuard DNS filter
id: 1
- enabled: true
url: https://adaway.org/hosts.txt
name: AdAway Default Blocklist
id: 2
- enabled: true
url: https://someonewhocares.org/hosts/zero/hosts
name: Dan Pollock's List
id: 1684963532
whitelist_filters: []
user_rules: []
dhcp:
enabled: false
interface_name: ""
local_domain_name: lan
dhcpv4:
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
options: []
dhcpv6:
range_start: ""
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
clients:
runtime_sources:
whois: true
arp: true
rdns: true
dhcp: true
hosts: true
persistent: []
log_compress: false
log_localtime: false
log_max_backups: 0
log_max_size: 100
log_max_age: 3
log_file: ""
verbose: false
os:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 14


@@ -0,0 +1,80 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: adguard-home
namespace: adguard
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/instance: adguard
app.kubernetes.io/name: adguard-home
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/instance: adguard
app.kubernetes.io/name: adguard-home
spec:
containers:
- args:
- --config
- /opt/adguardhome/conf/AdGuardHome.yaml
- --work-dir
- /opt/adguardhome/work
- --no-check-update
env:
- name: TZ
value: Europe/Berlin
image: adguard/adguardhome:v0.107.7
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 3000
timeoutSeconds: 1
name: adguard-home
ports:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 53
name: dns-udp
protocol: UDP
- containerPort: 3000
name: http
protocol: TCP
readinessProbe:
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 3000
timeoutSeconds: 1
resources: {}
startupProbe:
failureThreshold: 30
periodSeconds: 5
successThreshold: 1
tcpSocket:
port: 3000
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /opt/adguardhome/conf/
name: adguard-home-config
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 0777
name: adguard-home-config
name: adguard-home-config

apps/adguard/ingress.yaml
@@ -0,0 +1,42 @@
# apiVersion: traefik.containo.us/v1alpha1
# kind: Middleware
# metadata:
# name: authentik-auth
# namespace: adguard
# spec:
# forwardAuth:
# address: https://adguard.kluster.moll.re/outpost.goauthentik.io/auth/traefik
# trustForwardHeader: true
# authResponseHeaders:
# - X-authentik-username
# - X-authentik-groups
# - X-authentik-email
# - X-authentik-name
# - X-authentik-uid
# - X-authentik-jwt
# - X-authentik-meta-jwks
# - X-authentik-meta-outpost
# - X-authentik-meta-provider
# - X-authentik-meta-app
# - X-authentik-meta-version
# ---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: adguard-ingress
namespace: adguard
spec:
entryPoints:
- websecure
routes:
- match: Host(`adguard.kluster.moll.re`)
kind: Rule
# middlewares:
# - name: authentik-auth
services:
- name: adguard-home
port: 3000
tls:
certResolver: default-tls

apps/adguard/service.yaml
@@ -0,0 +1,61 @@
apiVersion: v1
kind: Service
metadata:
name: adguard-home
namespace: adguard
spec:
ports:
- name: http
port: 3000
protocol: TCP
targetPort: http
selector:
app.kubernetes.io/instance: adguard
app.kubernetes.io/name: adguard-home
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
annotations:
metallb.universe.tf/ip-allocated-from-pool: default
metallb.universe.tf/allow-shared-ip: adguard-svc
name: adguard-home-dns-tcp
namespace: adguard
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
ports:
- name: dns-tcp
nodePort: 31306
port: 53
protocol: TCP
targetPort: 53
selector:
app.kubernetes.io/instance: adguard
app.kubernetes.io/name: adguard-home
type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
annotations:
metallb.universe.tf/ip-allocated-from-pool: default
metallb.universe.tf/allow-shared-ip: adguard-svc
name: adguard-home-dns-udp
namespace: adguard
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
ports:
- name: dns-udp
nodePort: 30547
port: 53
protocol: UDP
targetPort: 53
selector:
app.kubernetes.io/instance: adguard
app.kubernetes.io/name: adguard-home
type: LoadBalancer
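# Both LoadBalancer Services above share 192.168.3.2 via the
# metallb.universe.tf/allow-shared-ip annotation, exposing port 53 over TCP
# and UDP on a single address. A quick sanity check from a client on the LAN
# (hypothetical query, assuming dig is available):
#   dig @192.168.3.2 example.com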

apps/adguard/values.yaml
@@ -0,0 +1,365 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
controller:
# -- Number of pods to load balance between
replicas: 1
initContainers:
# -- Configures an initContainer that copies the configmap to the AdGuardHome conf directory
# It does NOT overwrite when the file already exists.
# @default -- See values.yaml
copy-configmap:
image: busybox
imagePullPolicy: IfNotPresent
command:
- "sh"
- "-c"
- |
if [ ! -f /opt/adguardhome/conf/AdGuardHome.yaml ]; then
mkdir -p /opt/adguardhome/conf
cp /tmp/AdGuardHome.yaml /opt/adguardhome/conf/AdGuardHome.yaml
fi
volumeMounts:
- name: adguard-home-config
mountPath: /tmp/AdGuardHome.yaml
subPath: AdGuardHome.yaml
- name: config
mountPath: /opt/adguardhome/conf
securityContext:
runAsUser: 0
image:
# -- image repository
repository: adguard/adguardhome
# @default -- chart.appVersion
tag:
# -- image pull policy
pullPolicy: IfNotPresent
# -- environment variables.
# @default -- See below
env:
# -- Set the container timezone
TZ: Europe/Berlin
# -- arguments passed to the adguard-home command line.
args:
- "--config"
- "/opt/adguardhome/conf/AdGuardHome.yaml"
- "--work-dir"
- "/opt/adguardhome/work"
- "--no-check-update"
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
primary: true
ports:
http:
port: 3000
dns-tcp:
enabled: true
type: LoadBalancer
loadBalancerIP: 192.168.3.2
annotations:
metallb.universe.tf/allow-shared-ip: adguard-svc
ports:
dns-tcp:
enabled: true
port: 53
protocol: TCP
targetPort: 53
dns-udp:
enabled: true
type: LoadBalancer
loadBalancerIP: 192.168.3.2
annotations:
metallb.universe.tf/allow-shared-ip: adguard-svc
ports:
dns-udp:
enabled: true
port: 53
protocol: UDP
targetPort: 53
dns-tls-udp:
enabled: true
type: LoadBalancer
loadBalancerIP: 192.168.3.5
annotations:
metallb.universe.tf/allow-shared-ip: adguard-svc
ports:
dns-tls-udp:
enabled: true
port: 853
protocol: UDP
targetPort: 853
dns-tls-tcp:
enabled: true
type: LoadBalancer
loadBalancerIP: 192.168.3.5
annotations:
metallb.universe.tf/allow-shared-ip: adguard-svc
ports:
dns-tls-tcp:
enabled: true
port: 853
protocol: TCP
targetPort: 853
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
config:
enabled: true
mountPath: /opt/adguardhome/conf
data:
enabled: false
mountPath: /opt/adguardhome/work
# config -- AdGuard Home configuration. For a full list of options, see https://github.com/AdguardTeam/AdGuardHome/wiki/Configuration.
# @default -- See values.yaml
config: |
bind_host: 0.0.0.0
bind_port: 3000
beta_bind_port: 0
users: []
auth_attempts: 5
block_auth_min: 15
http_proxy: ""
language: ""
debug_pprof: false
web_session_ttl: 720
dns:
bind_hosts:
- 0.0.0.0
port: 53
statistics_interval: 1
querylog_enabled: true
querylog_file_enabled: true
querylog_interval: 2160h
querylog_size_memory: 1000
anonymize_client_ip: false
protection_enabled: true
blocking_mode: default
blocking_ipv4: ""
blocking_ipv6: ""
blocked_response_ttl: 10
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
ratelimit: 20
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- https://dns10.quad9.net/dns-query
upstream_dns_file: ""
bootstrap_dns:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
all_servers: false
fastest_addr: false
fastest_timeout: 1s
allowed_clients: []
disallowed_clients: []
blocked_hosts:
- version.bind
- id.server
- hostname.bind
trusted_proxies:
- 127.0.0.0/8
- ::1/128
cache_size: 4194304
cache_ttl_min: 0
cache_ttl_max: 0
cache_optimistic: false
bogus_nxdomain: []
aaaa_disabled: false
enable_dnssec: false
edns_client_subnet: false
max_goroutines: 300
ipset: []
filtering_enabled: true
filters_update_interval: 24
parental_enabled: false
safesearch_enabled: false
safebrowsing_enabled: false
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
rewrites: []
blocked_services: []
upstream_timeout: 10s
private_networks: []
use_private_ptr_resolvers: true
local_ptr_upstreams:
- 192.168.1.1
tls:
enabled: true
server_name: "dns.moll.re"
force_https: false
port_https: 443
port_dns_over_tls: 853
port_dns_over_quic: 853
port_dnscrypt: 0
dnscrypt_config_file: ""
allow_unencrypted_doh: false
strict_sni_check: false
certificate_chain: |-
-----BEGIN CERTIFICATE-----
MIIFyzCCA7OgAwIBAgIUEvyI5bCa56vvyQgTbLyR7+c7vQMwDQYJKoZIhvcNAQEL
BQAwdTELMAkGA1UEBhMCREUxCzAJBgNVBAgMAkJXMREwDwYDVQQHDAhGcmVpYnVy
ZzENMAsGA1UECgwEUmVteTEKMAgGA1UECwwBTTEQMA4GA1UEAwwHbW9sbC5yZTEZ
MBcGCSqGSIb3DQEJARYKbWVAbW9sbC5yZTAeFw0yMzA3MTUxNzQ0MTVaFw0yNDA3
MTQxNzQ0MTVaMHUxCzAJBgNVBAYTAkRFMQswCQYDVQQIDAJCVzERMA8GA1UEBwwI
RnJlaWJ1cmcxDTALBgNVBAoMBFJlbXkxCjAIBgNVBAsMAU0xEDAOBgNVBAMMB21v
bGwucmUxGTAXBgkqhkiG9w0BCQEWCm1lQG1vbGwucmUwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQDpS0Xtii0VITKFr9XFLcWchI6//I7iMeKkYi7uEq60
1YZQ8/Zppg1M15BhD8ZEQ0JZ42ufi0p4B0LYMGHYF+2kKsbFxcEPQTUeXCLcjYVA
ueZ+GTh+FrUrSQvHSevUbVXytAwiqAN/eAvXBMdOKisPUM9Cmk/KHA+W+anw4Uxq
ZvHq5GG9Z0IksTHI2oEMp/8cZ8lRXzHmOUYQGveBX6PBPvcttP8GwCU6vsPVSphZ
7XF2LPqeMnBGgmOz51QTRpS7NBHMsSDR20VgSTjI+F8nJnQsGO5Iq9IpQzlDlAsL
jgPOT3W/pdeZD1mX/c9EpYEKf/0ubEBiWc+kJqkrdmsUX6cZ06qEUa08yCMSzkao
mHrMzw22kjICG9h+0sZvTetPvpYZsBqQRejDS/cu+buAaDNchGNhl1YPp8iAlKUT
YB4gbcNqceCGUmbQX06B/OwJiYIoN5ghh2wmqNrFXYltfALBVhWFtU2DTAS9k399
W2hd4u77uJngK0WLoKQuV/wi81dbk0kAI7eRUI1H/Y4hC1MCI5M6zewrJ7QgOYBi
qkYydYQGFu1ToDt6maDVBX05PcoBPwbUfrmZBjR5kzBawvH6reDuANkEXfJ0+2hA
JBAxXPKyQVc9Y87nDATvkl7qWOKjfJairKAd03lvJlesr6+7GwMMnE/6h91QF4Vq
OQIDAQABo1MwUTAdBgNVHQ4EFgQUunr29QozKy+AlTrq+PAoSjPFOQIwHwYDVR0j
BBgwFoAUunr29QozKy+AlTrq+PAoSjPFOQIwDwYDVR0TAQH/BAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAgEAPeczDC1OScGZ6UVjFUF+BqI1Am9TwUNVD2cRnbXvQ2g7
nU8vYSfWx00bhRTpuDEG997HkCCvaUYIArbGtgplB+bCk6GMnQQfnRWIyFz/cy+Y
yuftUY0PufXzCe33J2Q0SQCNKdEvOsfiPCkyrgMSlomoIDPhs4wQ8SOE0Lnl4fNw
i1uVDd6pTxwwfpfsvN5lBwXN+RDr1Awe07f9SJmYklqQAIP5Kthq7QJsN1QHvmtW
JL7AYlltDTUYvE2kBnQKjkNYv9Qj4PGUvipVlCKA4cEVAZXHam01RqPXEFj5I9B4
Q9S+oT7htoXWuz9kAwsSCZVEW1QBzRL7UNIckMWsc1jRSiCT5Nc/sOtPyIc9in+i
J/XGPjSBvQZrnitLhR4qByG/dY+istQkcEERjElwhzucEyNkgtENJfJEevdJsrBf
oGaaK5ljemYsk1e+QHB3FWmNbIysKBMn44bHgu7DeQediLCjvwdasjVorDW1mv5Z
8Aoe075vxTmHGSjfMPiAzJnYMy0zCT1VcR+AtPKUtr11z2xgOrAqZqlTaR/ud6ce
B11n3oIs5Kwarvhwx2Qw7XvcGOa2PBGZW4kcoDRn9GNFcP5K2AAuRJD9FLTbr8ZO
6a0bv0KUksQYX+U/r3+qSn87TXyIJ1IbKY2jQYu/+KEpeyFnviXw+IoM/YHDqdw=
-----END CERTIFICATE-----
private_key: |-
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDpS0Xtii0VITKF
r9XFLcWchI6//I7iMeKkYi7uEq601YZQ8/Zppg1M15BhD8ZEQ0JZ42ufi0p4B0LY
MGHYF+2kKsbFxcEPQTUeXCLcjYVAueZ+GTh+FrUrSQvHSevUbVXytAwiqAN/eAvX
BMdOKisPUM9Cmk/KHA+W+anw4UxqZvHq5GG9Z0IksTHI2oEMp/8cZ8lRXzHmOUYQ
GveBX6PBPvcttP8GwCU6vsPVSphZ7XF2LPqeMnBGgmOz51QTRpS7NBHMsSDR20Vg
STjI+F8nJnQsGO5Iq9IpQzlDlAsLjgPOT3W/pdeZD1mX/c9EpYEKf/0ubEBiWc+k
JqkrdmsUX6cZ06qEUa08yCMSzkaomHrMzw22kjICG9h+0sZvTetPvpYZsBqQRejD
S/cu+buAaDNchGNhl1YPp8iAlKUTYB4gbcNqceCGUmbQX06B/OwJiYIoN5ghh2wm
qNrFXYltfALBVhWFtU2DTAS9k399W2hd4u77uJngK0WLoKQuV/wi81dbk0kAI7eR
UI1H/Y4hC1MCI5M6zewrJ7QgOYBiqkYydYQGFu1ToDt6maDVBX05PcoBPwbUfrmZ
BjR5kzBawvH6reDuANkEXfJ0+2hAJBAxXPKyQVc9Y87nDATvkl7qWOKjfJairKAd
03lvJlesr6+7GwMMnE/6h91QF4VqOQIDAQABAoICAFXdtDe5X12DEf7dmJ9R+QVi
Ts5ADXEYrlQVpTNQIgiB/MVn/d6l1Qhe4Q+wiCeQ3+eIypB26qph9crvh9vK9tcx
PWcGocfVFtF9VQF7fzuzELCB5OaXwgfUA2dPAGN3+KXzefH5iAwPKcByzE6rO50P
/7ECbfK0QFKvwspbik4xZMIxW/4j9tbddzb3oX8AiGeylYkDMjEMDIsZ+dYe1v1m
CQFEOIeKCknkc9zZ71hOCjBWXsoCQ4vYKw1IzAuqM0zx3clKuoszGwZU/PcPX6pf
v2uJo46Q2zH/waBraWNP2nvBiFPJHSEDYtUMAJFCH0w3jn7bLhlk+AVxi1tpYwBx
SOFQKmKbJgTWpmX7o8bhyNmSg6gLTquKKYuOeUsJTe4SERnhKNVen/mf1BdV5S1A
iLj9mg5tFL1O+f8wl8q0QA5aM3o1G/YMlG28Na6X8l89BiDvfdG4YALzeJs5k1yn
VnpZElikhx63HQjaLE+u4nSBwr0s79Hnq4Xge+rEPCRVpHhfZ1T/Ka3NwqcflcM7
GvvRnXfLLyfS3DOQg9BCwE94hzJgh7V4BqEQInzkAR3/wF83xTT0LaWLBsJXTsWr
rHcdPxpMVXNUfelBmA3Blu1d07lDw8kMzYXzCJ4AE9gjdgN9ltwjg7ZDQ3w6Tnc1
09aLmIUeRx6r7vs8pBMPAoIBAQD3epVeC4Urpmop21Jzop7nqvQqmHwDvUPIHKWZ
a1e9YmHfNR6Vibzw8jqjd7IJMd5mzlcot+bTjfFGxfZ/KidE5MB8rvwS0MVQnamZ
dnl1OX9c/+G4jW8xCzNQlkAXT2xcaMPO/ged6smdtZkvvnjfyX0L78fbKG+4fsc8
PoIB5gXjApVVN4ujeaKUud2jr2uHueQqI8taZlhlIojxc1w/a9r0iiLK+sY/HvWH
gERxDFWQjg8kkFGXC3KFOz0UJiolDus9sK9cLcDI4IavOotVaxEoz778u9644+GM
wfRJCN8OBT3RQjPy77L1VOCjrbd1TtknDDG+kAN4ZLLEPCO/AoIBAQDxU6gqjGDy
SC1mSgl8x6ODkmCs2a9UvZeg9/KA/UzTGCLeSgftPwgCeGV6d6dpqFxsvqhVDVtp
pkqFa2+X0rsIG4JFl6qZTbXpJIqbdkTeWjjimg809fTqZnSJSchUiuIWzqvGlOSL
cM5c7+WNteLVHjldiNT0+jReXPtxAJD9jIV3LubmWZ5qs3tYXKGgQvCItLo6REYE
SKUZAsX/T6O6HAypv89AcS+UZxc2pq4htFRJY5XarLbs8BuDJAYWm3chMwwGIDEx
J7cCXWWWQkU7W1GOckU4oo6FPGzjREPwyeiYcvias2/nm4tOc5t0gRJHIR8W6tQF
5An7lLSHe5AHAoIBAADiNSpSzDTtsS9ZEyBKklqtZ5XHWZoB0P4j7AtyMKwCb+sG
G4fZKA2ML91pjf8uaGbhkboZff9/YD8qccjec6lxT6aiUVAX4rx486QSojhi7it8
1md8SctZCOPexXfP1sk1ro1MpuZPckzX2yYqfe/+ni2uu33y1QNJoJh8eKZdFeRL
nBDj0+HPi18QktQEylN/vGrSGeXGu8YQq4CBMvEfB3ccDye+YXrUN3g2YwgsTRnp
B/DPexsY9V24am1p/XiIZxqfSOEBYNDWzGRPxzOU4EjPBRWN7ium1KVWA/NGztUT
+7aFj/3sES2DEhJDioYms+vJxVuy0/BYG7NLq60CggEAZCxZre+/flK/paot7gHg
ugjU4GssAH0Cp+rEWw7KCQYH00XfrHdxl7TqSr/IWm9sjidGMKfuvhgs7tz94YOz
51Wj6cdfJWvAixqD/qxFQhcpbcaNcWp3U6Vb0nEyGwXbe6QmYbQEem1E/AcIvp41
nkmBfnYCD/6cJl9qcCnQBa+C50osxomE3L3MAY3R+XhP6C887lrQxY5yGcOw9J3W
VLa3+u6H1TQmj++LD0B5H7x/EEeqOK9g71Fr2i/l5xR5iuppn1FVmhXmPbEPLiQs
IMtzOzHr0eqIRn4ipOP9X8IwLrfqwiyh0v4aAWKzsNSzBZuWEClCAX/7NNcxaNu9
mQKCAQEA3dk8ScY8bVPgFg2x7oqujVZbrNizhw2+BXYuH6HRVINPDYzIapur9uiw
I+STHoUod8aRNvwDLfhkI+MabmEbt/eDsBpRrJYYLi2uTed5gIiLqPS8MPuKr++7
UwJz4OPZu1xOjbFapvKvPSbPhS254tozQyi5Xbl8W268SCQhF+hEb+AT5JTcoPlI
ZNN5hp0Ooq6EouX8heyeG7le9V2G+HFHR9aWniD9kRRirO+oqWTXcG+9zHRhkdbF
4vRGwZ8+mj/0fKAHlFpeDRiKNbma7rTNDyEDR9jQ+GOC1QmOYeiei6FDKYEPcHxh
UBWqdlD+gUjtzQvD3yMo7JN9DIO5Eg==
-----END PRIVATE KEY-----
certificate_path: ""
private_key_path: ""
filters:
- enabled: true
url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt
name: AdGuard DNS filter
id: 1
- enabled: true
url: https://adaway.org/hosts.txt
name: AdAway Default Blocklist
id: 2
- enabled: true
url: https://someonewhocares.org/hosts/zero/hosts
name: Dan Pollock's List
id: 1684963532
whitelist_filters: []
user_rules: []
dhcp:
enabled: false
interface_name: ""
local_domain_name: lan
dhcpv4:
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
options: []
dhcpv6:
range_start: ""
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
clients:
runtime_sources:
whois: true
arp: true
rdns: true
dhcp: true
hosts: true
persistent: []
log_compress: false
log_localtime: false
log_max_backups: 0
log_max_size: 100
log_max_age: 3
log_file: ""
verbose: false
os:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 14


@@ -0,0 +1,126 @@
apiVersion: v1
kind: Namespace
metadata:
name: codeserver
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: codeserver
name: codeserver-data-nfs
labels:
directory: codeserver
spec:
storageClassName: fast
capacity:
storage: "10Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /codeserver
server: nfs-server.storage.svc.cluster.local
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: codeserver
name: codeserver-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: codeserver
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: codeserver
name: codeserver
labels:
app: codeserver
spec:
replicas: 1
selector:
matchLabels:
app: codeserver
template:
metadata:
labels:
app: codeserver
spec:
containers:
- name: codeserver
image: gitpod/openvscode-server
ports:
- containerPort: 3000
volumeMounts:
- mountPath: /home/workspace
name: codeserver-data
volumes:
- name: codeserver-data
persistentVolumeClaim:
claimName: codeserver-data-nfs
---
apiVersion: v1
kind: Service
metadata:
namespace: codeserver
name: codeserver
spec:
type: ClusterIP
ports:
- name: http
port: 3000
selector:
app: codeserver
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: codeserver-ingress
namespace: codeserver
spec:
entryPoints:
- websecure
routes:
- match: Host(`code.kluster.moll.re`)
middlewares:
- name: codeserver-websocket
kind: Rule
services:
- name: codeserver
port: 3000
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: codeserver-websocket
namespace: codeserver
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"


@@ -0,0 +1,17 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: dendrite
name: dendrite-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`dendrite.kluster.moll.re`)
kind: Rule
services:
- name: dendrite
port: 8008
tls:
certResolver: default-tls

apps/dendrite/values.yaml
@@ -0,0 +1,407 @@
image:
# -- Docker repository/image to use
repository: "ghcr.io/matrix-org/dendrite-monolith"
# -- Kubernetes pullPolicy
pullPolicy: IfNotPresent
# -- Overrides the image tag whose default is the chart appVersion.
tag: ""
# signing key to use
signing_key:
# -- Create a new signing key, if not exists
create: true
# -- Use an existing secret
existingSecret: ""
# -- Default resource requests/limits.
# @default -- sets some sane default values
resources:
requests:
memory: "512Mi"
limits:
memory: "4096Mi"
persistence:
# -- The storage class to use for volume claims.
# Used unless specified at the specific component.
# Defaults to the cluster default storage class.
storageClass: "nfs-client"
jetstream:
# -- Use an existing volume claim for jetstream
existingClaim: ""
# -- PVC Storage Request for the jetstream volume
capacity: "1Gi"
# -- The storage class to use for volume claims.
# Defaults to persistence.storageClass
storageClass: ""
media:
# -- Use an existing volume claim for media files
existingClaim: ""
# -- PVC Storage Request for the media volume
capacity: "1Gi"
# -- The storage class to use for volume claims.
# Defaults to persistence.storageClass
storageClass: ""
search:
# -- Use an existing volume claim for the fulltext search index
existingClaim: ""
# -- PVC Storage Request for the search volume
capacity: "1Gi"
# -- The storage class to use for volume claims.
# Defaults to persistence.storageClass
storageClass: ""
# -- Add additional volumes to the Dendrite Pod
extraVolumes: []
# ex.
# - name: extra-config
# secret:
# secretName: extra-config
# -- Configure additional mount points volumes in the Dendrite Pod
extraVolumeMounts: []
# ex.
# - mountPath: /etc/dendrite/extra-config
# name: extra-config
strategy:
# -- Strategy to use for rolling updates (e.g. Recreate, RollingUpdate)
# If you are using ReadWriteOnce volumes, you should probably use Recreate
type: RollingUpdate
rollingUpdate:
# -- Maximum number of pods that can be unavailable during the update process
maxUnavailable: 25%
# -- Maximum number of pods that can be scheduled above the desired number of pods
maxSurge: 25%
dendrite_config:
version: 2
global:
# -- **REQUIRED** Servername for this Dendrite deployment.
server_name: "dendrite.kluster.moll.re"
# -- The private key to use. (**NOTE**: This is overridden in Helm)
private_key: /etc/dendrite/secrets/signing.key
# -- The server name to delegate server-server communications to, with optional port
# e.g. localhost:443
well_known_server_name: "dendrite.kluster.moll.re:443"
# -- The server name to delegate client-server communications to, with optional port
# e.g. localhost:443
well_known_client_name: "dendrite.kluster.moll.re:443"
# -- Lists of domains that the server will trust as identity servers to verify third
# party identifiers such as phone numbers and email addresses.
trusted_third_party_id_servers:
- matrix.org
- vector.im
# -- The paths and expiry timestamps (as a UNIX timestamp in millisecond precision)
# to old signing keys that were formerly in use on this domain name. These
# keys will not be used for federation request or event signing, but will be
# provided to any other homeserver that asks when trying to verify old events.
old_private_keys:
# If the old private key file is available:
# - private_key: old_matrix_key.pem
# expired_at: 1601024554498
# If only the public key (in base64 format) and key ID are known:
# - public_key: mn59Kxfdq9VziYHSBzI7+EDPDcBS2Xl7jeUdiiQcOnM=
# key_id: ed25519:mykeyid
# expired_at: 1601024554498
# -- Disable federation. Dendrite will not be able to make any outbound HTTP requests
# to other servers and the federation API will not be exposed.
disable_federation: false
key_validity_period: 168h0m0s
database:
# -- The connection string for connections to Postgres.
# This will be set automatically if using the Postgres dependency
connection_string: ""
# -- Default database maximum open connections
max_open_conns: 90
# -- Default database maximum idle connections
max_idle_conns: 5
# -- Default database maximum lifetime
conn_max_lifetime: -1
jetstream:
# -- Persistent directory to store JetStream streams in.
storage_path: "/data/jetstream"
# -- NATS JetStream server addresses if not using internal NATS.
addresses: []
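# ex. (hypothetical external NATS server, assuming the default client port 4222):
# addresses:
#   - nats://nats.example.com:4222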
# -- The prefix for JetStream streams
topic_prefix: "Dendrite"
# -- Keep all data in memory. (**NOTE**: This is overridden in Helm to `false`)
in_memory: false
# -- Disables TLS validation. This should **NOT** be used in production.
disable_tls_validation: true
cache:
# -- The estimated maximum size for the global cache in bytes, or in terabytes,
# gigabytes, megabytes or kilobytes when the appropriate 'tb', 'gb', 'mb' or
# 'kb' suffix is specified. Note that this is not a hard limit, nor is it a
# memory limit for the entire process. A cache that is too small may ultimately
# provide little or no benefit.
max_size_estimated: 1gb
# -- The maximum amount of time that a cache entry can live for in memory before
# it will be evicted and/or refreshed from the database. Lower values result in
# easier admission of new cache entries but may also increase database load in
# comparison to higher values, so adjust conservatively. Higher values may make
# it harder for new items to make it into the cache, e.g. if new rooms suddenly
# become popular.
max_age: 1h
report_stats:
# -- Configures phone-home statistics reporting. These statistics contain the server
# name, number of active users and some information on your deployment config.
# We use this information to understand how Dendrite is being used in the wild.
enabled: false
# -- Endpoint to report statistics to.
endpoint: https://matrix.org/report-usage-stats/push
presence:
# -- Controls whether we receive presence events from other servers
enable_inbound: false
# -- Controls whether we send presence events for our local users to other servers.
# (_May increase CPU/memory usage_)
enable_outbound: false
server_notices:
# -- Server notices allows server admins to send messages to all users on the server.
enabled: false
# -- The local part for the user sending server notices.
local_part: "_server"
# -- The display name for the user sending server notices.
display_name: "Server Alerts"
# -- The avatar URL (as a mxc:// URL) name for the user sending server notices.
avatar_url: ""
# The room name to be used when sending server notices. This room name will
# appear in user clients.
room_name: "Server Alerts"
# prometheus metrics
metrics:
# -- Whether or not Prometheus metrics are enabled.
enabled: false
# HTTP basic authentication to protect access to monitoring.
basic_auth:
# -- HTTP basic authentication username
user: "metrics"
# -- HTTP basic authentication password
password: metrics
dns_cache:
# -- Whether or not the DNS cache is enabled.
enabled: false
# -- Maximum number of entries to hold in the DNS cache
cache_size: 256
# -- Duration for how long DNS cache items should be considered valid ([see time.ParseDuration](https://pkg.go.dev/time#ParseDuration) for more)
cache_lifetime: "10m"
profiling:
# -- Enable pprof. You will need to manually create a port forwarding to the deployment to access PPROF,
# as it will only listen on localhost and the defined port.
# e.g. `kubectl port-forward deployments/dendrite 65432:65432`
enabled: false
# -- pprof port, if enabled
port: 65432
# -- Configuration for experimental MSCs. (Valid values are: msc2836)
mscs:
mscs: []
# A list of enabled MSCs
# Currently valid values are:
# - msc2836 (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
app_service_api:
# -- Disable the validation of TLS certificates of appservices. This is
# not recommended in production since it may allow appservice traffic
# to be sent to an insecure endpoint.
disable_tls_validation: false
# -- Appservice config files to load on startup. (**NOTE**: This is overridden by Helm, if a folder `./appservices/` exists)
config_files: []
client_api:
# -- Prevents new users from being able to register on this homeserver, except when
# using the registration shared secret below.
registration_disabled: true
# Prevents new guest accounts from being created. Guest registration is also
# disabled implicitly by setting 'registration_disabled' above.
guests_disabled: true
# -- If set, allows registration by anyone who knows the shared secret, regardless of
# whether registration is otherwise disabled.
registration_shared_secret: "this is the shared secret"
# -- enable reCAPTCHA registration
enable_registration_captcha: false
# -- reCAPTCHA public key
recaptcha_public_key: ""
# -- reCAPTCHA private key
recaptcha_private_key: ""
# -- reCAPTCHA bypass secret
recaptcha_bypass_secret: ""
recaptcha_siteverify_api: ""
# TURN server information that this homeserver should send to clients.
turn:
# -- Duration for how long users should be considered valid ([see time.ParseDuration](https://pkg.go.dev/time#ParseDuration) for more)
turn_user_lifetime: "24h"
turn_uris: []
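# ex. (hypothetical TURN host; standard turn/turns URI syntax per RFC 7065):
# turn_uris:
#   - turn:turn.example.com?transport=udp
#   - turns:turn.example.com?transport=tcp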
turn_shared_secret: ""
# -- The TURN username
turn_username: ""
# -- The TURN password
turn_password: ""
rate_limiting:
# -- Enable rate limiting
enabled: true
# -- After how many requests a rate limit should be activated
threshold: 20
# -- Cooloff time in milliseconds
cooloff_ms: 500
# -- Users which should be exempt from rate limiting
exempt_user_ids:
federation_api:
# -- Federation failure threshold. How many consecutive failures that we should
# tolerate when sending federation requests to a specific server. The backoff
# is 2**x seconds, so 1 = 2 seconds, 2 = 4 seconds, 3 = 8 seconds, etc.
# The default value is 16 if not specified, which is circa 18 hours.
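# For example, the 5th consecutive failure backs off 2**5 = 32 seconds, and the
# 16th backs off 2**16 = 65536 seconds, i.e. roughly 18 hours.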
send_max_retries: 16
# -- Disable TLS validation. This should **NOT** be used in production.
disable_tls_validation: false
prefer_direct_fetch: false
# -- Prevents Dendrite from keeping HTTP connections
# open for reuse for future requests. Connections will be closed quicker
# but we may spend more time on TLS handshakes instead.
disable_http_keepalives: false
# -- Perspective keyservers, to use as a backup when direct key fetch
# requests don't succeed.
# @default -- See value.yaml
key_perspectives:
- server_name: matrix.org
keys:
- key_id: ed25519:auto
public_key: Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw
- key_id: ed25519:a_RXGa
public_key: l8Hft5qXKn1vfHrg3p4+W8gELQVo8N13JkluMfmn2sQ
media_api:
# -- The path to store media files (e.g. avatars) in
base_path: "/data/media_store"
# -- The max file size for uploaded media files
max_file_size_bytes: 10485760
# Whether to dynamically generate thumbnails if needed.
dynamic_thumbnails: false
# -- The maximum number of simultaneous thumbnail generators to run.
max_thumbnail_generators: 10
# -- A list of thumbnail sizes to be generated for media content.
# @default -- See value.yaml
thumbnail_sizes:
- width: 32
height: 32
method: crop
- width: 96
height: 96
method: crop
- width: 640
height: 480
method: scale
sync_api:
# -- This option controls which HTTP header to inspect to find the real remote IP
# address of the client. This is likely required if Dendrite is running behind
# a reverse proxy server.
real_ip_header: X-Real-IP
# -- Configuration for the full-text search engine.
search:
# -- Whether fulltext search is enabled.
enabled: true
# -- The path to store the search index in.
index_path: "/data/search"
# -- The language most likely to be used on the server - used when indexing, to
# ensure the returned results match expectations. A full list of possible languages
# can be found [here](https://github.com/matrix-org/dendrite/blob/76db8e90defdfb9e61f6caea8a312c5d60bcc005/internal/fulltext/bleve.go#L25-L46)
language: "en"
user_api:
# -- bcrypt cost to use when hashing passwords.
# (ranges from 4-31; 4 being least secure, 31 being most secure; _NOTE: Using a too high value can cause clients to timeout and uses more CPU._)
bcrypt_cost: 10
# -- OpenID Token lifetime in milliseconds.
openid_token_lifetime_ms: 3600000
# -- Disable TLS validation when hitting push gateways. This should **NOT** be used in production.
push_gateway_disable_tls_validation: false
# -- Rooms to join users to after registration
auto_join_rooms: []
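# ex. (hypothetical room alias on this server):
# auto_join_rooms:
#   - "#welcome:dendrite.kluster.moll.re"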
# -- Default logging configuration
logging:
- type: std
level: info
postgresql:
# -- Enable and configure postgres as the database for dendrite.
# @default -- See value.yaml
enabled: true
image:
repository: bitnami/postgresql
tag: "15.1.0"
auth:
username: dendrite
password: changeme
database: dendrite
persistence:
enabled: true
ingress:
# -- Create an ingress for the deployment
enabled: false
# -- The ingressClass to use. Will be converted to annotation if not yet supported.
className: ""
# -- Extra, custom annotations
annotations: {}
# -- The ingress hostname for your matrix server.
# Should align with the server_name and well_known_* hosts.
# If not set, generated from the dendrite_config values.
hostName: ""
# -- TLS configuration. Should contain information for the server_name and well-known hosts.
# Alternatively, set tls.generate=true to generate defaults based on the dendrite_config.
tls: []
service:
type: ClusterIP
port: 8008
prometheus:
servicemonitor:
# -- Enable ServiceMonitor for Prometheus-Operator for scrape metric-endpoint
enabled: false
# -- Extra Labels on ServiceMonitor for selector of Prometheus Instance
labels: {}
rules:
# -- Enable PrometheusRules for Prometheus-Operator for setup alerting
enabled: false
# -- Extra Labels on PrometheusRules for selector of Prometheus Instance
labels: {}
# -- additional alertrules (no default alertrules are provided)
additionalRules: []
grafana:
dashboards:
enabled: false
# -- Extra Labels on ConfigMap for selector of grafana sidecar
labels:
grafana_dashboard: "1"
# -- Extra Annotations on ConfigMap additional config in grafana sidecar
annotations: {}


@@ -0,0 +1,100 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: finance
name: actualbudget
labels:
app: actualbudget
spec:
# deployment running a single container
selector:
matchLabels:
app: actualbudget
replicas: 1
template:
metadata:
labels:
app: actualbudget
spec:
containers:
- name: actualbudget
image: actualbudget/actual-server:latest
imagePullPolicy: Always
env:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: actualbudget-data-nfs
mountPath: /data
ports:
- containerPort: 5006
name: http
protocol: TCP
volumes:
- name: actualbudget-data-nfs
persistentVolumeClaim:
claimName: actualbudget-data-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
storageClassName: fast
capacity:
storage: "5Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/actualbudget
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
# selector:
# matchLabels:
# directory: "journal-data"
---
apiVersion: v1
kind: Service
metadata:
namespace: finance
name: actualbudget
spec:
selector:
app: actualbudget
ports:
- protocol: TCP
port: 5006
targetPort: 5006
type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: finance
name: actualbudget
spec:
entryPoints:
- websecure
routes:
- match: Host(`actualbudget.kluster.moll.re`)
kind: Rule
services:
- name: actualbudget
port: 5006
tls:
certResolver: default-tls


@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly-importer
name: firefly-importer
namespace: finance
spec:
selector:
matchLabels:
app: firefly-importer
template:
metadata:
labels:
app: firefly-importer
spec:
containers:
- image: fireflyiii/data-importer:latest
imagePullPolicy: Always
name: firefly-importer
resources: {}
ports:
- containerPort: 8080
env:
- name: FIREFLY_III_ACCESS_TOKEN
value: redacted
- name: FIREFLY_III_URL
value: firefly-http:8080
# - name: APP_URL
# value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
---
apiVersion: v1
kind: Service
metadata:
name: firefly-importer-http
namespace: finance
labels:
app: firefly-importer-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly-importer
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-importer-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`importer.finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-importer-http
port: 8080
tls:
certResolver: default-tls


@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly
name: firefly
namespace: finance
spec:
selector:
matchLabels:
app: firefly
template:
metadata:
labels:
app: firefly
spec:
containers:
- image: fireflyiii/core:latest
imagePullPolicy: Always
name: firefly
resources: {}
ports:
- containerPort: 8080
env:
- name: APP_ENV
value: "local"
- name: APP_KEY
value: iKejRAlgwx2Y/fxdosXjABbNxNzEuJdl
- name: DB_CONNECTION
value: sqlite
- name: APP_URL
value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
volumeMounts:
- mountPath: /var/www/html/storage/database
name: firefly-database
volumes:
- name: firefly-database
persistentVolumeClaim:
claimName: firefly-database-nfs
---
apiVersion: v1
kind: Service
metadata:
name: firefly-http
namespace: finance
labels:
app: firefly-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-http
port: 8080
tls:
certResolver: default-tls


@@ -0,0 +1,36 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: firefly-database-nfs
labels:
directory: firefly
spec:
storageClassName: fast
volumeMode: Filesystem
accessModes:
- ReadOnlyMany
capacity:
storage: "1G"
nfs:
path: /firefly # inside the NFS partition
server: 10.43.239.43 # IP assigned to the nfs-server Service; won't change as long as the Service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: firefly-database-nfs
spec:
resources:
requests:
storage: "1G"
storageClassName: fast
accessModes:
- ReadOnlyMany
selector:
matchLabels:
directory: firefly
---


@@ -0,0 +1,89 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: drone-runner
namespace: gitea
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: gitea
name: drone-runner
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
- create
- delete
- list
- watch
- update
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: drone-runner
namespace: gitea
subjects:
- kind: ServiceAccount
name: drone-runner
namespace: gitea
roleRef:
kind: Role
name: drone-runner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: gitea
name: drone-runner
labels:
app.kubernetes.io/name: drone-runner
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: drone-runner
template:
metadata:
labels:
app.kubernetes.io/name: drone-runner
spec:
serviceAccountName: drone-runner
containers:
- name: runner
image: drone/drone-runner-kube:latest
ports:
- containerPort: 3000
env:
- name: DRONE_RPC_HOST
value: drone-server:80
- name: DRONE_RPC_PROTO
value: http
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: rpc_secret
- name: DRONE_NAMESPACE_DEFAULT
value: gitea
# - name: DRONE_NAMESPACE_RULES
# value: "drone-runner:*"
- name: DRONE_SERVICE_ACCOUNT_DEFAULT
value: drone-runner
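# NOTE: the runner's DRONE_RPC_SECRET must match the server's; both read the
# rpc_secret key of the drone-server-secret SealedSecret.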


@@ -0,0 +1,129 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-server
namespace: gitea
labels:
app: drone-server
spec:
replicas: 1
selector:
matchLabels:
app: drone-server
template:
metadata:
labels:
app: drone-server
spec:
containers:
- name: drone
image: drone/drone:latest
env:
- name: DRONE_SERVER_PORT # Kubernetes injects DRONE_SERVER_PORT for the drone-server Service, so it must be overridden here
value: ":80"
- name: DRONE_GITEA_SERVER
value: https://git.kluster.moll.re
- name: DRONE_GITEA_CLIENT_ID
valueFrom:
secretKeyRef:
name: drone-server-secret
key: client_id
- name: DRONE_GITEA_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: client_secret
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: rpc_secret
- name: DRONE_SERVER_HOST
value: drone.kluster.moll.re
- name: DRONE_SERVER_PROTO
value: https
resources:
requests:
memory: "1Gi"
cpu: 1.5
volumeMounts:
- mountPath: /data
name: drone-data-nfs
volumes:
- name: drone-data-nfs
persistentVolumeClaim:
claimName: drone-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: drone-server
namespace: gitea
labels:
app: drone-server
spec:
type: ClusterIP
ports:
- port: 80
name: http
selector:
app: drone-server
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: drone-server-ingress
namespace: gitea
spec:
entryPoints:
- websecure
routes:
- match: Host(`drone.kluster.moll.re`)
kind: Rule
services:
- name: drone-server
port: 80
tls:
certResolver: default-tls
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: gitea
name: drone-data-nfs
labels:
directory: drone
spec:
storageClassName: fast
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/drone
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: gitea
name: drone-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: drone


@@ -0,0 +1,23 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "drone-server-secret",
"namespace": "gitea",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "drone-server-secret",
"namespace": "gitea",
"creationTimestamp": null
}
},
"encryptedData": {
"client_id": "AgA53a7kGJ6zZcx2ooTvTNwxaW2FvfzHJnxg6co54+HXinTJKsc4+GJ1PtdIbsZ7Dgu/sLi/4X90fT+PT2sgEx9jIilmHPdJeRtwV1UID3Y46A7cJlfcAKwNOFzp2PWvBvizbNp7tbJwxeAYnVX8GfN6fi700QxBGqAI3u8qQvLpU6UGW2RM96gCXI7s1QhE1Le6TgoESy5HX95pB7csDRNSwVE02OWfDHKEjH8QD8UvBB9xct6uwDfu7KrsJiNJvWMP6arvpfhy/X+UtCTFmj5wmFYL7oc6vSiCkq+QyHgQTEHTmGpEjEGKcQxPQaus3KhbhcxQBYLMEMYRlLPH0AEAA4dzbSpoVXM3LuIe9FppgrTCknK1uRB8wyrHUeInWO8mG7UraV6m5PUS+UYODMvfjwY3PyiGhTSf6LgMlhMl8e+2rb+OsWphT8Pbeom33PucrYaRFr9RpQkJSwE6HU3JEh25YLfIJ7caqRND8C/p8kD679C8UMcNpBN8WS4Cswn5jzmwbeJNM5DGp9yQVZNx7Bv3dHzx9i3ShjJ6QQnR/zWJZ/dWLy6weGYmdZMMXRAO8CCdruvcX5YyeieXZfchSIlZ/GqqBHptdcLpwLiZsfmyTWeBvk5pMAsZaKJ1tfWpQ84s4epzMoieTfhTueGXmeRKX+DJBBcriU+5YoqNxpU1lPL+LoInorJSKN7c3ouFx78N3GDOCq7mlWI94lY0bIs5zhrfUN137ITCcED62AJ7vks=",
"client_secret": "AgDQXU7x6RLhE9Hc+goeR2+3rW316SLLLA8tfqx3tsykL+vxhRkY5UCEaak3Rgei0k14jB/Rmme+/O/D1/5tc/i885+sGn0yjU7Jo4L5nkIssUOHlmRSGkRJDb9ABPauFXAjap9KLix9bd8ewI7R0lS3tOK9ZhThYhcfDUqV9qkkbSHzwNptkH7gYWt9qzG/rqqqpFP+PCtjzKVve4LCBgaxetcnh1t+d5oh7VAFnSI9Bt1G/DRzi+K3YZ+YG5+XKevBp06GMiLUMiv/eUvmOfAB/KO79LnNVbOcRsAHfnqLbXgNjFzspr5xDiGMC/ma1245LavywqXDp0S9jjNEe48i51PPQMwHWV8XEovsM6LHcteluNogt+VkL4mOnmP+sba/V3NO51rt1WXl+ca+U4kBq4dLMsdpWUKemz9BlIRC4etEXjwKJ5DznT7u6GUTrXx2RCm1j0OYWM++P10SdyD6tGjKnZf88a33Wrwm8Y7c47JrPTlP4PqLq9gzvD310uVfs1vGYGULaToGy+D/th8qiWWlu7BIfwqlIj8lruVnOhQ4GeEZmUAsqYf8JfsBwuDc0Y+8qbwjFrr2z+5x+2XBL8KGZVopyme45SHijlBZs7YsJqTBsg5oW09grM8/oO731GtzSYmpat2VZlaILuTjALqo/cu//kxwmqh7UX+jnTJ/2N3bKKSAfHWbHDeHeS2XJ+eKaI4onNYW9J70EfAP3vOpU+zmQ8rOzJuJjRt0HarLwzc5CXb1Xhlgsaoj7zKXPQMnqIDngg==",
"rpc_secret": "AgAcJNCFtOhK28vnLredkTgsVpnMPwaXss5NT5ysc0IbVid2vWRk2CTjBZc5DzjxxLwI1Ok88MFXHP08ZGCYy4rIbwoi7Ei1OEevGWfaI4n5CvAxr4ZamQHSfIX9dVAm9BSSx2M/mDtCKqVEGJEzyHCedrxf6LXM/YTNgjD43BuCZZMu35mRsHItpYFZQSttlHiUvR8y2YKrhV2P7fiWRD3cCVao8ldzKfGuvRfal8ByGoxpsYLj2D9CdtPvRF/TQsWUJJWwzbI9DmbW1MMI4/b26Jfa5TBvHxS1MQxFJpSXuMIengO+b0bi7WaR36y/FrKSNxIrQDHI7XCb00yYaSfj3RkSBVoAD0a2p8vNupHCqsKBoaWd8tMv/wGP8wbBk4DgGeQiTIvfhbQZU/Q2/LVDDficjXVn3IuKP/cqgGVf6lUh5YsUSs8qwpMil7XySiHvaZn+iFAnsXoejd4S2e/pbRvyaxP1aa7TCxnINjpU7IrnUEUiI4glQmAte3MqZWLXcc0Uk3Qz9PP0cD+V8qCOryrPMP2kTAI8LT/K4DgcEMAEGes4Vx1l0oBMF0xJvhM2kZXcEcf0NzuQJvYTgZpQF5xp0TchezLshmEUSIkII9NvAvn+iEYJeHsJUDijjmBloSYe4+QTgdYh6FakVUwYI5U4ztDNrvgqhWjExfbn8HxaFzsNTsuzGoYs+jwXH8Wk2z1Q1oQjDdO5YTjmdqvkSTdin/5CiuCDHaQX6a4gNQ=="
}
}
}


@@ -0,0 +1,18 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: gitea-ingress
namespace: gitea
spec:
entryPoints:
- websecure
routes:
- match: Host(`git.kluster.moll.re`)
kind: Rule
services:
- name: gitea-http
port: 3000
tls:
certResolver: default-tls

apps/gitea/gitea.pvc.yaml
@@ -0,0 +1,71 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: gitea
name: gitea-data-nfs
labels:
directory: gitea
spec:
storageClassName: fast
capacity:
storage: "10Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/gitea/data
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: gitea
name: gitea-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: gitea
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: gitea
name: gitea-postgresql-data-nfs
labels:
directory: gitea
spec:
storageClassName: fast
capacity:
storage: "5Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/gitea/postgres
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: gitea
name: gitea-postgresql-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
selector:
matchLabels:
directory: gitea
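# NOTE: both PVs above share the directory: gitea label, so each claim matches
# both volumes; binding is resolved by capacity (the smallest PV that satisfies
# the request is chosen).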
---


@@ -0,0 +1,497 @@
# Default values for gitea.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
## @section Global
#
## @param global.imageRegistry global image registry override
## @param global.imagePullSecrets global image pull secrets override; can be extended by `imagePullSecrets`
## @param global.storageClass global storage class override
## @param global.hostAliases global hostAliases which will be added to the pod's hosts files
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
hostAliases: []
# - ip: 192.168.137.2
# hostnames:
# - example.com
## @param replicaCount number of replicas for the statefulset
replicaCount: 1
## @param clusterDomain cluster domain
clusterDomain: cluster.local
## @section Image
## @param image.registry image registry, e.g. gcr.io,docker.io
## @param image.repository Image to start for this pod
## @param image.tag Visit: [Image tag](https://hub.docker.com/r/gitea/gitea/tags?page=1&ordering=last_updated). Defaults to `appVersion` within Chart.yaml.
## @param image.pullPolicy Image pull policy
## @param image.rootless Whether or not to pull the rootless version of Gitea, only works on Gitea 1.14.x or higher
image:
registry: ""
repository: gitea/gitea
# Overrides the image tag whose default is the chart appVersion.
tag: ""
pullPolicy: Always
rootless: false # only possible when running 1.14 or later
## @param imagePullSecrets Secret to use for pulling the image
imagePullSecrets: []
## @section Security
# Security context is only usable with rootless image due to image design
## @param podSecurityContext.fsGroup Set the shared file system group for all containers in the pod.
podSecurityContext:
fsGroup: 1000
## @param containerSecurityContext Security context
containerSecurityContext: {}
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# # Add the SYS_CHROOT capability for root and rootless images if you intend to
# # run pods on nodes that use the container runtime cri-o. Otherwise, you will
# # get an error message from the SSH server that it is not possible to read from
# # the repository.
# # https://gitea.com/gitea/helm-chart/issues/161
# add:
# - SYS_CHROOT
# privileged: false
# readOnlyRootFilesystem: true
# runAsGroup: 1000
# runAsNonRoot: true
# runAsUser: 1000
## @deprecated The securityContext variable has been split in two:
## - containerSecurityContext
## - podSecurityContext.
## @param securityContext Run init and Gitea containers as a specific securityContext
securityContext: {}
## @section Service
service:
## @param service.http.type Kubernetes service type for web traffic
## @param service.http.port Port number for web traffic
## @param service.http.clusterIP ClusterIP setting for http autosetup for statefulset is None
## @param service.http.loadBalancerIP LoadBalancer IP setting
## @param service.http.nodePort NodePort for http service
## @param service.http.externalTrafficPolicy If `service.http.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
## @param service.http.externalIPs External IPs for service
## @param service.http.ipFamilyPolicy HTTP service dual-stack policy
## @param service.http.ipFamilies HTTP service dual-stack family selection. For dual-stack parameters, see the official Kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
## @param service.http.loadBalancerSourceRanges Source range filter for http loadbalancer
## @param service.http.annotations HTTP service annotations
http:
type: ClusterIP
port: 3000
clusterIP: None
nodePort:
externalTrafficPolicy:
externalIPs:
ipFamilyPolicy:
ipFamilies:
loadBalancerSourceRanges: []
annotations: {}
## @param service.ssh.type Kubernetes service type for ssh traffic
## @param service.ssh.port Port number for ssh traffic
## @param service.ssh.clusterIP ClusterIP setting for ssh autosetup for statefulset is None
## @param service.ssh.loadBalancerIP LoadBalancer IP setting
## @param service.ssh.nodePort NodePort for ssh service
## @param service.ssh.externalTrafficPolicy If `service.ssh.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
## @param service.ssh.externalIPs External IPs for service
## @param service.ssh.ipFamilyPolicy SSH service dual-stack policy
## @param service.ssh.ipFamilies SSH service dual-stack family selection. For dual-stack parameters, see the official Kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
## @param service.ssh.hostPort HostPort for ssh service
## @param service.ssh.loadBalancerSourceRanges Source range filter for ssh loadbalancer
## @param service.ssh.annotations SSH service annotations
ssh:
type: LoadBalancer
port: 2222
loadBalancerIP: 192.168.3.3
nodePort:
externalTrafficPolicy:
externalIPs:
ipFamilyPolicy:
ipFamilies:
hostPort:
loadBalancerSourceRanges: []
annotations: {}
## @section Ingress
## @param ingress.enabled Enable ingress
## @param ingress.className Ingress class name
## @param ingress.annotations Ingress annotations
## @param ingress.hosts[0].host Default Ingress host
## @param ingress.hosts[0].paths[0].path Default Ingress path
## @param ingress.hosts[0].paths[0].pathType Ingress path type
## @param ingress.tls Ingress tls settings
## @extra ingress.apiVersion Specify APIVersion of ingress object. Mostly would only be used for argocd.
ingress:
enabled: false
# className: nginx
className:
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: git.example.com
paths:
- path: /
pathType: Prefix
tls: []
# - secretName: chart-example-tls
# hosts:
# - git.example.com
# Mostly for argocd or any other CI that uses `helm template | kubectl apply` or similar
# If helm doesn't correctly detect your ingress API version you can set it here.
# apiVersion: networking.k8s.io/v1
## @section StatefulSet
#
## @param resources Kubernetes resources
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
## @param schedulerName Use an alternate scheduler, e.g. "stork"
schedulerName: ""
## @param nodeSelector NodeSelector for the statefulset
nodeSelector: {}
## @param tolerations Tolerations for the statefulset
tolerations: []
## @param affinity Affinity for the statefulset
affinity: {}
## @param dnsConfig dnsConfig for the statefulset
dnsConfig: {}
## @param priorityClassName priorityClassName for the statefulset
priorityClassName: ""
## @param statefulset.env Additional environment variables to pass to containers
## @param statefulset.terminationGracePeriodSeconds How long to wait before the pod is forcefully killed
## @param statefulset.labels Labels for the statefulset
## @param statefulset.annotations Annotations for the Gitea StatefulSet to be created
statefulset:
env:
[]
# - name: VARIABLE
# value: my-value
terminationGracePeriodSeconds: 60
labels: {}
annotations: {}
## @section Persistence
#
## @param persistence.enabled Enable persistent storage
## @param persistence.existingClaim Use an existing claim to store repository information
## @param persistence.size Size for persistence to store repo information
## @param persistence.accessModes AccessMode for persistence
## @param persistence.labels Labels for the persistence volume claim to be created
## @param persistence.annotations Annotations for the persistence volume claim to be created
## @param persistence.storageClass Name of the storage class to use
## @param persistence.subPath Subdirectory of the volume to mount at
persistence:
enabled: true
existingClaim: gitea-data-nfs
size: 10Gi
accessModes:
- ReadWriteOnce
labels: {}
annotations: {}
storageClass:
subPath:
## @param extraVolumes Additional volumes to mount to the Gitea statefulset
extraVolumes: []
# - name: postgres-ssl-vol
# secret:
# secretName: gitea-postgres-ssl
## @param extraContainerVolumeMounts Mounts that are only mapped into the Gitea runtime/main container, to e.g. override custom templates.
extraContainerVolumeMounts: []
## @param extraInitVolumeMounts Mounts that are only mapped into the init-containers. Can be used for additional preconfiguration.
extraInitVolumeMounts: []
## @deprecated The extraVolumeMounts variable has been split in two:
## - extraContainerVolumeMounts
## - extraInitVolumeMounts
## As an example, can be used to mount a client cert when connecting to an external Postgres server.
## @param extraVolumeMounts **DEPRECATED** Additional volume mounts for init containers and the Gitea main container
extraVolumeMounts: []
# - name: postgres-ssl-vol
# readOnly: true
# mountPath: "/pg-ssl"
## @section Init
## @param initPreScript Bash shell script copied verbatim to the start of the init-container.
initPreScript: ""
#
# initPreScript: |
# mkdir -p /data/git/.postgresql
# cp /pg-ssl/* /data/git/.postgresql/
# chown -R git:git /data/git/.postgresql/
# chmod 400 /data/git/.postgresql/postgresql.key
## @param initContainers.resources.limits initContainers.limits Kubernetes resource limits for init containers
## @param initContainers.resources.requests.cpu initContainers.requests.cpu Kubernetes cpu resource limits for init containers
## @param initContainers.resources.requests.memory initContainers.requests.memory Kubernetes memory resource limits for init containers
initContainers:
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
# Configure commit/action signing prerequisites
## @section Signing
#
## @param signing.enabled Enable commit/action signing
## @param signing.gpgHome GPG home directory
## @param signing.privateKey Inline private gpg key for signed Gitea actions
## @param signing.existingSecret Use an existing secret to store the value of `signing.privateKey`
signing:
enabled: false
gpgHome: /data/git/.gnupg
privateKey: ""
# privateKey: |-
# -----BEGIN PGP PRIVATE KEY BLOCK-----
# ...
# -----END PGP PRIVATE KEY BLOCK-----
existingSecret: ""
## @section Gitea
#
gitea:
## @param gitea.admin.username Username for the Gitea admin user
## @param gitea.admin.existingSecret Use an existing secret to store admin user credentials
## @param gitea.admin.password Password for the Gitea admin user
## @param gitea.admin.email Email for the Gitea admin user
admin:
# existingSecret: gitea-admin-secret
existingSecret:
username: gitea_admin
password: r8sA8CPHD9!bt6d
email: "gitea@local.domain"
## @param gitea.metrics.enabled Enable Gitea metrics
## @param gitea.metrics.serviceMonitor.enabled Enable Gitea metrics service monitor
metrics:
enabled: false
serviceMonitor:
enabled: false
# additionalLabels:
# prometheus-release: prom1
## @param gitea.ldap LDAP configuration
ldap:
[]
# - name: "LDAP 1"
# existingSecret:
# securityProtocol:
# host:
# port:
# userSearchBase:
# userFilter:
# adminFilter:
# emailAttribute:
# bindDn:
# bindPassword:
# usernameAttribute:
# publicSSHKeyAttribute:
# Either specify inline `key` and `secret` or refer to them via `existingSecret`
## @param gitea.oauth OAuth configuration
oauth:
[]
# - name: 'OAuth 1'
# provider:
# key:
# secret:
# existingSecret:
# autoDiscoverUrl:
# useCustomUrls:
# customAuthUrl:
# customTokenUrl:
# customProfileUrl:
# customEmailUrl:
  ## @param gitea.config Configuration for the Gitea server, ref: [config-cheat-sheet](https://docs.gitea.io/en-us/config-cheat-sheet/)
config:
APP_NAME: "Remy's personal git hosting"
server:
DOMAIN: git.kluster.moll.re
ROOT_URL: https://git.kluster.moll.re
SSH_LISTEN_PORT: 2222
actions:
ENABLED: true
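    # The map above is rendered into Gitea's app.ini, roughly (illustrative):
    # APP_NAME = Remy's personal git hosting
    # [server]
    # DOMAIN = git.kluster.moll.re
    # ROOT_URL = https://git.kluster.moll.re
    # SSH_LISTEN_PORT = 2222
    # [actions]
    # ENABLED = true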
## @param gitea.additionalConfigSources Additional configuration from secret or configmap
additionalConfigSources: []
# - secret:
# secretName: gitea-app-ini-oauth
# - configMap:
# name: gitea-app-ini-plaintext
## @param gitea.additionalConfigFromEnvs Additional configuration sources from environment variables
additionalConfigFromEnvs: []
## @param gitea.podAnnotations Annotations for the Gitea pod
podAnnotations: {}
## @param gitea.ssh.logLevel Configure OpenSSH's log level. Only available for root-based Gitea image.
ssh:
logLevel: "INFO"
## @section LivenessProbe
#
## @param gitea.livenessProbe.enabled Enable liveness probe
## @param gitea.livenessProbe.tcpSocket.port Port to probe for liveness
## @param gitea.livenessProbe.initialDelaySeconds Initial delay before liveness probe is initiated
## @param gitea.livenessProbe.timeoutSeconds Timeout for liveness probe
## @param gitea.livenessProbe.periodSeconds Period for liveness probe
## @param gitea.livenessProbe.successThreshold Success threshold for liveness probe
## @param gitea.livenessProbe.failureThreshold Failure threshold for liveness probe
  # Modify the liveness probe for your needs, or disable it completely by commenting it out.
livenessProbe:
enabled: true
tcpSocket:
port: http
initialDelaySeconds: 200
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
## @section ReadinessProbe
#
## @param gitea.readinessProbe.enabled Enable readiness probe
## @param gitea.readinessProbe.tcpSocket.port Port to probe for readiness
## @param gitea.readinessProbe.initialDelaySeconds Initial delay before readiness probe is initiated
## @param gitea.readinessProbe.timeoutSeconds Timeout for readiness probe
## @param gitea.readinessProbe.periodSeconds Period for readiness probe
## @param gitea.readinessProbe.successThreshold Success threshold for readiness probe
## @param gitea.readinessProbe.failureThreshold Failure threshold for readiness probe
  # Modify the readiness probe for your needs, or disable it completely by commenting it out.
readinessProbe:
enabled: true
tcpSocket:
port: http
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
  # Enable the startup probe below and modify it for your needs.
## @section StartupProbe
#
## @param gitea.startupProbe.enabled Enable startup probe
## @param gitea.startupProbe.tcpSocket.port Port to probe for startup
## @param gitea.startupProbe.initialDelaySeconds Initial delay before startup probe is initiated
## @param gitea.startupProbe.timeoutSeconds Timeout for startup probe
## @param gitea.startupProbe.periodSeconds Period for startup probe
## @param gitea.startupProbe.successThreshold Success threshold for startup probe
## @param gitea.startupProbe.failureThreshold Failure threshold for startup probe
startupProbe:
enabled: false
tcpSocket:
port: http
initialDelaySeconds: 60
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
## @section Memcached
#
## @param memcached.enabled Memcached is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/memcached) if enabled in the values. The complete configuration can be taken from their documentation.
## ref: https://hub.docker.com/r/bitnami/memcached/tags/
## @param memcached.service.ports.memcached Port for Memcached
memcached:
enabled: true
# image:
# registry: docker.io
# repository: bitnami/memcached
# tag: ""
# digest: ""
# pullPolicy: IfNotPresent
# pullSecrets: []
service:
ports:
memcached: 11211
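  # With the subchart enabled, the chart wires Gitea's cache to this service,
  # roughly (illustrative; the exact host follows the release name):
  # [cache]
  # ADAPTER = memcache
  # HOST = gitea-memcached:11211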
## @section PostgreSQL
#
## @param postgresql.enabled Enable PostgreSQL
## @param postgresql.global.postgresql.auth.password Password for the `gitea` user (overrides `auth.password`)
## @param postgresql.global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
## @param postgresql.global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
## @param postgresql.global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
## @param postgresql.primary.persistence.size PVC Storage Request for PostgreSQL volume
postgresql:
enabled: true
image:
tag: 11
# diagnosticMode:
# enabled: true
# containerSecurityContext:
# runAsUser: 0
global:
postgresql:
auth:
password: gitea
database: gitea
username: gitea
service:
ports:
postgresql: 5432
primary:
persistence:
size: 10Gi
existingClaim: gitea-postgresql-data-nfs
mountPath: /bitnami/postgresql/data
# By default, removed or moved settings that still remain in a user-defined values.yaml will cause Helm to fail the install/upgrade.
# Set it to false to skip this basic validation check.
## @section Advanced
## @param checkDeprecation Set it to false to skip this basic validation check.
## @param test.enabled Set it to false to disable test-connection Pod.
## @param test.image.name Image name for the wget container used in the test-connection Pod.
## @param test.image.tag Image tag for the wget container used in the test-connection Pod.
checkDeprecation: true
test:
enabled: true
image:
name: busybox
tag: latest
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
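# extraDeploy accepts raw manifests that are installed alongside the release;
# a minimal sketch (the ConfigMap is purely illustrative):
# extraDeploy:
#   - apiVersion: v1
#     kind: ConfigMap
#     metadata:
#       name: gitea-extra-config
#     data:
#       key: value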

@ -0,0 +1,34 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: homeassistant-ingress
namespace: homeassistant
spec:
entryPoints:
- websecure
routes:
- match: Host(`home.kluster.moll.re`)
middlewares:
- name: homeassistant-websocket
kind: Rule
services:
- name: homeassistant-home-assistant
port: 8123
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: homeassistant-websocket
namespace: homeassistant
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"

@ -0,0 +1,37 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: homeassistant
name: homeassistant-nfs
labels:
directory: homeassistant
spec:
storageClassName: slow
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/homeassistant
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: homeassistant
name: homeassistant-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: homeassistant

@ -0,0 +1,136 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: homeassistant/home-assistant
# -- image tag
tag: "2023.3"
# -- image pull policy
pullPolicy: IfNotPresent
# -- environment variables.
# @default -- See below
env:
# -- Set the container timezone
TZ: Europe/Berlin
# -- Configures service settings for the chart. Normally this does not need to be modified.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 8123
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: false
# -- Enable devices to be discoverable
# hostNetwork: true
# -- When hostNetwork is true set dnsPolicy to ClusterFirstWithHostNet
# dnsPolicy: ClusterFirstWithHostNet
securityContext:
  # -- (bool) Privileged securityContext may be required if USB devices are accessed directly through the host machine
privileged: # true
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
cpu: "2"
memory: "1Gi"
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
config:
enabled: true
existingClaim: homeassistant-nfs
# -- Configure a hostPathMount to mount a USB device in the container.
# @default -- See values.yaml
usb:
enabled: false
type: hostPath
hostPath: /path/to/device
# -- Enable and configure mariadb database subchart under this key.
# For more options see [mariadb chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/mariadb)
# @default -- See values.yaml
mariadb:
enabled: false
architecture: standalone
auth:
database: home-assistant
username: home-assistant
password: home-assistant-pass
rootPassword: home-assistantrootpass
primary:
persistence:
enabled: false
# storageClass: ""
# -- Enable and configure postgresql database subchart under this key.
# For more options see [postgresql chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/postgresql)
# @default -- See values.yaml
postgresql:
enabled: false
image:
# -- Enable and configure influxdb database subchart under this key.
# For more options see [influxdb chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/influxdb)
# @default -- See values.yaml
influxdb:
enabled: false
architecture: standalone
database: home_assistant
authEnabled: false
persistence:
enabled: false
# storageClass: ""
# size: 8Gi
metrics:
# -- Enable and configure a Prometheus serviceMonitor for the chart under this key.
# @default -- See values.yaml
enabled: false
serviceMonitor:
interval: 1m
scrapeTimeout: 30s
labels: {}
## See https://www.home-assistant.io/docs/authentication/ for where to find
## long lived access token creation under your account profile, which is
## needed to monitor Home Assistant
# bearerTokenSecret:
# name: ""
# key: ""
# -- Enable and configure Prometheus Rules for the chart under this key.
# @default -- See values.yaml
prometheusRule:
enabled: false
labels: {}
    # -- Configure additional rules for the chart under this key.
# @default -- See prometheusrules.yaml
rules: []
# - alert: HomeAssistantAbsent
# annotations:
# description: Home Assistant has disappeared from Prometheus service discovery.
# summary: Home Assistant is down.
# expr: |
# absent(up{job=~".*home-assistant.*"} == 1)
# for: 5m
# labels:
# severity: critical

51
apps/immich/ingress.yaml Normal file
@ -0,0 +1,51 @@
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: stripprefix
spec:
stripPrefix:
prefixes:
- /api
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: websocket
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: immich-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`immich.kluster.moll.re`) && PathPrefix(`/api/`)
kind: Rule
services:
- name: immich-server
port: 3001
passHostHeader: true
middlewares:
- name: stripprefix
- name: websocket
- match: Host(`immich.kluster.moll.re`) && PathPrefix(`/`)
kind: Rule
services:
- name: immich-web
port: 3000
passHostHeader: true
middlewares:
- name: websocket
tls:
certResolver: default-tls
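# Resulting routing, illustratively:
#   https://immich.kluster.moll.re/api/...  ->  immich-server:3001 (with /api stripped)
#   https://immich.kluster.moll.re/...      ->  immich-web:3000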

36
apps/immich/pvc.yaml Normal file
@ -0,0 +1,36 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-nfs
labels:
directory: immich
spec:
storageClassName: fast
capacity:
storage: "50Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "50Gi"
selector:
matchLabels:
directory: immich

136
apps/immich/values.yaml Normal file
@ -0,0 +1,136 @@
## This chart relies on the common library chart from bjw-s
## You can find it at https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
## Refer there for more detail about the supported values
# These entries are shared between all the Immich components
env:
REDIS_HOSTNAME: '{{ printf "%s-redis-master" .Release.Name }}'
DB_HOSTNAME: "{{ .Release.Name }}-postgresql"
DB_USERNAME: "{{ .Values.postgresql.global.postgresql.auth.username }}"
DB_DATABASE_NAME: "{{ .Values.postgresql.global.postgresql.auth.database }}"
# -- You should provide your own secret outside of this helm-chart and use `postgresql.global.postgresql.auth.existingSecret` to provide credentials to the postgresql instance
DB_PASSWORD: "{{ .Values.postgresql.global.postgresql.auth.password }}"
TYPESENSE_ENABLED: "{{ .Values.typesense.enabled }}"
TYPESENSE_API_KEY: "{{ .Values.typesense.env.TYPESENSE_API_KEY }}"
TYPESENSE_HOST: '{{ printf "%s-typesense" .Release.Name }}'
IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}'
IMMICH_SERVER_URL: '{{ printf "http://%s-server:3001" .Release.Name }}'
IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
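  # Helm resolves these templates at install time; if the release is named
  # "immich", the values above render to, e.g. (illustrative):
  #   REDIS_HOSTNAME: immich-redis-master
  #   DB_HOSTNAME: immich-postgresql
  #   IMMICH_SERVER_URL: http://immich-server:3001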
image:
tag: v1.80.0
immich:
persistence:
# Main data store for all photos shared between different components.
library:
# Automatically creating the library volume is not supported by this chart
# You have to specify an existing PVC to use
existingClaim: immich-nfs
# Dependencies
postgresql:
enabled: true
global:
postgresql:
auth:
username: immich
database: immich
password: immich
redis:
enabled: true
architecture: standalone
auth:
enabled: false
typesense:
enabled: true
env:
TYPESENSE_DATA_DIR: /tsdata
TYPESENSE_API_KEY: typesense
persistence:
tsdata:
# Enabling typesense persistence is recommended to avoid slow reindexing
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
# storageClass: storage-class
image:
repository: docker.io/typesense/typesense
tag: 0.24.0
pullPolicy: IfNotPresent
# Immich components
server:
enabled: true
image:
repository: ghcr.io/immich-app/immich-server
pullPolicy: IfNotPresent
microservices:
enabled: true
env:
REVERSE_GEOCODING_DUMP_DIRECTORY: /geodata-cache
persistence:
geodata-cache:
enabled: true
size: 1Gi
# Optional: Set this to pvc to avoid downloading the geodata every start.
type: emptyDir
accessMode: ReadWriteMany
# storageClass: your-class
image:
repository: ghcr.io/immich-app/immich-server
pullPolicy: IfNotPresent
machine-learning:
enabled: true
image:
repository: ghcr.io/immich-app/immich-machine-learning
pullPolicy: IfNotPresent
env:
TRANSFORMERS_CACHE: /cache
persistence:
cache:
enabled: true
size: 10Gi
# Optional: Set this to pvc to avoid downloading the ML models every start.
type: emptyDir
accessMode: ReadWriteMany
# storageClass: your-class
web:
enabled: true
image:
repository: ghcr.io/immich-app/immich-web
pullPolicy: IfNotPresent
persistence:
library:
enabled: false
proxy:
enabled: true
image:
repository: ghcr.io/immich-app/immich-proxy
pullPolicy: IfNotPresent
persistence:
library:
enabled: false
ingress:
main:
enabled: false
annotations:
# proxy-body-size is set to 0 to remove the body limit on file uploads
nginx.ingress.kubernetes.io/proxy-body-size: "0"
hosts:
- host: immich.local
paths:
- path: "/"
tls: []

@ -0,0 +1,32 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-ingress
namespace: media
spec:
entryPoints:
- websecure
routes:
- match: Host(`media.kluster.moll.re`)
middlewares:
- name: jellyfin-websocket
kind: Rule
services:
- name: jellyfin
port: 8096
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: jellyfin-websocket
namespace: media
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
Upgrade: "websocket"

@ -0,0 +1,72 @@
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: media
name: jellyfin-config-nfs
labels:
directory: jellyfin
spec:
storageClassName: slow
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/jellyfin-config
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: media
name: jellyfin-config-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: jellyfin
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: media
name: jellyfin-data-nfs
labels:
directory: jellyfin
spec:
storageClassName: slow
capacity:
storage: "1Ti"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/jellyfin-media
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: media
name: jellyfin-data-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Ti"
selector:
matchLabels:
directory: jellyfin
---

@ -0,0 +1,108 @@
image:
# -- image repository
repository: jellyfin/jellyfin
# -- image tag
tag: 10.8.9
# -- image pull policy
pullPolicy: IfNotPresent
# -- environment variables. See [image docs](https://jellyfin.org/docs/general/administration/configuration.html) for more details.
# @default -- See below
env:
# -- Set the container timezone
TZ: Europe/Berlin
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 8096
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: false
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
config:
enabled: true
type: pvc
existingClaim: jellyfin-config-nfs
accessMode:
- ReadWriteOnce
# Cache does NOT contain temporary transcoding data.
cache:
enabled: false
mountPath: /cache
media:
enabled: true
# use local storage
type: pvc
existingClaim: jellyfin-data-nfs
accessMode:
- ReadWriteOnce
mountPath: /media
# encoder:
# enabled: true
# type: hostPath
# hostPath: /dev/dri/renderD128
# # -- Configure the Security Context for the Pod
# podSecurityContext:
# runAsUser: 0 # root user -> access to /dev/video*
# runAsUser: 568
# runAsGroup: 568
# fsGroup: 568
# # Hardware acceleration using an Intel iGPU w/ QuickSync
# # These IDs below should be matched to your `video` and `render` group on the host
# # To obtain those IDs run the following grep statement on the host:
# # $ cat /etc/group | grep "video\|render"
# # video:x:44:
# # render:x:109:
# supplementalGroups:
# - 44
# - 109
# resources:
# requests:
# # Hardware acceleration using an Intel iGPU w/ QuickSync and
# # using intel-gpu-plugin (https://github.com/intel/intel-device-plugins-for-kubernetes)
# gpu.intel.com/i915: 1
# cpu: 200m
# memory: 256Mi
# limits:
# # Hardware acceleration using an Intel iGPU w/ QuickSync and
# # using intel-gpu-plugin (https://github.com/intel/intel-device-plugins-for-kubernetes)
# gpu.intel.com/i915: 1
# memory: 4096Mi
probes:
# -- Liveness probe configuration
# @default -- See below
liveness:
# -- Enable the liveness probe
enabled: true
# -- Set this to `true` if you wish to specify your own livenessProbe
custom: true
# -- The spec field contains the values for the default livenessProbe.
# If you selected `custom: true`, this field holds the definition of the livenessProbe.
# @default -- See below
spec:
initialDelaySeconds: 100
periodSeconds: 100
timeoutSeconds: 5
failureThreshold: 3
httpGet:
path: /health
port: 8096

@ -0,0 +1,17 @@
kind: IngressRoute
apiVersion: traefik.containo.us/v1alpha1
metadata:
name: grafana-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`grafana.kluster.moll.re`)
kind: Rule
services:
- name: grafana
port: 80
tls:
certResolver: default-tls

@ -0,0 +1,35 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-nfs
labels:
directory: grafana
spec:
storageClassName: slow
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/grafana
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: grafana

@ -0,0 +1,873 @@
rbac:
create: true
## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
# useExistingRole: name-of-some-(cluster)role
pspEnabled: true
pspUseAppArmor: true
namespaced: false
extraRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
extraClusterRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
serviceAccount:
create: true
name:
nameTest:
## Service account annotations. Can be templated.
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
autoMount: true
replicas: 1
## Create a headless service for the deployment
headlessService: false
## Create HorizontalPodAutoscaler object for deployment type
#
autoscaling:
enabled: false
# minReplicas: 1
# maxReplicas: 10
# metrics:
# - type: Resource
# resource:
# name: cpu
# targetAverageUtilization: 60
# - type: Resource
# resource:
# name: memory
# targetAverageUtilization: 60
## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1
## See `kubectl explain deployment.spec.strategy` for more
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
deploymentStrategy:
type: RollingUpdate
readinessProbe:
httpGet:
path: /api/health
port: 3000
livenessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
image:
repository: grafana/grafana
tag: 9.0.2
sha: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Can be templated.
##
# pullSecrets:
# - myRegistryKeySecretName
testFramework:
enabled: true
image: "bats/bats"
tag: "v1.4.1"
imagePullPolicy: IfNotPresent
securityContext: {}
securityContext:
runAsUser: 472
runAsGroup: 472
fsGroup: 472
containerSecurityContext:
{}
# Extra configmaps to mount in grafana pods
# Values are templated.
extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/grafana/ssl/
# subPath: certificates.crt # (optional)
# configMap: certs-configmap
# readOnly: true
extraEmptyDirMounts: []
# - name: provisioning-notifiers
# mountPath: /etc/grafana/provisioning/notifiers
# Apply extra labels to common labels.
extraLabels: {}
## Assign a PriorityClassName to pods if set
# priorityClassName:
downloadDashboardsImage:
repository: curlimages/curl
tag: 7.73.0
sha: ""
pullPolicy: IfNotPresent
downloadDashboards:
env: {}
envFromSecret: ""
resources: {}
## Pod Annotations
# podAnnotations: {}
## Pod Labels
# podLabels: {}
podPortName: grafana
## Deployment annotations
# annotations: {}
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
enabled: true
type: ClusterIP
port: 80
targetPort: 3000
# targetPort: 4181 To be used with a proxy extraContainer
annotations: {}
labels: {}
portName: service
serviceMonitor:
## If true, a ServiceMonitor CRD is created for a prometheus operator
## https://github.com/coreos/prometheus-operator
##
enabled: false
path: /metrics
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
labels: {}
interval: 1m
scheme: http
tlsConfig: {}
scrapeTimeout: 30s
relabelings: []
extraExposePorts: []
# - name: keycloak
# port: 8080
# targetPort: 8080
# type: ClusterIP
# overrides pod.spec.hostAliases in the grafana deployment's pods
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "my.host.com"
ingress:
enabled: true
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
  annotations:
    kubernetes.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
  # pathType is only for k8s >= 1.18
pathType: Prefix
hosts:
- grafana.kluster.moll.re
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## Or for k8s > 1.19
# - path: /*
# pathType: Prefix
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
tls:
- hosts:
- grafana.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Additional init containers (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
extraInitContainers: []
## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
extraContainers: ""
# extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - -provider=github
# - -client-id=
# - -client-secret=
# - -github-org=<ORG_NAME>
# - -email-domain=*
# - -cookie-secret=
# - -http-address=http://0.0.0.0:4181
# - -upstream-url=http://127.0.0.1:3000
# ports:
# - name: proxy-web
# containerPort: 4181
## Volumes that can be used in init containers that will not be mounted to deployment pods
extraContainerVolumes: []
# - name: volume-from-secret
# secret:
# secretName: secret-to-mount
# - name: empty-dir-volume
# emptyDir: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
type: pvc
enabled: true
# storageClassName: default
accessModes:
- ReadWriteOnce
size: 10Gi
# annotations: {}
finalizers:
- kubernetes.io/pvc-protection
# selectorLabels: {}
## Sub-directory of the PV to mount. Can be templated.
# subPath: ""
## Name of an existing PVC. Can be templated.
existingClaim: grafana-nfs
  ## If persistence is not enabled, this allows mounting the
  ## local storage in-memory to improve performance
##
inMemory:
enabled: false
## The maximum usage on memory medium EmptyDir would be
## the minimum value between the SizeLimit specified
## here and the sum of memory limits of all containers in a pod
##
# sizeLimit: 300Mi
initChownData:
## If false, data ownership will not be reset at startup
  ## This allows the grafana server to be run with an arbitrary user
##
enabled: true
## initChownData container image
##
image:
repository: busybox
tag: "1.31.1"
sha: ""
pullPolicy: IfNotPresent
## initChownData resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
# adminPassword: strongpassword
# Use an existing secret for the admin user.
admin:
## Name of the secret. Can be templated.
existingSecret: ""
userKey: admin-user
passwordKey: admin-password
## Define command to be executed at startup by grafana container
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
## Default is "run.sh" as defined in grafana's Dockerfile
# command:
# - "sh"
# - "/run.sh"
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Extra environment variables that will be pass onto deployment pods
##
## to provide grafana with access to CloudWatch on AWS EKS:
## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
## same oidc eks provider as noted before (same as the existing line)
## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
##
## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
##
## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
##
## env:
## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
## AWS_REGION: us-east-1
##
## 5. uncomment the EKS section in extraSecretMounts: below
## 6. uncomment the annotation section in the serviceAccount: above
## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
env: {}
## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
## Renders in container spec as:
## env:
## ...
## - name: <key>
## valueFrom:
## <value rendered as YAML>
envValueFrom: {}
# ENV_NAME:
# configMapKeyRef:
# name: configmap-name
# key: value_key
## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
## This can be useful for auth tokens, etc. Value is templated.
envFromSecret: ""
## Sensitive environment variables that will be rendered as a new secret object
## This can be useful for auth tokens, etc
envRenderSecret: {}
## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
## Name is templated.
envFromSecrets: []
## - name: secret-name
## optional: true
## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key.
## Name is templated.
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
envFromConfigMaps: []
## - name: configmap-name
## optional: true
# Inject Kubernetes services as environment variables.
# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
enableServiceLinks: true
## Additional grafana server secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
# - name: secret-files
# mountPath: /etc/secrets
# secretName: grafana-secret-files
# readOnly: true
# subPath: ""
#
# for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
# - name: aws-iam-token
# mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
# readOnly: true
# projected:
# defaultMode: 420
# sources:
# - serviceAccountToken:
# audience: sts.amazonaws.com
# expirationSeconds: 86400
# path: token
#
# for CSI e.g. Azure Key Vault use the following
# - name: secrets-store-inline
# mountPath: /run/secrets
# readOnly: true
# csi:
# driver: secrets-store.csi.k8s.io
# readOnly: true
# volumeAttributes:
# secretProviderClass: "akv-grafana-spc"
# nodePublishSecretRef: # Only required when using service principal mode
# name: grafana-akv-creds # Only required when using service principal mode
## Additional grafana server volume mounts
# Defines additional volume mounts.
extraVolumeMounts: []
# - name: extra-volume-0
# mountPath: /mnt/volume0
# readOnly: true
# existingClaim: volume-claim
# - name: extra-volume-1
# mountPath: /mnt/volume1
# readOnly: true
# hostPath: /usr/shared/
## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
lifecycleHooks: {}
# postStart:
# exec:
# command: []
## Pass the plugins you want installed as a list.
##
plugins: []
# - digrich-bubblechart-panel
# - grafana-clock-panel
## Configure grafana datasources
## ref: http://docs.grafana.org/administration/provisioning/#datasources
##
datasources: {}
# datasources.yaml:
# apiVersion: 1
# datasources:
# - name: Prometheus
# type: prometheus
# url: http://prometheus-prometheus-server
# access: proxy
# isDefault: true
# - name: CloudWatch
# type: cloudwatch
# access: proxy
# uid: cloudwatch
# editable: false
# jsonData:
# authType: default
# defaultRegion: us-east-1
## Configure notifiers
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
##
notifiers: {}
# notifiers.yaml:
# notifiers:
# - name: email-notifier
# type: email
# uid: email1
# # either:
# org_id: 1
# # or
# org_name: Main Org.
# is_default: true
# settings:
# addresses: an_email_address@example.com
# delete_notifiers:
## Configure grafana dashboard providers
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
##
## `path` must be /var/lib/grafana/dashboards/<provider_name>
##
dashboardProviders: {}
# dashboardproviders.yaml:
# apiVersion: 1
# providers:
# - name: 'default'
# orgId: 1
# folder: ''
# type: file
# disableDeletion: false
# editable: true
# options:
# path: /var/lib/grafana/dashboards/default
## Configure grafana dashboard to import
## NOTE: To use dashboards you must also enable/configure dashboardProviders
## ref: https://grafana.com/dashboards
##
## dashboards per provider, use provider name as key.
##
dashboards: {}
# default:
# some-dashboard:
# json: |
# $RAW_JSON
# custom-dashboard:
# file: dashboards/custom-dashboard.json
# prometheus-stats:
# gnetId: 2
# revision: 2
# datasource: Prometheus
# local-dashboard:
# url: https://example.com/repository/test.json
# token: ''
# local-dashboard-base64:
# url: https://example.com/repository/test-b64.json
# token: ''
# b64content: true
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
## ConfigMap data example:
##
## data:
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps: {}
# default: ""
## Grafana's primary configuration
## NOTE: values in map will be converted to ini format
## ref: http://docs.grafana.org/installation/configuration/
##
grafana.ini:
paths:
data: /var/lib/grafana/
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
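## Rendered to ini, the map above becomes roughly (illustrative):
## [paths]
## data = /var/lib/grafana/
## [analytics]
## check_for_updates = true
## [log]
## mode = console
## [grafana_net]
## url = https://grafana.net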
## grafana Authentication can be enabled with the following values on grafana.ini
# server:
# The full public facing url you use in browser, used for redirects and emails
# root_url:
# https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
# auth.github:
# enabled: false
# allow_sign_up: false
# scopes: user:email,read:org
# auth_url: https://github.com/login/oauth/authorize
# token_url: https://github.com/login/oauth/access_token
# api_url: https://api.github.com/user
# team_ids:
# allowed_organizations:
# client_id:
# client_secret:
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
# auth.ldap:
# enabled: true
# allow_sign_up: true
# config_file: /etc/grafana/ldap.toml
## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
enabled: false
# `existingSecret` is a reference to an existing secret containing the ldap configuration
# for Grafana in a key `ldap-toml`.
existingSecret: ""
# `config` is the content of `ldap.toml` that will be stored in the created secret
config: ""
# config: |-
# verbose_logging = true
# [[servers]]
# host = "my-ldap-server"
# port = 636
# use_ssl = true
# start_tls = false
# ssl_skip_verify = false
# bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana.
existingSecret: ""
userKey: "user"
passwordKey: "password"
## Sidecars that collect the configmaps with the specified label and store the included files in the respective folders
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
image:
repository: quay.io/kiwigrid/k8s-sidecar
tag: 1.15.6
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
securityContext: {}
  # skipTlsVerify: set to true to skip TLS verification for kube API calls
# skipTlsVerify: true
enableUniqueFilenames: false
readinessProbe: {}
livenessProbe: {}
dashboards:
enabled: false
SCProvider: true
# label that the configmaps with dashboards are marked with
label: grafana_dashboard
# value of label that the configmaps with dashboards are set to
labelValue: null
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
folder: /tmp/dashboards
# The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
defaultFolderName: null
# Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces.
searchNamespace: null
    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
    # If specified, the sidecar will look for an annotation with this name to create a folder and put the dashboard there.
    # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create a folder structure.
folderAnnotation: null
# Absolute path to shell script to execute after a configmap got reloaded
script: null
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
# watchServerTimeout: 3600
#
# watchClientTimeout: is a client-side timeout, configuring your local socket.
# If you have a network outage dropping all packets with no RST/FIN,
# this is how long your client waits before realizing & dropping the connection.
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
# provider configuration that lets grafana manage the dashboards
provider:
# name of the provider, should be unique
name: sidecarProvider
# orgid as configured in grafana
orgid: 1
# folder in which the dashboards should be imported in grafana
folder: ''
# type of the provider
type: file
      # disableDelete to activate an import-only behaviour
disableDelete: false
# allow updating provisioned dashboards from the UI
allowUiUpdates: false
# allow Grafana to replicate dashboard structure from filesystem
foldersFromFilesStructure: false
# Additional dashboard sidecar volume mounts
extraMounts: []
# Sets the size limit of the dashboard sidecar emptyDir volume
sizeLimit: {}
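    # When enabled, the sidecar collects ConfigMaps carrying the label above;
    # a sketch of such a ConfigMap (name and dashboard JSON are illustrative):
    # apiVersion: v1
    # kind: ConfigMap
    # metadata:
    #   name: my-dashboard
    #   labels:
    #     grafana_dashboard: "1"
    # data:
    #   my-dashboard.json: |
    #     {"title": "My dashboard", "panels": []}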
datasources:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource
# value of label that the configmaps with datasources are set to
labelValue: null
# If specified, the sidecar will search for datasource config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# Endpoint to send request to reload datasources
reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
skipReload: false
# Deploy the datasource sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any datasources defined at startup time.
initDatasources: false
# Sets the size limit of the datasource sidecar emptyDir volume
sizeLimit: {}
plugins:
enabled: false
# label that the configmaps with plugins are marked with
label: grafana_plugin
# value of label that the configmaps with plugins are set to
labelValue: null
# If specified, the sidecar will search for plugin config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# Endpoint to send request to reload plugins
reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
skipReload: false
# Deploy the datasource sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any plugins defined at startup time.
initPlugins: false
# Sets the size limit of the plugin sidecar emptyDir volume
sizeLimit: {}
notifiers:
enabled: false
# label that the configmaps with notifiers are marked with
label: grafana_notifier
# If specified, the sidecar will search for notifier config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# search in configmap, secret or both
resource: both
# Sets the size limit of the notifier sidecar emptyDir volume
sizeLimit: {}
## Override the deployment namespace
##
namespaceOverride: ""
## Number of old ReplicaSets to retain
##
revisionHistoryLimit: 10
## Add a separate remote image renderer deployment/service
imageRenderer:
# Enable the image-renderer deployment & service
enabled: false
replicas: 1
image:
# image-renderer Image repository
repository: grafana/grafana-image-renderer
# image-renderer Image tag
tag: latest
# image-renderer Image sha (optional)
sha: ""
# image-renderer ImagePullPolicy
pullPolicy: Always
# extra environment variables
env:
HTTP_HOST: "0.0.0.0"
# RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
# RENDERING_MODE: clustered
# IGNORE_HTTPS_ERRORS: true
# image-renderer deployment serviceAccount
serviceAccountName: ""
# image-renderer deployment securityContext
securityContext: {}
# image-renderer deployment Host Aliases
hostAliases: []
# image-renderer deployment priority class
priorityClassName: ''
service:
# Enable the image-renderer service
enabled: true
# image-renderer service port name
portName: 'http'
# image-renderer service port used by both service and deployment
port: 8081
targetPort: 8081
# If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
grafanaProtocol: http
# In case a sub_path is used this needs to be added to the image renderer callback
grafanaSubPath: ""
# name of the image-renderer port on the pod
podPortName: http
# number of image-renderer replica sets to keep
revisionHistoryLimit: 10
networkPolicy:
# Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
limitIngress: true
# Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
limitEgress: false
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# Create a dynamic manifests via values:
extraObjects: []
# - apiVersion: "kubernetes-client.io/v1"
# kind: ExternalSecret
# metadata:
# name: grafana-secrets
# spec:
# backendType: gcpSecretsManager
# data:
# - key: grafana-admin-password
# name: adminPassword

@ -0,0 +1,157 @@
## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/
image:
repo: "telegraf"
tag: "1.22"
pullPolicy: IfNotPresent
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
requests:
memory: 256Mi
cpu: 0.1
limits:
memory: 1Gi
cpu: 1
## Pod annotations
podAnnotations: {}
## Pod labels
podLabels: {}
## Configure args passed to Telegraf containers
args: []
## The name of a secret in the same kubernetes namespace which contains values to
## be added to the environment (must be manually created)
## This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
## Environment
env:
# This pulls HOSTNAME from the node, not the pod.
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# In test clusters where hostnames are resolved in /etc/hosts on each node,
# the HOSTNAME is not resolvable from inside containers
# So inject the host IP as well
- name: HOSTIP
valueFrom:
fieldRef:
fieldPath: status.hostIP
# Mount the host filesystem and set the appropriate env variables.
# ref: https://github.com/influxdata/telegraf/blob/master/docs/FAQ.md
# HOST_PROC is required by the cpu, disk, diskio, kernel and processes input plugins
- name: "HOST_PROC"
value: "/hostfs/proc"
# HOST_SYS is required by the diskio plugin
- name: "HOST_SYS"
value: "/hostfs/sys"
- name: "HOST_MOUNT_PREFIX"
value: "/hostfs"
## Add custom volumes and mounts
# volumes:
# - name: telegraf-output-influxdb2
# configMap:
# name: "telegraf-output-influxdb2"
# mountPoints:
# - name: telegraf-output-influxdb2
# mountPath: /etc/telegraf/conf.d
# subPath: influxdb2.conf
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## If the DaemonSet should run on the host's network namespace
## hostNetwork: true
## If using hostNetwork=true, set dnsPolicy to ClusterFirstWithHostNet
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#
## dnsPolicy: ClusterFirstWithHostNet
## If using dnsPolicy=None, set dnsConfig
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
## dnsConfig:
## nameservers:
## - 1.2.3.4
## searches:
## - ns1.svc.cluster-domain.example
## - my.dns.search.suffix
## options:
## - name: ndots
## value: "2"
## - name: edns0
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
# name:
# Annotations for the ServiceAccount
annotations: {}
## Specify priorityClassName
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: system-node-critical
# Specify the pod's SecurityContext, including the OS user and group to run the pod
podSecurityContext: {}
override_config:
toml: ~
# Provide a literal TOML config
# toml: |+
# [global_tags]
# foo = "bar"
# [agent]
# interval = "10s"
# [[inputs.mem]]
# [[outputs.influxdb_v2]]
# urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
# bucket = "data"
# organization = "OurCompany"
# token = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
## Exposed telegraf configuration
## ref: https://docs.influxdata.com/telegraf/v1.13/administration/configuration/
config:
# global_tags:
# cluster: "mycluster"
agent:
interval: "10s"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: false
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
organization: "influxdata"
bucket: "kluster"
monitor_self: false
docker_endpoint: "unix:///run/k3s/containerd/containerd.sock"

@ -0,0 +1,35 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: influxdb-nfs
labels:
directory: influxdb
spec:
storageClassName: slow
capacity:
storage: "10Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/influxdb
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: influxdb-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: influxdb

@ -0,0 +1,195 @@
image:
repository: influxdb
tag: 2.3.0-alpine
pullPolicy: IfNotPresent
## Annotations to be added to InfluxDB pods
##
podAnnotations: {}
## Labels to be added to InfluxDB pods
##
podLabels: {}
nameOverride: ""
fullnameOverride: ""
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
securityContext: {}
## Customize liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
livenessProbe: {}
# path: "/health"
# scheme: "HTTP"
# initialDelaySeconds: 0
# periodSeconds: 10
# timeoutSeconds: 1
# failureThreshold: 3
readinessProbe: {}
# path: "/health"
# scheme: "HTTP"
# initialDelaySeconds: 0
# periodSeconds: 10
# timeoutSeconds: 1
# successThreshold: 1
# failureThreshold: 3
startupProbe:
enabled: false
# path: "/health"
# scheme: "HTTP"
# initialDelaySeconds: 30
# periodSeconds: 5
# timeoutSeconds: 1
# failureThreshold: 6
## Extra environment variables to configure influxdb
## e.g.
# env:
# - name: FOO
# value: BAR
# - name: BAZ
# valueFrom:
# secretKeyRef:
# name: my-secret
# key: my-key
env: {}
## Create default user through docker entrypoint
## Defaults indicated below
##
adminUser:
organization: "influxdata"
bucket: "default"
user: "admin"
retention_policy: "0s"
## Leave empty to generate a random password and token.
## Or fill any of these values to use fixed values.
password: ""
token: ""
## The password and token are obtained from an existing secret. The expected
## keys are `admin-password` and `admin-token`.
## If set, the password and token values above are ignored.
# existingSecret: influxdb-auth
## Persist data to a persistent volume
##
persistence:
enabled: true
## If true will use an existing PVC instead of creating one
useExisting: true
## Name of existing PVC to be used in the influx deployment
name: influxdb-nfs
## influxdb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 10Gi
mountPath: /var/lib/influxdb2
subPath: ""
## Add custom volume and volumeMounts
##
# volumes:
# - name: influxdb2-templates
# hostPath:
# path: /data/influxdb2-templates
# type: Directory
# mountPoints:
# - name: influxdb2-templates
# mountPath: /influxdb2-templates
# readOnly: true
## Allow executing custom init scripts
## If the container finds any files with the .sh extension inside of the
## /docker-entrypoint-initdb.d folder, it will execute them.
## When multiple scripts are present, they will be executed in lexical sort order by name.
## For more details see Custom Initialization Scripts in https://hub.docker.com/_/influxdb
initScripts:
enabled: false
scripts:
init.sh: |+
#!/bin/bash
influx apply --force yes -u https://raw.githubusercontent.com/influxdata/community-templates/master/influxdb2_operational_monitoring/influxdb2_operational_monitoring.yml
## Specify a service type
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: LoadBalancer
loadBalancerIP: 192.168.3.4
port: 80
targetPort: 8086
annotations: {}
labels: {}
portName: http
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Annotations for the ServiceAccount
annotations: {}
ingress:
enabled: false
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# className: nginx
tls: false
# secretName: my-tls-cert # only needed if tls above is true or default certificate is not configured for Nginx
hostname: influxdb.foobar.com
annotations: {}
# kubernetes.io/ingress.class: "nginx"
# kubernetes.io/tls-acme: "true"
path: /
## Pod disruption budget configuration
##
pdb:
## Specifies whether a Pod disruption budget should be created
##
create: true
minAvailable: 1
# maxUnavailable: 1

@ -0,0 +1,167 @@
## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/
replicaCount: 1
image:
repo: "telegraf"
tag: "1.25"
pullPolicy: IfNotPresent
podAnnotations: {}
podLabels: {}
imagePullSecrets: []
## Configure args passed to Telegraf containers
args: []
# The name of a secret in the same kubernetes namespace which contains values to
# be added to the environment (must be manually created)
# This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
env:
- name: HOSTNAME
value: "telegraf-polling-service"
# An older "volumeMounts" key was previously added which will likely
# NOT WORK as you expect. Please use this newer configuration.
# volumes:
# - name: telegraf-output-influxdb2
# configMap:
# name: "telegraf-output-influxdb2"
# mountPoints:
# - name: telegraf-output-influxdb2
# mountPath: /etc/telegraf/conf.d
# subPath: influxdb2.conf
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 128Mi
# cpu: 100m
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
service:
enabled: false
type: ClusterIP
annotations: {}
rbac:
# Specifies whether RBAC resources should be created
create: true
# Create only for the release namespace or cluster wide (Role vs ClusterRole)
clusterWide: false
# Rules for the created role
rules: []
# When using the prometheus input to scrape all pods you need extra rules added to the ClusterRole to be
# able to scan the pods for scraping labels. The following rules have been taken from:
# https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46
# - apiGroups:
# - ""
# resources:
# - nodes
# - nodes/proxy
# - nodes/metrics
# - services
# - endpoints
# - pods
# - ingresses
# - configmaps
# verbs:
# - get
# - list
# - watch
# - apiGroups:
# - "extensions"
# resources:
# - ingresses/status
# - ingresses
# verbs:
# - get
# - list
# - watch
# - nonResourceURLs:
# - "/metrics"
# verbs:
# - get
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: false
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
agent:
interval: "2m"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: false
processors:
- enum:
mapping:
field: "status"
dest: "status_code"
value_mappings:
healthy: 1
problem: 2
critical: 3
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA==
organization: "influxdata"
bucket: "homeassistant"
inputs:
- http:
urls:
- "http://adguard-home.adguard:3000/control/stats"
data_format: "json"
metrics:
health:
enabled: false
service_address: "http://:8888"
threshold: 5000.0
internal:
enabled: true
collect_memstats: false
# Lifecycle hooks
# hooks:
# postStart: ["/bin/sh", "-c", "echo Telegraf started"]
# preStop: ["/bin/sh", "-c", "sleep 60"]
## Pod disruption budget configuration
##
pdb:
## Specifies whether a Pod disruption budget should be created
##
create: true
minAvailable: 1
# maxUnavailable: 1


@ -0,0 +1,110 @@
## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/
replicaCount: 1
image:
repo: "telegraf"
tag: "1.25"
pullPolicy: IfNotPresent
podAnnotations: {}
podLabels: {}
imagePullSecrets: []
## Configure args passed to Telegraf containers
args: []
# The name of a secret in the same kubernetes namespace which contains values to
# be added to the environment (must be manually created)
# This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
env:
- name: HOSTNAME
value: "telegraf-speedtest"
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 128Mi
# cpu: 100m
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
service:
enabled: false
rbac:
# Specifies whether RBAC resources should be created
create: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: false
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
agent:
interval: "2h"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: false
processors:
- enum:
mapping:
field: "status"
dest: "status_code"
value_mappings:
healthy: 1
problem: 2
critical: 3
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA==
organization: "influxdata"
bucket: "homeassistant"
inputs:
- internet_speed:
enable_file_download: false
# Lifecycle hooks
# hooks:
# postStart: ["/bin/sh", "-c", "echo Telegraf started"]
# preStop: ["/bin/sh", "-c", "sleep 60"]
## Pod disruption budget configuration
##
pdb:
## Specifies whether a Pod disruption budget should be created
##
create: true
minAvailable: 1
# maxUnavailable: 1


@ -0,0 +1,17 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: nextcloud
name: nextcloud-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`nextcloud.kluster.moll.re`)
kind: Rule
services:
- name: nextcloud
port: 8080
tls:
certResolver: default-tls

34
apps/nextcloud/pvc.yaml Normal file

@ -0,0 +1,34 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: nextcloud
name: nextcloud-nfs
labels:
directory: nextcloud
spec:
storageClassName: fast
capacity:
storage: "150Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/nextcloud
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: nextcloud
name: nextcloud-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "150Gi"
selector:
matchLabels:
directory: nextcloud

294
apps/nextcloud/values.yaml Normal file

@ -0,0 +1,294 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
##
image:
repository: nextcloud
tag: "27" # needs to be a string because of the template
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
deploymentAnnotations: {}
# Number of replicas to be deployed
replicaCount: 1
## Allowing use of ingress controllers
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
enabled: false
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
lifecycle: {}
# postStartCommand: []
# preStopCommand: []
nextcloud:
host: nextcloud.kluster.moll.re
username: admin
password: changeme
## Use an existing secret
existingSecret:
enabled: false
update: 0
# If the web server is not binding to the default port, you can define it here
# containerPort: 8080
datadir: /var/www/html/data
persistence:
subPath:
mail:
enabled: false
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
# Default config files
# IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
# Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used to auto-configure the database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
# configs:
# s3.config.php: |-
# <?php
# $CONFIG = array (
# 'objectstore' => array(
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
# 'arguments' => array(
# 'bucket' => 'my-bucket',
# 'autocreate' => true,
# 'key' => 'xxx',
# 'secret' => 'xxx',
# 'region' => 'us-east-1',
# 'use_ssl' => true
# )
# )
# );
## Strategy used to replace old pods
## IMPORTANT: use with care; it is suggested to leave this as is for upgrade purposes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
strategy:
type: Recreate
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
##
## Extra environment variables
extraEnv:
# - name: SOME_SECRET_ENV
# valueFrom:
# secretKeyRef:
# name: nextcloud
# key: secret_key
# Extra mounts for the pods. Example shown is for connecting a legacy NFS volume
# to NextCloud pods in Kubernetes. This can then be configured in External Storage
extraVolumes:
# - name: nfs
# nfs:
# server: "10.0.0.1"
# path: "/nextcloud_data"
# readOnly: false
extraVolumeMounts:
# - name: nfs
# mountPath: "/legacy_data"
# Extra securityContext parameters. For example, you may need to define the runAsNonRoot directive
# extraSecurityContext:
# runAsUser: "33"
# runAsGroup: "33"
# runAsNonRoot: true
# readOnlyRootFilesystem: true
nginx:
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
enabled: false
resources: {}
internalDatabase:
enabled: true
name: nextcloud
##
## External database configuration
##
externalDatabase:
enabled: true
## Supported database engines: mysql or postgresql
type: postgresql
## Database host
host: postgres-postgresql.postgres
## Database user
user: nextcloud
## Database password
password: test
## Database name
database: nextcloud
## Use an existing secret
existingSecret:
enabled: false
# secretName: nameofsecret
# usernameKey: username
# passwordKey: password
##
## MariaDB chart configuration
##
mariadb:
## Whether to deploy a mariadb server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: false
postgresql:
enabled: false
##
## Redis chart configuration
## for more options see https://github.com/bitnami/charts/tree/master/bitnami/redis
##
redis:
enabled: false
auth:
enabled: true
password: 'changeme'
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron
##
cronjob:
enabled: false
# Nextcloud service configuration
service:
type: ClusterIP
port: 8080
loadBalancerIP: nil
nodePort: nil
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
## nextcloud data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
existingClaim: nextcloud-nfs
accessMode: ReadWriteOnce
size: 150Gi
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
subPath:
annotations: {}
# storageClass: "-"
# existingClaim:
accessMode: ReadWriteOnce
size: 8Gi
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 250
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 250
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 250
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 30
successThreshold: 1
## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
enabled: false
nodeSelector: {}
tolerations: []
affinity: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
rbac:
enabled: false
serviceaccount:
create: true
name: nextcloud-serviceaccount


@ -0,0 +1,81 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: pix2tex
labels:
app: pix2tex
spec:
replicas: 1
selector:
matchLabels:
app: pix2tex
template:
metadata:
labels:
app: pix2tex
spec:
containers:
- name: pix2tex
image: lukasblecher/pix2tex:api
tty: true
resources:
requests:
memory: "250M"
cpu: 500m
ephemeral-storage: "2Gi"
limits:
ephemeral-storage: "4Gi"
memory: "500M"
cpu: 1000m
ports:
- containerPort: 8501
command: ["python", "pix2tex/api/run.py"]
nodeSelector:
kubernetes.io/arch: amd64
---
apiVersion: v1
kind: Service
metadata:
name: pix2tex-http
namespace: pix2tex
labels:
app: pix2tex
spec:
ports:
- name: http
port: 8501
targetPort: 8501
selector:
app: pix2tex
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: pix2tex-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`pix2tex.kluster.moll.re`)
kind: Rule
middlewares:
- name: pix2tex-websocket
services:
- name: pix2tex-http
port: 8501
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: pix2tex-websocket
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"


@ -0,0 +1,53 @@
kind: Namespace
apiVersion: v1
metadata:
name: whoami
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: whoami
namespace: whoami
labels:
app: traefiklabs
name: whoami
spec:
selector:
matchLabels:
app: traefiklabs
task: whoami
template:
metadata:
labels:
app: traefiklabs
task: whoami
spec:
containers:
- name: whoami
image: traefik/whoami
ports:
- containerPort: 80
resources:
requests:
cpu: "5m"
memory: "5Mi"
limits:
cpu: "10m"
memory: "10Mi"
---
apiVersion: v1
kind: Service
metadata:
name: whoami
namespace: whoami
spec:
ports:
- name: http
port: 80
selector:
app: traefiklabs
task: whoami


@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: whoami
name: whoami-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`whoami.kluster.moll.re`)
kind: Rule
services:
- name: whoami
port: 80
tls:
certResolver: default-tls


@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./deployment.yaml
- ./ingress.yaml


@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base


@ -0,0 +1,43 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: restic-rclone-gdrive
spec:
successfulJobsHistoryLimit: 2
failedJobsHistoryLimit: 2
jobTemplate:
spec:
template:
spec:
restartPolicy: Never
hostname: restic-k3s-pod
# used by restic to identify the host
containers:
- name: restic-base-container
image: restic/restic:latest
command:
- /bin/sh
- -c
# >- folds newlines into spaces
# RESTIC_ARGS Can be for instance: --verbose --dry-run
args: []
volumeMounts:
- mountPath: /data
name: backup-nfs-access
env:
- name: RESTIC_REPOSITORY
value: rest:http://rclone-gcloud:8000/kluster
# lives in the same namespace
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: restic-gdrive-credentials
key: restic-password
volumes:
- name: backup-nfs-access
persistentVolumeClaim:
claimName: backup-nfs-access


@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./cronjob.yaml
- ./restic-password.secret.yaml


@ -0,0 +1,8 @@
```
k kustomize backup/overlays/backup | k apply -f -
> secret/restic-credentials-backup created
> cronjob.batch/restic-backblaze-backup created
k kustomize backup/overlays/prune | k apply -f -
> secret/restic-credentials-prune created
> cronjob.batch/restic-backblaze-prune created
```
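
To trigger a run without waiting for the schedule, a one-off job can be created from the cronjob (a sketch, assuming the resource names from the output above and the `backup` namespace):
```
k create job --from=cronjob/restic-backblaze-backup manual-backup -n backup
k logs -f job/manual-backup -n backup
```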


@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# namespace: backup
nameSuffix: -backup
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
target:
kind: CronJob


@ -0,0 +1,25 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-patch
spec:
schedule: "0 2 * * *"
# at 2:00, every day
jobTemplate:
spec:
template:
spec:
containers:
- name: restic-base-container
args:
# >- folds newlines into spaces
# -r $(RESTIC_REPOSITORY) not needed, because it is already set as an env var
- >-
restic backup
--verbose=2
/data
--exclude=s3/
&&
restic
list snapshots


@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# namespace: backup
nameSuffix: -prune
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
target:
kind: CronJob


@ -0,0 +1,24 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: prune-patch
spec:
schedule: "0 0 1/15 * *"
# at midnight on days 1, 16 and 31 of every month (1/15 = every 15 days starting from day 1)
jobTemplate:
spec:
template:
spec:
containers:
- name: restic-base-container
args:
# >- folds newlines into spaces
# RESTIC_ARGS Can be for instance: --verbose --dry-run
# RESTIC_REPOSITORY is set in the secret
- >-
restic forget
-r $(RESTIC_REPOSITORY)
--verbose=2
--keep-daily 7 --keep-weekly 5
--prune


@ -0,0 +1,22 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "rclone-config-files",
"namespace": "backup",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "rclone-config-files",
"namespace": "backup",
"creationTimestamp": null
},
"type": "Opaque"
},
"encryptedData": {
"rclone.conf": "AgCQ13IG+R6bs+nAxe1xuDXttYlvGlLfV3oQ6c0qtoF2jXB8hN3LftydHn+Se3LjghmQKAIErfsA7ZRhJoWfFuSm2AIc3w2mMonsga5gjBx/56/tZSvnT2Bzn/5UXktTVxwEINSBP0dYiMcn4/G+5hO3bngmG+lCZXeI7yWoTW8H+8NKYxDHUzdoBBhPPPLTERTRZHB8EzOPUlefHq/2y/NpUfkxyLSjYk0/X45W6XNzH6MfdA2x6omxd4giDQSEwJGdXqIXu1rPnPjV7WVcA8qJzkQbxhzjqpUcFgM12YsLGVVW8HSSdAy+ZNdTXmhCIu2+pI+AVuol4QY9r/gU3xlGhFmc3asW5k4iOfn7/ZEr3Yk8JplAYM+GWQ07s59MqYdGOhqFUpVmkjO97Z29iaeReQZCwxzl/PmxUtfI20eTmtUlFKE3fObMr27sZcXgeJS3ktHOONGoqvHHeuqd4hfTaVAGwVOAEoBY8Xnkq3ECN5ld8V4zR8e52QHtANflN4IJgjnGO5pMQyAW+XASAJDxG48q7ruu9i0mI4vuM0rVuoWi2v9I30/M7Mv2xAYnmKC7NIao1mDya3paidHwkIu12480oBDdHZpm5NSqHtQr/HKMQWnbu6CrufrDmTqoVe/ew5uaqjbfrBBys35k5ObUUPlhU3putgfmsR3YZXDaAqOwIoXQ30wm02gCA5z/WNEY3EaKP6RhgsowwkrPPniQfz4EaxQQjmZ/toe/xpwzSZjmoVnJtJabiuqL/B/eY6WpNOTjOzsc7Z69EOyhZMs41gNoA32RRUbFO1ppOu8518cE12KpsGbH6K6NcucSrKh2Gd3xNGwjaGQVT2vLTVi9YwByiwvrsVpNU06f2v0fcZWeRgoFoUkKMj746lw0E+X7oF0+PmfPT2IeTRszHECkbStSvFZNDivcdJyDFutocAZKNjDoAnVPlTNVYwKKcmHvw3sOOXhVN7NOj/+9UxSNyRvip7GPZKtRF1u9ftlD6OaLCCVSip7MJ41a7TugBTUUaMJbQUTmidWKZn6A0nctAvdrPbBatPI2BZQ4amwdXa2bWyE7DI13WaCm6kAVJijsAmfVrVX3C+Ft5p8unbjsVQ/ErdpKTjlq9mJsie3TQdME5r74GlcURiVXdLc7KcV7vpf6yy88XS6ee+Y9WmlYDAwRX+taMilRDlunMeF5Zmh12DCXMzsradEifEOZ/Mg5BMznxvrZv3iHDArm/j4QW7Bi0To3+f2826IAaXMlI4ze7e9Ny3NUbgy85yE+RNYiio0+wvWRKraxpqI0EODy/juBed3VcoWlOfch0hKU4BZTVrU5rDEmwYcp6oWnXE92fhVH7wjy4IV3WUSubYg="
}
}
}


@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: rclone-gcloud
spec:
selector:
matchLabels:
app: rclone-gcloud
template:
metadata:
labels:
app: rclone-gcloud
spec:
containers:
- name: rclone
image: rclone/rclone:latest
command: ["/bin/sh", "-c"]
args: # mounted as a secret
# >- folds newlines into spaces
# sleep infinity
- >-
rclone
--config /config/rclone.conf
serve restic
--addr :8000
-v
ETHZ-gdrive:backup
volumeMounts:
# from secret
- name: rclone-config
mountPath: /config
readOnly: true
volumes:
- name: rclone-config
secret:
secretName: rclone-config-files
---
apiVersion: v1
kind: Service
metadata:
name: rclone-gcloud
spec:
selector:
app: rclone-gcloud
ports:
- protocol: TCP
port: 8000
targetPort: 8000


@ -0,0 +1,2 @@
export RESTIC_REPOSITORY=rest:http://127.0.0.1:8000/kluster
export RESTIC_PASSWORD="2r,TE0.,U@gni3e%xr)_LC64"
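# Usage sketch (assumes the rclone "serve restic" service is reachable on
# localhost:8000, e.g. via `kubectl port-forward svc/rclone-gcloud 8000:8000 -n backup`):
#   source this file, then run e.g. `restic snapshots`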


@ -0,0 +1,38 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: omv-s3-ingressroute
namespace: external
spec:
entryPoints:
- websecure
routes:
- match: Host(`s3.kluster.moll.re`)
kind: Rule
services:
- name: omv-s3
port: 9000
# scheme: https
tls:
certResolver: default-tls
---
apiVersion: v1
kind: Endpoints
metadata:
name: omv-s3
namespace: external
subsets:
- addresses:
- ip: 192.168.1.157
ports:
- port: 9000
---
apiVersion: v1
kind: Service
metadata:
name: omv-s3
namespace: external
spec:
ports:
- port: 9000
targetPort: 9000
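# Note: this Service intentionally has no selector; traffic is routed to the
# external host via the manually maintained Endpoints object of the same name above.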


@ -0,0 +1,38 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: omv-ingressroute
namespace: external
spec:
entryPoints:
- websecure
routes:
- match: Host(`omv.kluster.moll.re`)
kind: Rule
services:
- name: omv
port: 443
scheme: https
tls:
certResolver: default-tls
---
apiVersion: v1
kind: Endpoints
metadata:
name: omv
namespace: external
subsets:
- addresses:
- ip: 192.168.1.157
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: omv
namespace: external
spec:
ports:
- port: 443
targetPort: 443


@ -0,0 +1,55 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: proxmox-ingressroute
namespace: external
spec:
entryPoints:
- websecure
routes:
- match: Host(`proxmox.kluster.moll.re`)
middlewares:
- name: proxmox-websocket
kind: Rule
services:
- name: proxmox
port: 8006
scheme: https
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: proxmox-websocket
namespace: external
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"
---
apiVersion: v1
kind: Endpoints
metadata:
name: proxmox
namespace: external
subsets:
- addresses:
- ip: 192.168.1.150
ports:
- port: 8006
---
apiVersion: v1
kind: Service
metadata:
name: proxmox
namespace: external
spec:
ports:
- port: 8006
targetPort: 8006


@ -0,0 +1,2 @@
name: metallb
chart: metallb/metallb


@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config
spec:
secretTemplates:
- name: secret-1
labels:
label1: value1
annotations:
key1: value1
stringData:
data-name0: data-value0
data:
data-name1: ZGF0YS12YWx1ZTE=


@ -0,0 +1,14 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: default
namespace: metallb-system
spec:
addresses:
- 192.168.3.0/24
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: empty
namespace: metallb-system
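# Note: Services can pin an address from this pool either through the (legacy)
# spec.loadBalancerIP field, as the traefik and influxdb values in this repo do,
# or through the metallb.universe.tf/loadBalancerIPs annotation.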


@ -0,0 +1,337 @@
# Default values for metallb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
loadBalancerClass: ""
# existingConfigMap: "config"
rbac:
# create specifies whether to install and use RBAC rules.
create: true
prometheus:
# scrape annotations specifies whether to add Prometheus metric
# auto-collection annotations to pods. See
# https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
# for a corresponding Prometheus configuration. Alternatively, you
# may want to use the Prometheus Operator
# (https://github.com/coreos/prometheus-operator) for more powerful
# monitoring configuration. If you use the Prometheus operator, this
# can be left at false.
scrapeAnnotations: false
# port both controller and speaker will listen on for metrics
metricsPort: 7472
# if set, enables rbac proxy on the controller and speaker to expose
# the metrics via tls.
# secureMetricsPort: 9120
# the name of the secret to be mounted in the speaker pod
# to expose the metrics securely. If not present, a self-signed
# certificate will be used.
speakerMetricsTLSSecret: ""
# the name of the secret to be mounted in the controller pod
# to expose the metrics securely. If not present, a self signed
# certificate to be used.
controllerMetricsTLSSecret: ""
# prometheus doesn't have the permission to scrape all namespaces so we give it permission to scrape metallb's one
rbacPrometheus: true
# the service account used by prometheus
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
serviceAccount: ""
# the namespace where prometheus is deployed
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
namespace: ""
# the image to be used for the kuberbacproxy container
rbacProxy:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.12.0
pullPolicy:
# Prometheus Operator PodMonitors
podMonitor:
# enable support for Prometheus Operator
enabled: false
# optional additional labels for podMonitors
additionalLabels: {}
# optional annotations for podMonitors
annotations: {}
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator ServiceMonitors. To be used as an alternative
# to podMonitor, supports secure metrics.
serviceMonitor:
# enable support for Prometheus Operator
enabled: false
speaker:
# optional additional labels for the speaker serviceMonitor
additionalLabels: {}
# optional additional annotations for the speaker serviceMonitor
annotations: {}
# optional tls configuration for the speaker serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
controller:
# optional additional labels for the controller serviceMonitor
additionalLabels: {}
# optional additional annotations for the controller serviceMonitor
annotations: {}
# optional tls configuration for the controller serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator alertmanager alerts
prometheusRule:
# enable alertmanager alerts
enabled: false
# optional additional labels for prometheusRules
additionalLabels: {}
# optional annotations for prometheusRules
annotations: {}
# MetalLBStaleConfig
staleConfig:
enabled: true
labels:
severity: warning
# MetalLBConfigNotLoaded
configNotLoaded:
enabled: true
labels:
severity: warning
# MetalLBAddressPoolExhausted
addressPoolExhausted:
enabled: true
labels:
severity: alert
addressPoolUsage:
enabled: true
thresholds:
- percent: 75
labels:
severity: warning
- percent: 85
labels:
severity: warning
- percent: 95
labels:
severity: alert
# MetalLBBGPSessionDown
bgpSessionDown:
enabled: true
labels:
severity: alert
extraAlerts: []
# controller contains configuration specific to the MetalLB cluster
# controller.
controller:
enabled: true
# -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
# command: /controller
# webhookMode: enabled
image:
repository: quay.io/metallb/controller
tag:
pullPolicy:
## @param controller.updateStrategy.type Metallb controller deployment strategy type.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## e.g:
## strategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
strategy:
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
securityContext:
runAsNonRoot: true
# nobody
runAsUser: 65534
fsGroup: 65534
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
runtimeClassName: ""
affinity: {}
podAnnotations: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
speaker:
enabled: true
# command: /speaker
# -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
tolerateMaster: true
memberlist:
enabled: true
mlBindPort: 7946
mlSecretKeyPath: "/etc/ml_secret_key"
image:
repository: quay.io/metallb/speaker
tag:
pullPolicy:
## @param speaker.updateStrategy.type Speaker daemonset strategy type
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy:
## StrategyType
## Can be set to RollingUpdate or OnDelete
##
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
## Defines a secret name for the controller to generate a memberlist encryption secret
## By default secretName: {{ "metallb.fullname" }}-memberlist
##
# secretName:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
affinity: {}
## Selects which runtime class will be used by the pod.
runtimeClassName: ""
podAnnotations: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 5
# frr contains configuration specific to the MetalLB FRR container,
# for speaker running alongside FRR.
frr:
enabled: false
image:
repository: quay.io/frrouting/frr
tag: 7.5.1
pullPolicy:
metricsPort: 7473
resources: {}
# if set, enables a rbac proxy sidecar container on the speaker to
# expose the frr metrics via tls.
# secureMetricsPort: 9121
reloader:
resources: {}
frrMetrics:
resources: {}
crds:
enabled: true
validationFailurePolicy: Fail


@ -0,0 +1,13 @@
```
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Mi
```
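
Applying this claim is a quick smoke test for the provisioner; the PVC should bind within a few seconds (a sketch, assuming the manifest is saved as `test-claim.yaml`):
```
k apply -f test-claim.yaml
k get pvc test-claim   # STATUS should be Bound
```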


@ -0,0 +1,9 @@
namespace: nfs-provisioner
bases:
- github.com/kubernetes-sigs/nfs-subdir-external-provisioner//deploy
resources:
- namespace.yaml
patchesStrategicMerge:
- nfs_values.yaml


@ -0,0 +1,5 @@
# namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: nfs-provisioner


@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nfs-client-provisioner
name: nfs-client-provisioner
spec:
template:
spec:
containers:
- name: nfs-client-provisioner
env:
- name: NFS_SERVER
value: 192.168.1.157
- name: NFS_PATH
value: /export/kluster/
volumes:
- name: nfs-client-root
nfs:
server: 192.168.1.157
path: /export/kluster/


@ -0,0 +1,13 @@
Create a new role with the createuser command. With the options below, the new role will not be a superuser (-S) and will not have privileges for creating new databases (-D) or new roles (-R); this matches the usual createuser defaults. First open a shell in the postgres pod, then create the role:
```
k exec -it -n postgres postgres-postgresql-0 -- bash
createuser -U postgres USER_NAME -S -D -R -P
```
You will be prompted to enter the new role's password twice, and then the password of the postgres role.
Create a new database with the new role as the owner:
```
createdb -U postgres DATABASE_NAME -O USER_NAME
```
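
To verify the new credentials, connecting with psql from inside the same pod is enough (a sketch; `USER_NAME` and `DATABASE_NAME` are the placeholders from above):
```
psql -U USER_NAME -d DATABASE_NAME -c '\conninfo'
```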


@ -0,0 +1,2 @@
name: postgres
chart: bitnami/postgresql


@ -0,0 +1,21 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "postgres-password",
"namespace": "postgres",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "postgres-password",
"namespace": "postgres",
"creationTimestamp": null
}
},
"encryptedData": {
"password": "AgCVytxZbe1yjT7OQuA7LocPTgn6Ikx9pDJAA49Ktboy86dJWlxnBke23O0qn3ELFTGUTDaMhBcJB0neqA0RjTTW3o7PsvbxEBvrP5F1EK4jN2vHti8Jgt/CUbOlJVfFuGPaL2DG9M7vafUnL3AQvZv/YkL79Q32Wcg9nPq+4iT7fTGQzUu22G6bmKJv/SnByAnBIzZRsL3R3pP4J7suG+5+K6PDlNRbIb0mIoy1vjBz5PKQAR2Hrh1+kLFIJEIwDuinSDHRDUoa9fChC52x/Oc4PavFw8RWTXjot5cnEOkUK3umSx0jnD247nPc8sRW87hmHE3O/T+doDqEetQxtarSNPxCZXwkVJCIAxg48M29mdkPiOUu2Rr9W9w+HnN8j7mA2rHYAxxi3KPeDBL7kaFH+Xtyv+MT6upRr9BHfSbA/gMPjT37dJmbEYJAvEEyZZJK6TpXUkLh3jnhg1P180t8AnJVX4KQhjUm+UmgUCytxEjp082vxoKEHop6I7f4qzUYfudaG825i0zL11yjSvUbQbdoe8j3C5pNs5OgNBboGqYGfreCcp76zKdNrNI6GYhtj04AuOQZP5SD9/bqsP4JW4yFYsWsq3XuqIxE/2ExCRvDOFu2H1rnPnkcvUYr30doYPIugP40l7AY18YucUsbH19ww7jM1TOejo5QS5wb39uygwf4j0+XjbD3iV12AQzaEnk/pfo="
}
}
}


@ -0,0 +1,37 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: postgres
name: postgres-nfs
labels:
directory: postgres
spec:
storageClassName: fast
capacity:
storage: "50Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteMany
nfs:
path: /export/kluster/postgres
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: postgres
name: postgres-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteMany
resources:
requests:
storage: "50Gi"
selector:
matchLabels:
directory: postgres

File diff suppressed because it is too large


@ -0,0 +1,377 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: {}
labels:
name: sealed-secrets-service-proxier
name: sealed-secrets-service-proxier
namespace: kube-system
rules:
- apiGroups:
- ""
resourceNames:
- sealed-secrets-controller
resources:
- services
verbs:
- get
- apiGroups:
- ""
resourceNames:
- 'http:sealed-secrets-controller:'
- http:sealed-secrets-controller:http
- sealed-secrets-controller
resources:
- services/proxy
verbs:
- create
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: {}
labels:
name: sealed-secrets-controller
name: sealed-secrets-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: sealed-secrets-key-admin
subjects:
- kind: ServiceAccount
name: sealed-secrets-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: {}
labels:
name: sealed-secrets-key-admin
name: sealed-secrets-key-admin
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: {}
labels:
name: secrets-unsealer
name: secrets-unsealer
rules:
- apiGroups:
- bitnami.com
resources:
- sealedsecrets
verbs:
- get
- list
- watch
- apiGroups:
- bitnami.com
resources:
- sealedsecrets/status
verbs:
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- create
- update
- delete
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: {}
labels:
name: sealed-secrets-service-proxier
name: sealed-secrets-service-proxier
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: sealed-secrets-service-proxier
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:authenticated
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: {}
labels:
name: sealed-secrets-controller
name: sealed-secrets-controller
namespace: kube-system
spec:
minReadySeconds: 30
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
name: sealed-secrets-controller
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations: {}
labels:
name: sealed-secrets-controller
spec:
containers:
- args: []
command:
- controller
env: []
image: docker.io/bitnami/sealed-secrets-controller:v0.23.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: http
name: sealed-secrets-controller
ports:
- containerPort: 8080
name: http
readinessProbe:
httpGet:
path: /healthz
port: http
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
stdin: false
tty: false
volumeMounts:
- mountPath: /tmp
name: tmp
imagePullSecrets: []
initContainers: []
securityContext:
fsGroup: 65534
runAsNonRoot: true
runAsUser: 1001
seccompProfile:
type: RuntimeDefault
serviceAccountName: sealed-secrets-controller
terminationGracePeriodSeconds: 30
volumes:
- emptyDir: {}
name: tmp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: sealedsecrets.bitnami.com
spec:
group: bitnami.com
names:
kind: SealedSecret
listKind: SealedSecretList
plural: sealedsecrets
singular: sealedsecret
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: SealedSecret is the K8s representation of a "sealed Secret" -
a regular k8s Secret that has been sealed (encrypted) using the controller's
key.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: SealedSecretSpec is the specification of a SealedSecret
properties:
data:
description: Data is deprecated and will be removed eventually. Use
per-value EncryptedData instead.
format: byte
type: string
encryptedData:
additionalProperties:
type: string
type: object
x-kubernetes-preserve-unknown-fields: true
template:
description: Template defines the structure of the Secret that will
be created from this sealed secret.
properties:
data:
additionalProperties:
type: string
description: Keys that should be templated using decrypted data
nullable: true
type: object
metadata:
description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata'
nullable: true
properties:
annotations:
additionalProperties:
type: string
type: object
finalizers:
items:
type: string
type: array
labels:
additionalProperties:
type: string
type: object
name:
type: string
namespace:
type: string
type: object
x-kubernetes-preserve-unknown-fields: true
type:
description: Used to facilitate programmatic handling of secret
data.
type: string
type: object
required:
- encryptedData
type: object
status:
description: SealedSecretStatus is the most recently observed status of
the SealedSecret.
properties:
conditions:
description: Represents the latest available observations of a sealed
secret's current state.
items:
description: SealedSecretCondition describes the state of a sealed
secret at a certain point.
properties:
lastTransitionTime:
description: Last time the condition transitioned from one status
to another.
format: date-time
type: string
lastUpdateTime:
description: The last time this condition was updated.
format: date-time
type: string
message:
description: A human readable message indicating details about
the transition.
type: string
reason:
description: The reason for the condition's last transition.
type: string
status:
description: 'Status of the condition for a sealed secret. Valid
values for "Synced": "True", "False", or "Unknown".'
type: string
type:
description: 'Type of condition for a sealed secret. Valid value:
"Synced"'
type: string
required:
- status
- type
type: object
type: array
observedGeneration:
description: ObservedGeneration reflects the generation most recently
observed by the sealed-secrets controller.
format: int64
type: integer
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
---
apiVersion: v1
kind: Service
metadata:
annotations: {}
labels:
name: sealed-secrets-controller
name: sealed-secrets-controller
namespace: kube-system
spec:
ports:
- port: 8080
targetPort: 8080
selector:
name: sealed-secrets-controller
type: ClusterIP
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: {}
labels:
name: sealed-secrets-controller
name: sealed-secrets-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: secrets-unsealer
subjects:
- kind: ServiceAccount
name: sealed-secrets-controller
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: {}
labels:
name: sealed-secrets-controller
name: sealed-secrets-controller
namespace: kube-system


@ -0,0 +1,2 @@
name: traefik
chart: traefik/traefik


@ -0,0 +1,87 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: traefik-config
namespace: traefik-system
data:
traefik.toml: |
[ping]
[global]
checkNewVersion = true
sendAnonymousUsage = false
[log]
level = "INFO"
[accessLog]
# format = "json"
# filePath = "/var/log/traefik/access.log"
[accessLog.fields]
defaultMode = "keep"
[accessLog.fields.names]
"RequestProtocol" = "drop"
"level" = "drop"
"RequestContentSize" = "drop"
"RequestScheme" = "drop"
"StartLocal" = "drop"
"StartUTC" = "drop"
# ClientUsername: drop
# DownstreamStatusLine: drop
# RequestAddr: drop
# RequestCount: drop
# RequestHost: drop
# RequestLine: drop
# UpstreamAddr: drop
# UpstreamStatusLine: drop
# duration: drop
# msg: drop
# time: drop
# upstream: drop
# user_agent: drop
[api]
dashboard = true
insecure = true
debug = false
[providers]
[providers.kubernetesCRD]
allowCrossNamespace = true
[providers.kubernetesIngress]
allowExternalNameServices = true
ingressClass = "traefik"
[serversTransport]
insecureSkipVerify = true
[entryPoints]
[entryPoints.web]
address = ":8000"
[entryPoints.web.http]
[entryPoints.web.http.redirections]
[entryPoints.web.http.redirections.entryPoint]
to = ":443" # should be the same as websecure but the loadbalancer maps 443 -> 8443
scheme = "https"
[entryPoints.websecure]
address = ":8443"
[entryPoints.metrics]
address = ":9100"
[entryPoints.traefik]
address = ":9000"
[metrics]
[metrics.influxDB2]
address = "http://influxdb-influxdb2.monitoring:80"
token = "N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ=="
org = "influxdata"
bucket = "kluster"
[certificatesResolvers.default-tls.acme]
email = "me@moll.re"
storage = "/certs/acme.json"
[certificatesResolvers.default-tls.acme.tlsChallenge]
[experimental.plugins.traefik-plugin-geoblock]
moduleName = "github.com/nscuro/traefik-plugin-geoblock"
version = "v0.10.0"
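# Note: declaring the plugin here only downloads it; to take effect it must be
# referenced from a Middleware CRD. Hypothetical sketch (names and values are
# placeholders, not taken from this repo):
#   apiVersion: traefik.containo.us/v1alpha1
#   kind: Middleware
#   metadata:
#     name: geoblock
#   spec:
#     plugin:
#       traefik-plugin-geoblock:
#         enabled: true
#         allowedCountries: ["CH", "DE"]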


@ -0,0 +1,33 @@
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: traefik-system
name: traefik-certificate
labels:
directory: traefik
spec:
storageClassName: fast
capacity:
storage: "10Mi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/traefik/certs
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: traefik-system
name: traefik-certificate
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Mi"
selector:
matchLabels:
directory: traefik


@ -0,0 +1,2 @@
name: telegraf-traefik
chart: influxdata/telegraf


@ -0,0 +1,151 @@
## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/
replicaCount: 1
image:
repo: "telegraf"
tag: "1.24"
pullPolicy: IfNotPresent
podAnnotations: {}
podLabels: {}
imagePullSecrets: []
## Configure args passed to Telegraf containers
args: []
# The name of a secret in the same kubernetes namespace which contains values to
# be added to the environment (must be manually created)
# This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
env:
- name: HOSTNAME
value: "telegraf-polling-service"
# An older "volumeMounts" key was previously added which will likely
# NOT WORK as you expect. Please use this newer configuration.
volumes:
- name: traefik-logs
persistentVolumeClaim:
claimName: traefik-logs
mountPoints:
- name: traefik-logs
mountPath: /traefik_logs
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: # to read the traefik logs the pod must be on the same node as traefik
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions: # matches labels: app.kubernetes.io/name=traefik
- key: app.kubernetes.io/name
operator: In
values:
- traefik
topologyKey: "kubernetes.io/hostname"
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
service:
enabled: false
type: ClusterIP
annotations: {}
rbac:
# Specifies whether RBAC resources should be created
create: true
# Create only for the release namespace or cluster wide (Role vs ClusterRole)
clusterWide: false
# Rules for the created role
rules: []
# When using the prometheus input to scrape all pods you need extra rules added to the ClusterRole to be
# able to scan the pods for scraping labels. The following rules have been taken from:
# https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46
# - apiGroups:
# - ""
# resources:
# - nodes
# - nodes/proxy
# - nodes/metrics
# - services
# - endpoints
# - pods
# - ingresses
# - configmaps
# verbs:
# - get
# - list
# - watch
# - apiGroups:
# - "extensions"
# resources:
# - ingresses/status
# - ingresses
# verbs:
# - get
# - list
# - watch
# - nonResourceURLs:
# - "/metrics"
# verbs:
# - get
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Annotations for the ServiceAccount
annotations: {}
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
agent:
interval: "10s"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: true
# processors:
# - enum:
# mapping:
# field: "status"
# dest: "status_code"
# value_mappings:
# healthy: 1
# problem: 2
# critical: 3
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
organization: "influxdata"
bucket: "kluster"
# retention_policy: "2w"
inputs:
- docker_log:
endpoint: "unix:///var/run/docker.sock"
from_beginning: false
container_name_include: ["traefik"]


@ -0,0 +1,241 @@
# Default values for Traefik
image:
name: traefik
# defaults to appVersion
tag: ""
pullPolicy: IfNotPresent
#
# Configure the deployment
#
deployment:
enabled: true
# Can be either Deployment or DaemonSet
kind: Deployment
# Number of pods of the deployment (only applies when kind == Deployment)
replicas: 1
# Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)
# revisionHistoryLimit: 1
# Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
terminationGracePeriodSeconds: 60
# The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
minReadySeconds: 0
# Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
annotations: {}
# Additional deployment labels (e.g. for filtering deployment by custom labels)
labels: {}
# Additional pod annotations (e.g. for mesh injection or prometheus scraping)
podAnnotations: {}
# Additional Pod labels (e.g. for filtering Pod by custom labels)
podLabels: {}
# Additional containers (e.g. for metric offloading sidecars)
additionalContainers: []
# https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host
# - name: socat-proxy
# image: alpine/socat:1.0.5
# args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
# volumeMounts:
# - name: dsdsocket
# mountPath: /socket
# Additional volumes available for use with initContainers and additionalContainers
additionalVolumes:
# - name: traefik-logs
# persistentVolumeClaim:
# claimName: traefik-logs
- name: traefik-certificate
persistentVolumeClaim:
claimName: traefik-certificate
- name: traefik-config
configMap:
name: traefik-config
# - name: dsdsocket
# hostPath:
# path: /var/run/statsd-exporter
# Additional initContainers (e.g. for setting file permission as shown below)
initContainers: []
# The "volume-permissions" init container is required if you run into permission issues.
# Related issue: https://github.com/traefik/traefik/issues/6972
# - name: volume-permissions
# image: busybox:1.31.1
# command: ["sh", "-c", "chmod -Rv 600 /data/*"]
# volumeMounts:
# - name: data
# mountPath: /data
# Use process namespace sharing
shareProcessNamespace: false
# Custom pod DNS policy. Apply if `hostNetwork: true`
# dnsPolicy: ClusterFirstWithHostNet
# Additional imagePullSecrets
imagePullSecrets: []
# - name: myRegistryKeySecretName
# Use ingressClass. Ignored if Traefik version < 2.3 / kubernetes < 1.18.x
ingressClass:
# true is not unit-testable yet, pending https://github.com/rancher/helm-unittest/pull/12
enabled: true
isDefaultClass: true
# Use to force a networking.k8s.io API Version for certain CI/CD applications. E.g. "v1beta1"
fallbackApiVersion: ""
# Activate Pilot integration
pilot:
enabled: false
token: ""
# Toggle Pilot Dashboard
# dashboard: false
# Enable experimental features
experimental:
http3:
enabled: false
plugins:
enabled: false
kubernetesGateway:
enabled: false
# certificate:
# group: "core"
# kind: "Secret"
# name: "mysecret"
# By default, Gateway would be created to the Namespace you are deploying Traefik to.
# You may create that Gateway in another namespace, setting its name below:
# namespace: default
# Create an IngressRoute for the dashboard
ingressRoute:
dashboard:
enabled: false
# Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
annotations: {}
# Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
labels: {}
#
# Configure providers
#
providers:
kubernetesCRD:
enabled: true
allowCrossNamespace: false
allowExternalNameServices: true
allowEmptyServices: false
# ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik
namespaces: []
# - "default"
kubernetesIngress:
enabled: true
allowExternalNameServices: true
allowEmptyServices: false
ingressClass: traefik
# labelSelector: environment=production,method=traefik
namespaces: []
# - "default"
# IP used for Kubernetes Ingress endpoints
publishedService:
enabled: false
# Published Kubernetes Service to copy status from. Format: namespace/servicename
# By default this Traefik service
# pathOverride: ""
# Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments:
# - "--providers.file.filename=/config/dynamic.toml"
# - "--ping"
# - "--ping.entrypoint=web"
volumes: []
# - name: traefik-config
# mountPath: /config
# configMap:
# name: traefik-config
# - name: public-cert
# mountPath: "/certs"
# type: secret
# - name: '{{ printf "%s-configs" .Release.Name }}'
# mountPath: "/config"
# type: configMap
# Additional volumeMounts to add to the Traefik container
additionalVolumeMounts:
# - name: traefik-logs
# mountPath: /var/log/traefik
# nfs:
# server: 192.168.1.157
# path: /kluster/traefik
# # For instance when using a logshipper for access logs
# - name: traefik-logs
# # claimName: traefik-logs
# mountPath: /var/log/traefik
- name: traefik-certificate
# claimName: traefik-certificate
mountPath: /certs
- name: traefik-config
mountPath: /config
globalArguments:
- "--configfile=/config/traefik.toml"
additionalArguments: []
# Environment variables to be passed to Traefik's binary
env:
- name: TZ
value: "Europe/Berlin"
# - name: SOME_VAR
# value: some-var-value
# - name: SOME_VAR_FROM_CONFIG_MAP
# valueFrom:
# configMapRef:
# name: configmap-name
# key: config-key
# - name: SOME_SECRET
# valueFrom:
# secretKeyRef:
# name: secret-name
# key: secret-key
# Configure ports
ports: {} # leave unconfigured to use the values from the toml file
envFrom: []
# - configMapRef:
# name: config-map-name
# - secretRef:
# name: secret-name
tlsOptions: {}
# Options for the main traefik service, where the entrypoints traffic comes
# from.
service:
enabled: true
type: LoadBalancer
# Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config)
annotations: {}
# Additional annotations for TCP service only
annotationsTCP: {}
# Additional annotations for UDP service only
annotationsUDP: {}
# Additional service labels (e.g. for filtering Service by custom labels)
labels: {}
# Additional entries here will be added to the service spec.
# Cannot contain type, selector or ports entries.
spec:
# externalTrafficPolicy: Local
loadBalancerIP: 192.168.3.1


@ -0,0 +1,56 @@
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
annotations:
metadata.weave.works/description: This is the source location for the Weave GitOps
Dashboard's helm chart.
labels:
app.kubernetes.io/component: ui
app.kubernetes.io/created-by: weave-gitops-cli
app.kubernetes.io/name: weave-gitops-dashboard
app.kubernetes.io/part-of: weave-gitops
name: flux-dashboard
namespace: flux-system
spec:
interval: 1h0m0s
type: oci
url: oci://ghcr.io/weaveworks/charts
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
annotations:
metadata.weave.works/description: This is the Weave GitOps Dashboard. It provides
a simple way to get insights into your GitOps workloads.
name: flux-dashboard
namespace: flux-system
spec:
chart:
spec:
chart: weave-gitops
sourceRef:
kind: HelmRepository
name: flux-dashboard
interval: 1h0m0s
values:
adminUser:
create: true
passwordHash: $2a$10$k0UXfoFU9qbQQYOD/fJWY.Wlr5z9YVTyC0WrnOk50QhKuo1Y0SZoK
username: admin
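# the passwordHash above is bcrypt; it can be regenerated with the gitops
# CLI, e.g. (sketch, flags may vary by version):
#   echo -n $PASSWORD | gitops get bcrypt-hash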
# ---
# apiVersion: traefik.containo.us/v1alpha1
# kind: IngressRoute
# metadata:
# namespace: flux-system
# name: flux-ingressroute
# spec:
# entryPoints:
# - websecure
# routes:
# - match: Host(`flux.kluster.moll.re`)
# kind: Rule
# services:
# - name: flux-dashboard-weave-gitops
# port: 9001
# tls:
# certResolver: default-tls


@ -0,0 +1,75 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: journal-bot
namespace: flux-system
spec:
interval: 1m0s
ref:
branch: main
secretRef:
name: journal-bot
timeout: 60s
url: ssh://git@git.kluster.moll.re:2222/remoll/journal-bot.git
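  # the referenced journal-bot secret holds the SSH deploy key for this URL;
  # it can be created with the flux CLI, e.g. (sketch, flags may vary):
  #   flux create secret git journal-bot \
  #     --url=ssh://git@git.kluster.moll.re:2222/remoll/journal-bot.git \
  #     --namespace=flux-system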
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: journal-bot-kustomize
namespace: flux-system
spec:
force: true
interval: 1m0s
path: ./deployment/overlays/main
prune: false
sourceRef:
kind: GitRepository
name: journal-bot
---
apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImageRepository
metadata:
name: journal-bot-registry
namespace: flux-system
spec:
image: registry.hub.docker.com/mollre/journal-bot
interval: 15m
---
apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImagePolicy
metadata:
name: journal-bot-imagerange
namespace: flux-system
spec:
imageRepositoryRef:
name: journal-bot-registry
policy:
semver:
range: 1.x.x
---
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImageUpdateAutomation
metadata:
name: journal-bot-automation
namespace: flux-system
spec:
interval: 30m
sourceRef:
kind: GitRepository
name: journal-bot
git:
checkout:
ref:
branch: main
commit:
author:
email: flux@moll.re
name: fluxcdbot
messageTemplate: '[CI SKIP] Bump {{range .Updated.Images}}{{println .}}{{end}}'
push:
branch: main
update:
path: ./deployment/base/deployment.yaml
strategy: Setters
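# the Setters strategy only rewrites fields tagged with an image-policy
# marker in the target file, e.g. (sketch; the tag value is illustrative):
#   image: registry.hub.docker.com/mollre/journal-bot:1.2.3 # {"$imagepolicy": "flux-system:journal-bot-imagerange"}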


@ -0,0 +1,20 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: whoami-kustomize
namespace: flux-system
spec:
force: true
interval: 1m0s
path: ./whoami/overlays/main
prune: false
sourceRef:
kind: GitRepository
name: k3s-app-collection
# apiVersion: helm.toolkit.fluxcd.io/v2beta1
# kind: HelmRelease
# ...


@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aio
namespace: aio
labels:
app: aio
spec:
replicas: 1
selector:
matchLabels:
app: aio
template:
metadata:
labels:
app: aio
spec:
containers:
- name: aio
image: mollre/aio:latest
tty: true
volumeMounts:
- mountPath: /keys/
name: aio-nfs
resources:
requests:
memory: "250Mi"
cpu: 0.5
volumes:
- name: aio-nfs
persistentVolumeClaim:
claimName: aio-nfs

34
unused/aio.pvc.yaml Normal file

@ -0,0 +1,34 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: aio
name: "aio-nfs"
labels:
directory: "aio"
spec:
storageClassName: fast
capacity:
storage: "100Mi"
accessModes:
- ReadWriteOnce
nfs:
path: /aio
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: aio
name: "aio-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Mi"
selector:
matchLabels:
directory: "aio"

114
unused/anki/deployment.yaml Normal file

@ -0,0 +1,114 @@
apiVersion: v1
kind: Namespace
metadata:
name: anki
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: anki
namespace: anki
labels:
app: anki
spec:
replicas: 1
selector:
matchLabels:
app: anki
template:
metadata:
labels:
app: anki
spec:
containers:
- name: anki-server
image: ankicommunity/anki-sync-server:20220516
tty: true
volumeMounts:
- mountPath: /app/data
name: anki-data-nfs
resources:
requests:
memory: "250Mi"
cpu: 0.5
nodeSelector:
kubernetes.io/arch: amd64
volumes:
- name: anki-data-nfs
persistentVolumeClaim:
claimName: anki-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: anki-http
namespace: anki
spec:
selector:
app: anki
ports:
- protocol: TCP
port: 27701
targetPort: 27701
type: ClusterIP
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: anki
name: "anki-data-nfs"
labels:
directory: "anki"
spec:
storageClassName: fast
capacity:
storage: "100Mi"
accessModes:
- ReadWriteOnce
nfs:
path: /anki
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: anki
name: "anki-data-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Mi"
selector:
matchLabels:
directory: "anki"
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: anki-ingress
namespace: anki
spec:
entryPoints:
- websecure
routes:
- match: Host(`anki.kluster.moll.re`)
kind: Rule
services:
- name: anki-http
port: 27701
tls:
certResolver: default-tls


@ -0,0 +1,92 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: anonaddy/anonaddy
# -- image tag
tag: 0.11.2
# -- image pull policy
pullPolicy: IfNotPresent
strategy:
type: Recreate
# -- environment variables. See more environment variables in the [anonaddy documentation](https://github.com/anonaddy/docker#environment-variables).
# @default -- See below
env:
TZ: "Europe/Berlin"
# -- Application key for encrypter service
# You can generate one through `anonaddy key:generate --show` or `echo "base64:$(openssl rand -base64 32)"`
APP_KEY:
# -- Root domain to receive email from
ANONADDY_DOMAIN: anonaddy.kluster.moll.re
# -- Long random string used when hashing data for the anonymous replies
ANONADDY_SECRET:
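  # e.g. a value generated with `openssl rand -hex 32` (sketch; any long random string should do)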
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 8000
smtp:
enabled: true
port: 25
type: LoadBalancer
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
hosts:
- host: anonaddy.kluster.moll.re
paths:
- path: /
pathType: Prefix
service:
port: 8000
tls:
- hosts:
- anonaddy.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
config:
enabled: false
emptydir:
enabled: false
# https://github.com/bitnami/charts/tree/master/bitnami/mariadb/#installing-the-chart
mariadb:
enabled: true
image:
name: arm64v8/mariadb:latest
pullSecrets: []
# primary:
# persistence:
# enabled: true
# auth:
# username: "username"
# password: "password"
# database: database
# -- Enable and configure redis subchart under this key.
# For more options see [redis chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/redis)
# @default -- See values.yaml
redis:
enabled: false
# auth:
# enabled: false


@ -0,0 +1,119 @@
apiVersion: v1
kind: Namespace
metadata:
name: archive
labels:
app: archive
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: archive
name: archive-data-nfs
labels:
directory: archive
spec:
storageClassName: fast
capacity:
storage: "100Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /helbing_archive
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: archive
name: archive-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Gi"
selector:
matchLabels:
directory: archive
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: archive
namespace: archive
labels:
app: archive
spec:
replicas: 1
selector:
matchLabels:
app: archive
template:
metadata:
labels:
app: archive
spec:
containers:
- name: archive
image: archivebox/archivebox
tty: true
ports:
- containerPort: 8000
volumeMounts:
- mountPath: /data
name: archive-data
volumes:
- name: archive-data
persistentVolumeClaim:
claimName: archive-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: archive
namespace: archive
spec:
type: ClusterIP
ports:
- name: http
port: 8000
selector:
app: archive
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: archive
name: archive-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- archive.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: archive.kluster.moll.re
http:
paths:
- backend:
service:
name: archive
port:
number: 8000
path: /
pathType: Prefix

34
unused/authelia/pvc.yaml Normal file

@ -0,0 +1,34 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: authelia
name: authelia-config-nfs
labels:
directory: authelia
spec:
storageClassName: fast
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /authelia
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: authelia
name: authelia-config-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: authelia

1235
unused/authelia/values.yaml Normal file

File diff suppressed because it is too large


@ -0,0 +1,34 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: authentik-ingress
namespace: authentik
spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.kluster.moll.re`)
kind: Rule
middlewares:
- name: authentik-websocket
services:
- name: authentik
port: 80
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: authentik-websocket
namespace: authentik
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
Upgrade: "websocket"

37
unused/authentik/pvc.yaml Normal file

@ -0,0 +1,37 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: authentik
name: authentik-postgres-nfs
labels:
directory: authentik
spec:
storageClassName: slow
capacity:
storage: "5Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/authentik
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: authentik
name: authentik-postgres-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
selector:
matchLabels:
directory: authentik


@ -0,0 +1,172 @@
# -- Server replicas
replicas: 1
# -- Custom priority class for different treatment by the scheduler
priorityClassName:
# -- server securityContext
securityContext: {}
worker:
# -- worker replicas
replicas: 1
# -- Custom priority class for different treatment by the scheduler
priorityClassName:
# -- worker securityContext
securityContext: {}
image:
repository: ghcr.io/goauthentik/server
tag: 2023.4.1
pullPolicy: IfNotPresent
pullSecrets: []
# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
initContainers: {}
# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
additionalContainers: {}
authentik:
# -- Log level for server and worker
log_level: info
# -- Secret key used for cookie signing and unique user IDs,
# don't change this after the first install
secret_key: "K9F5uNx1gzsk3q5tnjwFabBYgjBJcAv0qM135QRgzL81hRg4"
# -- Path for the geoip database. If the file doesn't exist, GeoIP features are disabled.
geoip: /geoip/GeoLite2-City.mmdb
# -- Mode for the avatars. Defaults to gravatar. Possible options 'gravatar' and 'none'
avatars: gravatar
outposts:
# -- Template used for managed outposts. The following placeholders can be used
# %(type)s - the type of the outpost
# %(version)s - version of your authentik install
# %(build_hash)s - only for beta versions, the build hash of the image
container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s
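    # e.g. with the tag above this resolves to ghcr.io/goauthentik/proxy:2023.4.1
    # for a proxy outpost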
error_reporting:
# -- This sends anonymous usage-data, stack traces on errors and
# performance data to sentry.beryju.org, and is fully opt-in
enabled: false
# -- This is a string that is sent to sentry with your error reports
environment: "k8s"
# -- Send PII (Personally identifiable information) data to sentry
send_pii: false
postgresql:
# -- set the postgresql hostname to talk to
# if unset and .Values.postgresql.enabled == true, will generate the default
# @default -- `{{ .Release.Name }}-postgresql`
host: 'postgres-postgresql.postgres'
# -- postgresql Database name
# @default -- `authentik`
name: "authentik"
# -- postgresql Username
# @default -- `authentik`
user: "authentik"
password: "authentik"
port: 5432
redis:
# -- set the redis hostname to talk to
# @default -- `{{ .Release.Name }}-redis-master`
host: '{{ .Release.Name }}-redis-master'
password: ""
# -- see configuration options at https://goauthentik.io/docs/installation/configuration/
env: {}
# AUTHENTIK_VAR_NAME: VALUE
envFrom: []
# - configMapRef:
# name: special-config
envValueFrom: {}
# AUTHENTIK_VAR_NAME:
# secretKeyRef:
# key: password
# name: my-secret
service:
# -- Service that is created to access authentik
enabled: true
type: ClusterIP
port: 80
name: http
protocol: TCP
labels: {}
annotations: {}
volumes: []
volumeMounts: []
# -- affinity applied to the deployments
affinity: {}
# -- nodeSelector applied to the deployments
nodeSelector: {}
resources:
server: {}
worker: {}
# WARNING! When initially deploying, authentik has to run a few DB migrations. The pod may be killed by probe
# failures in the meantime, but the migrations resume after a restart. You can disable the probes during deployment if this is not desired
livenessProbe:
# -- enables or disables the livenessProbe
enabled: true
httpGet:
# -- liveness probe url path
path: /-/health/live/
port: http
initialDelaySeconds: 50
periodSeconds: 10
readinessProbe:
enabled: true
httpGet:
path: /-/health/ready/
port: http
initialDelaySeconds: 50
periodSeconds: 10
serviceAccount:
# -- Service account is needed for managed outposts
create: true
prometheus:
serviceMonitor:
create: false
interval: 30s
scrapeTimeout: 3s
rules:
create: false
geoip:
# -- optional GeoIP, deploys a cronjob to download the maxmind database
enabled: false
# -- sign up under https://www.maxmind.com/en/geolite2/signup
accountId: ""
# -- sign up under https://www.maxmind.com/en/geolite2/signup
licenseKey: ""
editionIds: "GeoLite2-City"
image: maxmindinc/geoipupdate:v4.8
# -- number of hours between update runs
updateInterval: 8
postgresql:
# -- enable the bundled bitnami postgresql chart
enabled: false
postgresqlUsername: "authentik"
postgresqlPassword: "authentik"
postgresqlDatabase: "authentik"
# persistence:
# enabled: true
# existingClaim: authentik-postgres-nfs
redis:
# -- enable the bundled bitnami redis chart
enabled: true
architecture: standalone
auth:
enabled: false


@ -0,0 +1,34 @@
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: backup
name: backup-nfs-access
labels:
directory: backup
spec:
storageClassName: fast
volumeMode: Filesystem
accessModes:
- ReadOnlyMany
capacity:
storage: "5M"
nfs:
path: /export/kluster
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: backup
name: backup-nfs-access
spec:
resources:
requests:
storage: "5M"
storageClassName: fast
accessModes:
- ReadOnlyMany
selector:
matchLabels:
directory: backup


@ -0,0 +1,64 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: restic-backblaze
spec:
schedule: "0 2 * * *"
# every day at 2:00
successfulJobsHistoryLimit: 2
failedJobsHistoryLimit: 2
jobTemplate:
spec:
template:
spec:
# nodeSelector:
# kubernetes.io/arch: arm64
# TODO no arm64 nodes anymore
restartPolicy: Never
hostname: restic-k3s-pod
# used by restic to identify the host
containers:
- name: restic-base-container
image: restic/restic:latest
command:
- /bin/sh
- -c
# >- folds newlines into spaces
# RESTIC_ARGS can be, for instance: --verbose --dry-run
args: []
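# args is left empty in this base on purpose; the kustomize overlays
# (backup/overlays/backup and backup/overlays/prune) patch in the actual
# restic command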
volumeMounts:
- mountPath: /data
name: backup-nfs-access
- mountPath: /credentials
name: restic-credentials
env:
- name: RESTIC_REPOSITORY
valueFrom:
secretKeyRef:
name: restic-credentials
key: RESTIC_REPOSITORY
- name: B2_ACCOUNT_ID
valueFrom:
secretKeyRef:
name: restic-credentials
key: B2_ACCOUNT_ID
- name: B2_ACCOUNT_KEY
valueFrom:
secretKeyRef:
name: restic-credentials
key: B2_ACCOUNT_KEY
- name: RESTIC_PASSWORD_FILE
value: /credentials/restic-password
volumes:
- name: backup-nfs-access
persistentVolumeClaim:
claimName: backup-nfs-access
- name: restic-credentials
secret:
secretName: restic-credentials
optional: false


@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./cronjob.yaml
- ./restic-credentials.secret.yaml


@ -0,0 +1,8 @@
```
k kustomize backup/overlays/backup | k apply -f -
> secret/restic-credentials-backup created
> cronjob.batch/restic-backblaze-backup created
k kustomize backup/overlays/prune | k apply -f -
> secret/restic-credentials-prune created
> cronjob.batch/restic-backblaze-prune created
```
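
A run can also be triggered manually, without waiting for the schedule (a sketch, assuming the same `k` alias and a throwaway job name):

```
k create job --from=cronjob/restic-backblaze-backup restic-manual-test -n backup
k logs -n backup job/restic-manual-test -f
```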


@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: backup
nameSuffix: -backup
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
target:
kind: CronJob


@ -0,0 +1,26 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-patch
spec:
jobTemplate:
spec:
template:
spec:
containers:
- name: restic-base-container
args:
# >- folds newlines into spaces
# RESTIC_ARGS can be, for instance: --verbose --dry-run
# RESTIC_REPOSITORY is set in the secret
- >-
restic backup
-r $(RESTIC_REPOSITORY)
--verbose=2
/data
--exclude=s3/
# &&
# restic
# -r $(RESTIC_REPOSITORY)
# list snapshots
# Add command to copy existing backups to here!


@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: backup
nameSuffix: -prune
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
target:
kind: CronJob


@ -0,0 +1,23 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: prune-patch
spec:
schedule: "0 0 1/15 * *"
# at midnight on days 1, 16 and 31 of each month (1/15 = day 1, then every 15 days)
jobTemplate:
spec:
template:
spec:
containers:
- name: restic-base-container
args:
# >- folds newlines into spaces
# RESTIC_ARGS can be, for instance: --verbose --dry-run
# RESTIC_REPOSITORY is set in the secret
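# forget --prune not only drops old snapshots but also deletes data chunks
# that are no longer referenced by any snapshot, freeing repository space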
- >-
restic forget
-r $(RESTIC_REPOSITORY)
--verbose=2
--keep-daily 7 --keep-weekly 5
--prune


@ -0,0 +1,54 @@
# apiVersion: v1
# kind: Secret
# metadata:
# name: cloudflare-api-token-secret
# namespace: cert-manager
# type: Opaque
# stringData:
# api-token:
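# the same secret can be created imperatively instead (sketch):
#   kubectl -n cert-manager create secret generic cloudflare-api-token-secret \
#     --from-literal=api-token=<token>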
# ---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: cloudflare-letsencrypt-staging
spec:
acme:
email: me@moll.re
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: cloudflare-letsencrypt-issuer-account-key
solvers:
- dns01:
cloudflare:
email: mollator2@gmail.com
apiTokenSecretRef:
# Name of the secret created on the other resource
name: cloudflare-api-token-secret
key: api-token
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: cloudflare-letsencrypt-prod
spec:
acme:
email: me@moll.re
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: cloudflare-letsencrypt-issuer-account-key
solvers:
- dns01:
cloudflare:
email: mollator2@gmail.com
apiTokenSecretRef:
# Name of the secret created on the other resource
name: cloudflare-api-token-secret
key: api-token


@ -0,0 +1,494 @@
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
# Optional priority class to be used for the cert-manager pods
priorityClassName: ""
rbac:
create: true
podSecurityPolicy:
enabled: false
useAppArmor: true
# Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
logLevel: 2
leaderElection:
# Override the namespace used to store the ConfigMap for leader election
namespace: "kube-system"
# The duration that non-leader candidates will wait after observing a
# leadership renewal until attempting to acquire leadership of a led but
# unrenewed leader slot. This is effectively the maximum duration that a
# leader can be stopped before it is replaced by another candidate.
# leaseDuration: 60s
# The interval between attempts by the acting master to renew a leadership
# slot before it stops leading. This must be less than or equal to the
# lease duration.
# renewDeadline: 40s
# The duration the clients should wait between attempting acquisition and
# renewal of a leadership.
# retryPeriod: 15s
installCRDs: false
replicaCount: 1
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Comma separated list of feature gates that should be enabled on the
# controller pod.
featureGates: ""
image:
repository: quay.io/jetstack/cert-manager-controller
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-controller
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the controller's ServiceAccount
# annotations: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# Optional additional arguments
extraArgs: []
# Use this flag to set a namespace that cert-manager will use to store
# supporting resources required for each ClusterIssuer (default is kube-system)
# - --cluster-resource-namespace=kube-system
# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
# - --enable-certificate-owner-ref=true
# Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver
# - --controllers=*,-certificaterequests-approver
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# legacy securityContext parameter format: if enabled is set to true, only fsGroup and runAsUser are supported
# securityContext:
# enabled: false
# fsGroup: 1001
# runAsUser: 1001
# to support additional securityContext parameters, omit the `enabled` parameter and simply specify the parameters
# you want to set, e.g.
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
# runAsNonRoot: true
# Container Security Context to be set on the controller component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
volumes: []
volumeMounts: []
# Optional additional annotations to add to the controller Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the controller Pods
# podAnnotations: {}
podLabels: {}
# Optional additional labels to add to the controller Service
# serviceLabels: {}
# Optional additional annotations to add to the controller service
# serviceAnnotations: {}
# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
# nameservers:
# - "1.1.1.1"
# - "8.8.8.8"
nodeSelector: {}
ingressShim: {}
# defaultIssuerName: ""
# defaultIssuerKind: ""
# defaultIssuerGroup: ""
prometheus:
enabled: true
servicemonitor:
enabled: false
prometheusInstance: default
targetPort: 9402
path: /metrics
interval: 60s
scrapeTimeout: 30s
labels: {}
# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "https://proxy:8080"
# no_proxy: 127.0.0.1,localhost
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
webhook:
replicaCount: 1
timeoutSeconds: 10
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Pod Security Context to be set on the webhook component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# Container Security Context to be set on the webhook component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# Optional additional annotations to add to the webhook Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the webhook Pods
# podAnnotations: {}
# Optional additional annotations to add to the webhook MutatingWebhookConfiguration
# mutatingWebhookConfigurationAnnotations: {}
# Optional additional annotations to add to the webhook ValidatingWebhookConfiguration
# validatingWebhookConfigurationAnnotations: {}
# Optional additional annotations to add to the webhook service
# serviceAnnotations: {}
# Optional additional arguments for webhook
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
nodeSelector: {}
affinity: {}
tolerations: []
# Optional additional labels to add to the Webhook Pods
podLabels: {}
# Optional additional labels to add to the Webhook Service
serviceLabels: {}
image:
repository: quay.io/jetstack/cert-manager-webhook
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-webhook
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the controller's ServiceAccount
# annotations: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# The port that the webhook should listen on for requests.
# In GKE private clusters, by default kubernetes apiservers are allowed to
# talk to the cluster nodes only on 443 and 10250, so configuring
# securePort: 10250 will work out of the box without needing to add firewall
# rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000
securePort: 10250
# Specifies if the webhook should be started in hostNetwork mode.
#
# Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
# CNI (such as calico), because the control plane managed by AWS cannot communicate
# with the pods' IP CIDR, so admission webhooks do not work
#
# Since the default port for the webhook conflicts with kubelet on the host
# network, `webhook.securePort` should be changed to an available port if
# running in hostNetwork mode.
hostNetwork: false
# Specifies how the service should be handled. Useful if you want to expose the
# webhook to outside of the cluster. In some cases, the control plane cannot
# reach internal services.
serviceType: ClusterIP
# loadBalancerIP:
# Overrides the mutating webhook and validating webhook so they reach the webhook
# service using the `url` field instead of a service.
url: {}
# host:
cainjector:
enabled: true
replicaCount: 1
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Pod Security Context to be set on the cainjector component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# Container Security Context to be set on the cainjector component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# Optional additional annotations to add to the cainjector Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the cainjector Pods
# podAnnotations: {}
# Optional additional arguments for cainjector
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector: {}
affinity: {}
tolerations: []
# Optional additional labels to add to the CA Injector Pods
podLabels: {}
image:
repository: quay.io/jetstack/cert-manager-cainjector
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-cainjector
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the controller's ServiceAccount
# annotations: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# This startupapicheck is a Helm post-install hook that waits for the webhook
# endpoints to become available.
# The check is implemented using a Kubernetes Job; if you are injecting mesh
# sidecar proxies into cert-manager pods, you probably want to ensure that they
# are not injected into this Job's pod. Otherwise the installation may time out
# due to the Job never being completed because the sidecar proxy does not exit.
# See https://github.com/jetstack/cert-manager/pull/4414 for context.
startupapicheck:
enabled: true
# Pod Security Context to be set on the startupapicheck component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# Timeout for 'kubectl check api' command
timeout: 1m
# Job backoffLimit
backoffLimit: 4
# Optional additional annotations to add to the startupapicheck Job
jobAnnotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "1"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
# Optional additional annotations to add to the startupapicheck Pods
# podAnnotations: {}
# Optional additional arguments for startupapicheck
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector: {}
affinity: {}
tolerations: []
# Optional additional labels to add to the startupapicheck Pods
podLabels: {}
image:
repository: quay.io/jetstack/cert-manager-ctl
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-ctl
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
rbac:
# annotations for the startup API Check job RBAC and PSP resources
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the Job's ServiceAccount
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
# Automount API credentials for a Service Account.
automountServiceAccountToken: true


@ -0,0 +1,26 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: crowdsec
name: crowdsec-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- crowdsec.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: crowdsec.kluster.moll.re
http:
paths:
- backend:
service:
name: crowdsec-service
port:
number: 3000
path: /
pathType: Prefix

Some files were not shown because too many files have changed in this diff