initial migration

2023-10-05 14:34:37 +02:00
parent 5cb41fd5e4
commit 41f0153fd0
145 changed files with 17441 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: restic-rclone-gdrive
spec:
  successfulJobsHistoryLimit: 2
  failedJobsHistoryLimit: 2
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: Never
          hostname: restic-k3s-pod
          # used by restic to identify the host
          containers:
          - name: restic-base-container
            image: restic/restic:latest
            command:
            - /bin/sh
            - -c
            # >- folds newlines into spaces and strips the trailing newline
            # RESTIC_ARGS can be, for instance: --verbose --dry-run
            args: []
            volumeMounts:
            - mountPath: /data
              name: backup-nfs-access
            env:
            - name: RESTIC_REPOSITORY
              value: rest:http://rclone-gcloud:8000/kluster
              # lives in the same namespace
            - name: RESTIC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: restic-gdrive-credentials
                  key: restic-password
          volumes:
          - name: backup-nfs-access
            persistentVolumeClaim:
              claimName: backup-nfs-access

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./cronjob.yaml
- ./restic-password.secret.yaml

View File

@@ -0,0 +1,8 @@
```
k kustomize backup/overlays/backup | k apply -f -
> secret/restic-credentials-backup created
> cronjob.batch/restic-backblaze-backup created
k kustomize backup/overlays/prune | k apply -f -
> secret/restic-credentials-prune created
> cronjob.batch/restic-backblaze-prune created
```
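To test an overlay without waiting for its schedule, a one-off job can be spawned from the cronjob (a sketch: the `backup` namespace and the `manual-test` job name are assumptions; the cronjob name is taken from the output above):
```
k create job --from=cronjob/restic-backblaze-backup manual-test -n backup
k logs -n backup job/manual-test -f
```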

View File

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# namespace: backup
nameSuffix: -backup
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
  target:
    kind: CronJob

View File

@@ -0,0 +1,25 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-patch
spec:
  schedule: "0 2 * * *"
  # at 2:00, every day
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: restic-base-container
            args:
            # >- folds newlines into spaces and strips the trailing newline
            # -r $(RESTIC_REPOSITORY) not needed, because it is set as an env var
            - >-
              restic backup
              --verbose=2
              /data
              --exclude=s3/
              &&
              restic
              list snapshots

View File

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# namespace: backup
nameSuffix: -prune
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
  target:
    kind: CronJob

View File

@@ -0,0 +1,24 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: prune-patch
spec:
  schedule: "0 0 1/15 * *"
  # at midnight on days 1, 16 and 31 of every month (1/15 = every 15 days, starting from the 1st)
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: restic-base-container
            args:
            # >- folds newlines into spaces and strips the trailing newline
            # RESTIC_ARGS can be, for instance: --verbose --dry-run
            # RESTIC_REPOSITORY is set as an env var in the base cronjob
            - >-
              restic forget
              -r $(RESTIC_REPOSITORY)
              --verbose=2
              --keep-daily 7 --keep-weekly 5
              --prune

View File

@@ -0,0 +1,22 @@
{
  "kind": "SealedSecret",
  "apiVersion": "bitnami.com/v1alpha1",
  "metadata": {
    "name": "rclone-config-files",
    "namespace": "backup",
    "creationTimestamp": null
  },
  "spec": {
    "template": {
      "metadata": {
        "name": "rclone-config-files",
        "namespace": "backup",
        "creationTimestamp": null
      },
      "type": "Opaque"
    },
    "encryptedData": {
"rclone.conf": "AgCQ13IG+R6bs+nAxe1xuDXttYlvGlLfV3oQ6c0qtoF2jXB8hN3LftydHn+Se3LjghmQKAIErfsA7ZRhJoWfFuSm2AIc3w2mMonsga5gjBx/56/tZSvnT2Bzn/5UXktTVxwEINSBP0dYiMcn4/G+5hO3bngmG+lCZXeI7yWoTW8H+8NKYxDHUzdoBBhPPPLTERTRZHB8EzOPUlefHq/2y/NpUfkxyLSjYk0/X45W6XNzH6MfdA2x6omxd4giDQSEwJGdXqIXu1rPnPjV7WVcA8qJzkQbxhzjqpUcFgM12YsLGVVW8HSSdAy+ZNdTXmhCIu2+pI+AVuol4QY9r/gU3xlGhFmc3asW5k4iOfn7/ZEr3Yk8JplAYM+GWQ07s59MqYdGOhqFUpVmkjO97Z29iaeReQZCwxzl/PmxUtfI20eTmtUlFKE3fObMr27sZcXgeJS3ktHOONGoqvHHeuqd4hfTaVAGwVOAEoBY8Xnkq3ECN5ld8V4zR8e52QHtANflN4IJgjnGO5pMQyAW+XASAJDxG48q7ruu9i0mI4vuM0rVuoWi2v9I30/M7Mv2xAYnmKC7NIao1mDya3paidHwkIu12480oBDdHZpm5NSqHtQr/HKMQWnbu6CrufrDmTqoVe/ew5uaqjbfrBBys35k5ObUUPlhU3putgfmsR3YZXDaAqOwIoXQ30wm02gCA5z/WNEY3EaKP6RhgsowwkrPPniQfz4EaxQQjmZ/toe/xpwzSZjmoVnJtJabiuqL/B/eY6WpNOTjOzsc7Z69EOyhZMs41gNoA32RRUbFO1ppOu8518cE12KpsGbH6K6NcucSrKh2Gd3xNGwjaGQVT2vLTVi9YwByiwvrsVpNU06f2v0fcZWeRgoFoUkKMj746lw0E+X7oF0+PmfPT2IeTRszHECkbStSvFZNDivcdJyDFutocAZKNjDoAnVPlTNVYwKKcmHvw3sOOXhVN7NOj/+9UxSNyRvip7GPZKtRF1u9ftlD6OaLCCVSip7MJ41a7TugBTUUaMJbQUTmidWKZn6A0nctAvdrPbBatPI2BZQ4amwdXa2bWyE7DI13WaCm6kAVJijsAmfVrVX3C+Ft5p8unbjsVQ/ErdpKTjlq9mJsie3TQdME5r74GlcURiVXdLc7KcV7vpf6yy88XS6ee+Y9WmlYDAwRX+taMilRDlunMeF5Zmh12DCXMzsradEifEOZ/Mg5BMznxvrZv3iHDArm/j4QW7Bi0To3+f2826IAaXMlI4ze7e9Ny3NUbgy85yE+RNYiio0+wvWRKraxpqI0EODy/juBed3VcoWlOfch0hKU4BZTVrU5rDEmwYcp6oWnXE92fhVH7wjy4IV3WUSubYg="
    }
  }
}

View File

@@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rclone-gcloud
spec:
  selector:
    matchLabels:
      app: rclone-gcloud
  template:
    metadata:
      labels:
        app: rclone-gcloud
    spec:
      containers:
      - name: rclone
        image: rclone/rclone:latest
        command: ["/bin/sh", "-c"]
        args: # the rclone config is mounted as a secret
        # >- folds newlines into spaces and strips the trailing newline
        # sleep infinity
        - >-
          rclone
          --config /config/rclone.conf
          serve restic
          --addr :8000
          -v
          ETHZ-gdrive:backup
        volumeMounts:
        # from secret
        - name: rclone-config
          mountPath: /config
          readOnly: true
      volumes:
      - name: rclone-config
        secret:
          secretName: rclone-config-files
---
apiVersion: v1
kind: Service
metadata:
  name: rclone-gcloud
spec:
  selector:
    app: rclone-gcloud
  ports:
  - protocol: TCP
    port: 8000
    targetPort: 8000

View File

@@ -0,0 +1,2 @@
export RESTIC_REPOSITORY=rest:http://127.0.0.1:8000/kluster
export RESTIC_PASSWORD="2r,TE0.,U@gni3e%xr)_LC64"

View File

@@ -0,0 +1,38 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: omv-s3-ingressroute
  namespace: external
spec:
  entryPoints:
  - websecure
  routes:
  - match: Host(`s3.kluster.moll.re`)
    kind: Rule
    services:
    - name: omv-s3
      port: 9000
      # scheme: https
  tls:
    certResolver: default-tls
---
apiVersion: v1
kind: Endpoints
metadata:
  name: omv-s3
  namespace: external
subsets:
- addresses:
  - ip: 192.168.1.157
  ports:
  - port: 9000
---
apiVersion: v1
kind: Service
metadata:
  name: omv-s3
  namespace: external
spec:
  ports:
  - port: 9000
    targetPort: 9000

View File

@@ -0,0 +1,38 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: omv-ingressroute
  namespace: external
spec:
  entryPoints:
  - websecure
  routes:
  - match: Host(`omv.kluster.moll.re`)
    kind: Rule
    services:
    - name: omv
      port: 443
      scheme: https
  tls:
    certResolver: default-tls
---
apiVersion: v1
kind: Endpoints
metadata:
  name: omv
  namespace: external
subsets:
- addresses:
  - ip: 192.168.1.157
  ports:
  - port: 443
---
apiVersion: v1
kind: Service
metadata:
  name: omv
  namespace: external
spec:
  ports:
  - port: 443
    targetPort: 443

View File

@@ -0,0 +1,55 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: proxmox-ingressroute
  namespace: external
spec:
  entryPoints:
  - websecure
  routes:
  - match: Host(`proxmox.kluster.moll.re`)
    middlewares:
    - name: proxmox-websocket
    kind: Rule
    services:
    - name: proxmox
      port: 8006
      scheme: https
  tls:
    certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: proxmox-websocket
  namespace: external
spec:
  headers:
    customRequestHeaders:
      X-Forwarded-Proto: "https"
      # enable websockets
      Upgrade: "websocket"
---
apiVersion: v1
kind: Endpoints
metadata:
  name: proxmox
  namespace: external
subsets:
- addresses:
  - ip: 192.168.1.150
  ports:
  - port: 8006
---
apiVersion: v1
kind: Service
metadata:
  name: proxmox
  namespace: external
spec:
  ports:
  - port: 8006
    targetPort: 8006

View File

@@ -0,0 +1,2 @@
name: metallb
chart: metallb/metallb

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: config
spec:
  secretTemplates:
  - name: secret-1
    labels:
      label1: value1
    annotations:
      key1: value1
    stringData:
      data-name0: data-value0
    data:
      data-name1: ZGF0YS12YWx1ZTE=

View File

@@ -0,0 +1,14 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - 192.168.3.0/24
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: empty
  namespace: metallb-system

View File

@@ -0,0 +1,337 @@
# Default values for metallb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
loadBalancerClass: ""
# existingConfigMap: "config"
rbac:
  # create specifies whether to install and use RBAC rules.
  create: true
prometheus:
  # scrape annotations specifies whether to add Prometheus metric
  # auto-collection annotations to pods. See
  # https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
  # for a corresponding Prometheus configuration. Alternatively, you
  # may want to use the Prometheus Operator
  # (https://github.com/coreos/prometheus-operator) for more powerful
  # monitoring configuration. If you use the Prometheus operator, this
  # can be left at false.
  scrapeAnnotations: false
  # port both controller and speaker will listen on for metrics
  metricsPort: 7472
  # if set, enables rbac proxy on the controller and speaker to expose
  # the metrics via tls.
  # secureMetricsPort: 9120
  # the name of the secret to be mounted in the speaker pod
  # to expose the metrics securely. If not present, a self-signed
  # certificate will be used.
  speakerMetricsTLSSecret: ""
  # the name of the secret to be mounted in the controller pod
  # to expose the metrics securely. If not present, a self-signed
  # certificate will be used.
  controllerMetricsTLSSecret: ""
  # prometheus doesn't have the permission to scrape all namespaces, so we give it permission to scrape metallb's one
  rbacPrometheus: true
  # the service account used by prometheus
  # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
  serviceAccount: ""
  # the namespace where prometheus is deployed
  # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
  namespace: ""
  # the image to be used for the kuberbacproxy container
  rbacProxy:
    repository: gcr.io/kubebuilder/kube-rbac-proxy
    tag: v0.12.0
    pullPolicy:
  # Prometheus Operator PodMonitors
  podMonitor:
    # enable support for Prometheus Operator
    enabled: false
    # optional additional labels for podMonitors
    additionalLabels: {}
    # optional annotations for podMonitors
    annotations: {}
    # Job label for scrape target
    jobLabel: "app.kubernetes.io/name"
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    interval:
    # metric relabel configs to apply to samples before ingestion.
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]
    # relabel configs to apply to samples before ingestion.
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   target_label: nodename
    #   replacement: $1
    #   action: replace
  # Prometheus Operator ServiceMonitors. To be used as an alternative
  # to podMonitor, supports secure metrics.
  serviceMonitor:
    # enable support for Prometheus Operator
    enabled: false
    speaker:
      # optional additional labels for the speaker serviceMonitor
      additionalLabels: {}
      # optional additional annotations for the speaker serviceMonitor
      annotations: {}
      # optional tls configuration for the speaker serviceMonitor, in case
      # secure metrics are enabled.
      tlsConfig:
        insecureSkipVerify: true
    controller:
      # optional additional labels for the controller serviceMonitor
      additionalLabels: {}
      # optional additional annotations for the controller serviceMonitor
      annotations: {}
      # optional tls configuration for the controller serviceMonitor, in case
      # secure metrics are enabled.
      tlsConfig:
        insecureSkipVerify: true
    # Job label for scrape target
    jobLabel: "app.kubernetes.io/name"
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    interval:
    # metric relabel configs to apply to samples before ingestion.
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]
    # relabel configs to apply to samples before ingestion.
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   target_label: nodename
    #   replacement: $1
    #   action: replace
  # Prometheus Operator alertmanager alerts
  prometheusRule:
    # enable alertmanager alerts
    enabled: false
    # optional additional labels for prometheusRules
    additionalLabels: {}
    # optional annotations for prometheusRules
    annotations: {}
    # MetalLBStaleConfig
    staleConfig:
      enabled: true
      labels:
        severity: warning
    # MetalLBConfigNotLoaded
    configNotLoaded:
      enabled: true
      labels:
        severity: warning
    # MetalLBAddressPoolExhausted
    addressPoolExhausted:
      enabled: true
      labels:
        severity: alert
    addressPoolUsage:
      enabled: true
      thresholds:
      - percent: 75
        labels:
          severity: warning
      - percent: 85
        labels:
          severity: warning
      - percent: 95
        labels:
          severity: alert
    # MetalLBBGPSessionDown
    bgpSessionDown:
      enabled: true
      labels:
        severity: alert
    extraAlerts: []
# controller contains configuration specific to the MetalLB cluster
# controller.
controller:
  enabled: true
  # -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
  logLevel: info
  # command: /controller
  # webhookMode: enabled
  image:
    repository: quay.io/metallb/controller
    tag:
    pullPolicy:
  ## @param controller.updateStrategy.type Metallb controller deployment strategy type.
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  ## e.g:
  ## strategy:
  ##   type: RollingUpdate
  ##   rollingUpdate:
  ##     maxSurge: 25%
  ##     maxUnavailable: 25%
  ##
  strategy:
    type: RollingUpdate
  serviceAccount:
    # Specifies whether a ServiceAccount should be created
    create: true
    # The name of the ServiceAccount to use. If not set and create is
    # true, a name is generated using the fullname template
    name: ""
    annotations: {}
  securityContext:
    runAsNonRoot: true
    # nobody
    runAsUser: 65534
    fsGroup: 65534
  resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 100Mi
  nodeSelector: {}
  tolerations: []
  priorityClassName: ""
  runtimeClassName: ""
  affinity: {}
  podAnnotations: {}
  livenessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  readinessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
speaker:
  enabled: true
  # command: /speaker
  # -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
  logLevel: info
  tolerateMaster: true
  memberlist:
    enabled: true
    mlBindPort: 7946
    mlSecretKeyPath: "/etc/ml_secret_key"
  image:
    repository: quay.io/metallb/speaker
    tag:
    pullPolicy:
  ## @param speaker.updateStrategy.type Speaker daemonset strategy type
  ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
  ##
  updateStrategy:
    ## StrategyType
    ## Can be set to RollingUpdate or OnDelete
    ##
    type: RollingUpdate
  serviceAccount:
    # Specifies whether a ServiceAccount should be created
    create: true
    # The name of the ServiceAccount to use. If not set and create is
    # true, a name is generated using the fullname template
    name: ""
    annotations: {}
    ## Defines a secret name for the controller to generate a memberlist encryption secret
    ## By default secretName: {{ "metallb.fullname" }}-memberlist
    ##
    # secretName:
  resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 100Mi
  nodeSelector: {}
  tolerations: []
  priorityClassName: ""
  affinity: {}
  ## Selects which runtime class will be used by the pod.
  runtimeClassName: ""
  podAnnotations: {}
  livenessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  readinessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  startupProbe:
    enabled: true
    failureThreshold: 30
    periodSeconds: 5
  # frr contains configuration specific to the MetalLB FRR container,
  # for speaker running alongside FRR.
  frr:
    enabled: false
    image:
      repository: quay.io/frrouting/frr
      tag: 7.5.1
      pullPolicy:
    metricsPort: 7473
    resources: {}
    # if set, enables a rbac proxy sidecar container on the speaker to
    # expose the frr metrics via tls.
    # secureMetricsPort: 9121
  reloader:
    resources: {}
  frrMetrics:
    resources: {}
crds:
  enabled: true
  validationFailurePolicy: Fail

View File

@@ -0,0 +1,13 @@
```
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
```
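To check that the provisioner picks up the claim, apply it and watch the PVC status (a minimal smoke test, assuming the manifest above is saved as `test-claim.yaml`):
```
k apply -f test-claim.yaml
k get pvc test-claim
k delete -f test-claim.yaml
```
The PVC should switch to `Bound` once the provisioner has created the backing PV.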

View File

@@ -0,0 +1,9 @@
namespace: nfs-provisioner
bases:
- github.com/kubernetes-sigs/nfs-subdir-external-provisioner//deploy
resources:
- namespace.yaml
patchesStrategicMerge:
- nfs_values.yaml

View File

@@ -0,0 +1,5 @@
# namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: nfs-provisioner

View File

@@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nfs-client-provisioner
  name: nfs-client-provisioner
spec:
  template:
    spec:
      containers:
      - name: nfs-client-provisioner
        env:
        - name: NFS_SERVER
          value: 192.168.1.157
        - name: NFS_PATH
          value: /export/kluster/
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.1.157
          path: /export/kluster/

View File

@@ -0,0 +1,13 @@
Create a new role by executing the `createuser` command. With the options below, the new role will not be a superuser (`-S`) and will not have privileges to create new databases (`-D`) or new roles (`-R`); this is usually the default for `createuser` anyway.
```
k exec -it -n postgres postgres-postgresql-0 -- bash
createuser -U postgres USER_NAME -S -D -R -P
```
You will be prompted to enter the password for the new role twice, and then the password of the postgres role.
Create a new database with the new role as the owner:
```
createdb -U postgres DATABASE_NAME -O USER_NAME
```
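To verify the new role and database, open a session directly (a quick sanity check; this assumes the `psql` client bundled in the postgres image, with `USER_NAME` and `DATABASE_NAME` being the placeholders from above):
```
k exec -it -n postgres postgres-postgresql-0 -- psql -U USER_NAME -d DATABASE_NAME -c '\conninfo'
```
If the credentials are correct, `\conninfo` prints the user and database the session is connected to.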

View File

@@ -0,0 +1,2 @@
name: postgres
chart: bitnami/postgresql

View File

@@ -0,0 +1,21 @@
{
  "kind": "SealedSecret",
  "apiVersion": "bitnami.com/v1alpha1",
  "metadata": {
    "name": "postgres-password",
    "namespace": "postgres",
    "creationTimestamp": null
  },
  "spec": {
    "template": {
      "metadata": {
        "name": "postgres-password",
        "namespace": "postgres",
        "creationTimestamp": null
      }
    },
    "encryptedData": {
"password": "AgCVytxZbe1yjT7OQuA7LocPTgn6Ikx9pDJAA49Ktboy86dJWlxnBke23O0qn3ELFTGUTDaMhBcJB0neqA0RjTTW3o7PsvbxEBvrP5F1EK4jN2vHti8Jgt/CUbOlJVfFuGPaL2DG9M7vafUnL3AQvZv/YkL79Q32Wcg9nPq+4iT7fTGQzUu22G6bmKJv/SnByAnBIzZRsL3R3pP4J7suG+5+K6PDlNRbIb0mIoy1vjBz5PKQAR2Hrh1+kLFIJEIwDuinSDHRDUoa9fChC52x/Oc4PavFw8RWTXjot5cnEOkUK3umSx0jnD247nPc8sRW87hmHE3O/T+doDqEetQxtarSNPxCZXwkVJCIAxg48M29mdkPiOUu2Rr9W9w+HnN8j7mA2rHYAxxi3KPeDBL7kaFH+Xtyv+MT6upRr9BHfSbA/gMPjT37dJmbEYJAvEEyZZJK6TpXUkLh3jnhg1P180t8AnJVX4KQhjUm+UmgUCytxEjp082vxoKEHop6I7f4qzUYfudaG825i0zL11yjSvUbQbdoe8j3C5pNs5OgNBboGqYGfreCcp76zKdNrNI6GYhtj04AuOQZP5SD9/bqsP4JW4yFYsWsq3XuqIxE/2ExCRvDOFu2H1rnPnkcvUYr30doYPIugP40l7AY18YucUsbH19ww7jM1TOejo5QS5wb39uygwf4j0+XjbD3iV12AQzaEnk/pfo="
    }
  }
}

View File

@@ -0,0 +1,37 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: postgres
  name: postgres-nfs
  labels:
    directory: postgres
spec:
  storageClassName: fast
  capacity:
    storage: "50Gi"
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  nfs:
    path: /export/kluster/postgres
    server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: postgres
  name: postgres-nfs
spec:
  storageClassName: fast
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: "50Gi"
  selector:
    matchLabels:
      directory: postgres

File diff suppressed because it is too large

View File

@@ -0,0 +1,377 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-service-proxier
  name: sealed-secrets-service-proxier
  namespace: kube-system
rules:
- apiGroups:
  - ""
  resourceNames:
  - sealed-secrets-controller
  resources:
  - services
  verbs:
  - get
- apiGroups:
  - ""
  resourceNames:
  - 'http:sealed-secrets-controller:'
  - http:sealed-secrets-controller:http
  - sealed-secrets-controller
  resources:
  - services/proxy
  verbs:
  - create
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-controller
  name: sealed-secrets-controller
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: sealed-secrets-key-admin
subjects:
- kind: ServiceAccount
  name: sealed-secrets-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-key-admin
  name: sealed-secrets-key-admin
  namespace: kube-system
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations: {}
  labels:
    name: secrets-unsealer
  name: secrets-unsealer
rules:
- apiGroups:
  - bitnami.com
  resources:
  - sealedsecrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - bitnami.com
  resources:
  - sealedsecrets/status
  verbs:
  - update
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-service-proxier
  name: sealed-secrets-service-proxier
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: sealed-secrets-service-proxier
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:authenticated
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-controller
  name: sealed-secrets-controller
  namespace: kube-system
spec:
  minReadySeconds: 30
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      name: sealed-secrets-controller
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations: {}
      labels:
        name: sealed-secrets-controller
    spec:
      containers:
      - args: []
        command:
        - controller
        env: []
        image: docker.io/bitnami/sealed-secrets-controller:v0.23.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /healthz
            port: http
        name: sealed-secrets-controller
        ports:
        - containerPort: 8080
          name: http
        readinessProbe:
          httpGet:
            path: /healthz
            port: http
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
        stdin: false
        tty: false
        volumeMounts:
        - mountPath: /tmp
          name: tmp
      imagePullSecrets: []
      initContainers: []
      securityContext:
        fsGroup: 65534
        runAsNonRoot: true
        runAsUser: 1001
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: sealed-secrets-controller
      terminationGracePeriodSeconds: 30
      volumes:
      - emptyDir: {}
        name: tmp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: sealedsecrets.bitnami.com
spec:
  group: bitnami.com
  names:
    kind: SealedSecret
    listKind: SealedSecretList
    plural: sealedsecrets
    singular: sealedsecret
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: SealedSecret is the K8s representation of a "sealed Secret" -
          a regular k8s Secret that has been sealed (encrypted) using the controller's
          key.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: SealedSecretSpec is the specification of a SealedSecret
            properties:
              data:
                description: Data is deprecated and will be removed eventually. Use
                  per-value EncryptedData instead.
                format: byte
                type: string
              encryptedData:
                additionalProperties:
                  type: string
                type: object
                x-kubernetes-preserve-unknown-fields: true
              template:
                description: Template defines the structure of the Secret that will
                  be created from this sealed secret.
                properties:
                  data:
                    additionalProperties:
                      type: string
                    description: Keys that should be templated using decrypted data
                    nullable: true
                    type: object
                  metadata:
                    description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata'
                    nullable: true
                    properties:
                      annotations:
                        additionalProperties:
                          type: string
                        type: object
                      finalizers:
                        items:
                          type: string
                        type: array
                      labels:
                        additionalProperties:
                          type: string
                        type: object
                      name:
                        type: string
                      namespace:
                        type: string
                    type: object
                    x-kubernetes-preserve-unknown-fields: true
                  type:
                    description: Used to facilitate programmatic handling of secret
                      data.
                    type: string
                type: object
            required:
            - encryptedData
            type: object
          status:
            description: SealedSecretStatus is the most recently observed status of
              the SealedSecret.
            properties:
              conditions:
                description: Represents the latest available observations of a sealed
                  secret's current state.
                items:
                  description: SealedSecretCondition describes the state of a sealed
                    secret at a certain point.
                  properties:
                    lastTransitionTime:
                      description: Last time the condition transitioned from one status
                        to another.
                      format: date-time
                      type: string
                    lastUpdateTime:
                      description: The last time this condition was updated.
                      format: date-time
                      type: string
                    message:
                      description: A human readable message indicating details about
                        the transition.
                      type: string
                    reason:
                      description: The reason for the condition's last transition.
                      type: string
                    status:
                      description: 'Status of the condition for a sealed secret. Valid
                        values for "Synced": "True", "False", or "Unknown".'
                      type: string
                    type:
                      description: 'Type of condition for a sealed secret. Valid value:
                        "Synced"'
                      type: string
                  required:
                  - status
                  - type
                  type: object
                type: array
              observedGeneration:
                description: ObservedGeneration reflects the generation most recently
                  observed by the sealed-secrets controller.
                format: int64
                type: integer
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
    subresources:
      status: {}
---
apiVersion: v1
kind: Service
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-controller
  name: sealed-secrets-controller
  namespace: kube-system
spec:
  ports:
  - port: 8080
    targetPort: 8080
  selector:
    name: sealed-secrets-controller
  type: ClusterIP
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-controller
  name: sealed-secrets-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: secrets-unsealer
subjects:
- kind: ServiceAccount
  name: sealed-secrets-controller
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations: {}
  labels:
    name: sealed-secrets-controller
  name: sealed-secrets-controller
  namespace: kube-system

View File

@@ -0,0 +1,2 @@
name: traefik
chart: traefik/traefik

View File

@@ -0,0 +1,87 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: traefik-config
  namespace: traefik-system
data:
  traefik.toml: |
    [ping]
    [global]
      checkNewVersion = true
      sendAnonymousUsage = false
    [log]
      level = "INFO"
    [accessLog]
      # format = "json"
      # filePath = "/var/log/traefik/access.log"
      [accessLog.fields]
        defaultMode = "keep"
        [accessLog.fields.names]
          "RequestProtocol" = "drop"
          "level" = "drop"
          "RequestContentSize" = "drop"
          "RequestScheme" = "drop"
          "StartLocal" = "drop"
          "StartUTC" = "drop"
          # ClientUsername: drop
          # DownstreamStatusLine: drop
          # RequestAddr: drop
          # RequestCount: drop
          # RequestHost: drop
          # RequestLine: drop
          # UpstreamAddr: drop
          # UpstreamStatusLine: drop
          # duration: drop
          # msg: drop
          # time: drop
          # upstream: drop
          # user_agent: drop
    [api]
      dashboard = true
      insecure = true
      debug = false
    [providers]
      [providers.kubernetesCRD]
        allowCrossNamespace = true
      [providers.kubernetesIngress]
        allowExternalNameServices = true
        ingressClass = "traefik"
    [serversTransport]
      insecureSkipVerify = true
    [entryPoints]
      [entryPoints.web]
        address = ":8000"
        [entryPoints.web.http]
          [entryPoints.web.http.redirections]
            [entryPoints.web.http.redirections.entryPoint]
              to = ":443" # should be the same as websecure, but the loadbalancer maps 443 -> 8443
              scheme = "https"
      [entryPoints.websecure]
        address = ":8443"
      [entryPoints.metrics]
        address = ":9100"
      [entryPoints.traefik]
        address = ":9000"
    [metrics]
      [metrics.influxDB2]
        address = "http://influxdb-influxdb2.monitoring:80"
        token = "N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ=="
        org = "influxdata"
        bucket = "kluster"
    [certificatesResolvers.default-tls.acme]
      email = "me@moll.re"
      storage = "/certs/acme.json"
      [certificatesResolvers.default-tls.acme.tlsChallenge]
    [experimental.plugins.traefik-plugin-geoblock]
      moduleName = "github.com/nscuro/traefik-plugin-geoblock"
      version = "v0.10.0"

View File

@@ -0,0 +1,33 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: traefik-system
  name: traefik-certificate
  labels:
    directory: traefik
spec:
  storageClassName: fast
  capacity:
    storage: "10Mi"
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /export/kluster/traefik/certs
    server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: traefik-system
  name: traefik-certificate
spec:
  storageClassName: fast
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: "10Mi"
  selector:
    matchLabels:
      directory: traefik

View File

@@ -0,0 +1,2 @@
name: telegraf-traefik
chart: influxdata/telegraf

View File

@@ -0,0 +1,151 @@
## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/
replicaCount: 1
image:
  repo: "telegraf"
  tag: "1.24"
  pullPolicy: IfNotPresent
podAnnotations: {}
podLabels: {}
imagePullSecrets: []
## Configure args passed to Telegraf containers
args: []
# The name of a secret in the same kubernetes namespace which contains values to
# be added to the environment (must be manually created)
# This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
env:
- name: HOSTNAME
  value: "telegraf-polling-service"
# An older "volumeMounts" key was previously added which will likely
# NOT WORK as you expect. Please use this newer configuration.
volumes:
- name: traefik-logs
  persistentVolumeClaim:
    claimName: traefik-logs
mountPoints:
- name: traefik-logs
  mountPath: /traefik_logs
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: # to read the traefik logs, the pod must be on the same node as traefik
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchExpressions: # matches labels: app.kubernetes.io/name=traefik
        - key: app.kubernetes.io/name
          operator: In
          values:
          - traefik
      topologyKey: "kubernetes.io/hostname"
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal|Exists"
#   value: "value"
#   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
service:
  enabled: false
  type: ClusterIP
  annotations: {}
rbac:
  # Specifies whether RBAC resources should be created
  create: true
  # Create only for the release namespace or cluster wide (Role vs ClusterRole)
  clusterWide: false
  # Rules for the created rule
  rules: []
  # When using the prometheus input to scrape all pods you need extra rules set to the ClusterRole to be
  # able to scan the pods for scraping labels. The following rules have been taken from:
  # https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46
  # - apiGroups:
  #     - ""
  #   resources:
  #     - nodes
  #     - nodes/proxy
  #     - nodes/metrics
  #     - services
  #     - endpoints
  #     - pods
  #     - ingresses
  #     - configmaps
  #   verbs:
  #     - get
  #     - list
  #     - watch
  # - apiGroups:
  #     - "extensions"
  #   resources:
  #     - ingresses/status
  #     - ingresses
  #   verbs:
  #     - get
  #     - list
  #     - watch
  # - nonResourceURLs:
  #     - "/metrics"
  #   verbs:
  #     - get
serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Annotations for the ServiceAccount
  annotations: {}
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
  agent:
    interval: "10s"
    round_interval: true
    metric_batch_size: 1000
    metric_buffer_limit: 10000
    collection_jitter: "0s"
    flush_interval: "10s"
    flush_jitter: "0s"
    precision: ""
    debug: false
    quiet: false
    logfile: ""
    hostname: "$HOSTNAME"
    omit_hostname: true
  # processors:
  #   - enum:
  #       mapping:
  #         field: "status"
  #         dest: "status_code"
  #         value_mappings:
  #           healthy: 1
  #           problem: 2
  #           critical: 3
  outputs:
  - influxdb_v2:
      urls:
      - "http://influxdb-influxdb2.monitoring:80"
      token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
      organization: "influxdata"
      bucket: "kluster"
      # retention_policy: "2w"
  inputs:
  - docker_log:
      endpoint: "unix:///var/run/docker.sock"
      from_beginning: false
      container_name_include: ["traefik"]

View File

@@ -0,0 +1,241 @@
# Default values for Traefik
image:
  name: traefik
  # defaults to appVersion
  tag: ""
  pullPolicy: IfNotPresent
#
# Configure the deployment
#
deployment:
  enabled: true
  # Can be either Deployment or DaemonSet
  kind: Deployment
  # Number of pods of the deployment (only applies when kind == Deployment)
  replicas: 1
  # Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)
  # revisionHistoryLimit: 1
  # Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
  terminationGracePeriodSeconds: 60
  # The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
  minReadySeconds: 0
  # Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
  annotations: {}
  # Additional deployment labels (e.g. for filtering deployment by custom labels)
  labels: {}
  # Additional pod annotations (e.g. for mesh injection or prometheus scraping)
  podAnnotations: {}
  # Additional Pod labels (e.g. for filtering Pod by custom labels)
  podLabels: {}
  # Additional containers (e.g. for metric offloading sidecars)
  additionalContainers: []
  # https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host
  # - name: socat-proxy
  #   image: alpine/socat:1.0.5
  #   args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
  #   volumeMounts:
  #     - name: dsdsocket
  #       mountPath: /socket
  # Additional volumes available for use with initContainers and additionalContainers
  additionalVolumes:
  # - name: traefik-logs
  #   persistentVolumeClaim:
  #     claimName: traefik-logs
  - name: traefik-certificate
    persistentVolumeClaim:
      claimName: traefik-certificate
  - name: traefik-config
    configMap:
      name: traefik-config
  # - name: dsdsocket
  #   hostPath:
  #     path: /var/run/statsd-exporter
  # Additional initContainers (e.g. for setting file permission as shown below)
  initContainers: []
  # The "volume-permissions" init container is required if you run into permission issues.
  # Related issue: https://github.com/traefik/traefik/issues/6972
  # - name: volume-permissions
  #   image: busybox:1.31.1
  #   command: ["sh", "-c", "chmod -Rv 600 /data/*"]
  #   volumeMounts:
  #     - name: data
  #       mountPath: /data
  # Use process namespace sharing
  shareProcessNamespace: false
  # Custom pod DNS policy. Apply if `hostNetwork: true`
  # dnsPolicy: ClusterFirstWithHostNet
  # Additional imagePullSecrets
  imagePullSecrets: []
  # - name: myRegistryKeySecretName
# Use ingressClass. Ignored if Traefik version < 2.3 / kubernetes < 1.18.x
ingressClass:
  # true is not unit-testable yet, pending https://github.com/rancher/helm-unittest/pull/12
  enabled: true
  isDefaultClass: true
  # Use to force a networking.k8s.io API Version for certain CI/CD applications. E.g. "v1beta1"
  fallbackApiVersion: ""
# Activate Pilot integration
pilot:
  enabled: false
  token: ""
  # Toggle Pilot Dashboard
  # dashboard: false
# Enable experimental features
experimental:
  http3:
    enabled: false
  plugins:
    enabled: false
  kubernetesGateway:
    enabled: false
    # certificate:
    #   group: "core"
    #   kind: "Secret"
    #   name: "mysecret"
    # By default, Gateway would be created to the Namespace you are deploying Traefik to.
    # You may create that Gateway in another namespace, setting its name below:
    # namespace: default
# Create an IngressRoute for the dashboard
ingressRoute:
  dashboard:
    enabled: false
    # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
    annotations: {}
    # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
    labels: {}
#
# Configure providers
#
providers:
  kubernetesCRD:
    enabled: true
    allowCrossNamespace: false
    allowExternalNameServices: true
    allowEmptyServices: false
    # ingressClass: traefik-internal
    # labelSelector: environment=production,method=traefik
    namespaces: []
    # - "default"
  kubernetesIngress:
    enabled: true
    allowExternalNameServices: true
    allowEmptyServices: false
    ingressClass: traefik
    # labelSelector: environment=production,method=traefik
    namespaces: []
    # - "default"
    # IP used for Kubernetes Ingress endpoints
    publishedService:
      enabled: false
      # Published Kubernetes Service to copy status from. Format: namespace/servicename
      # By default this Traefik service
      # pathOverride: ""
# Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments:
# - "--providers.file.filename=/config/dynamic.toml"
# - "--ping"
# - "--ping.entrypoint=web"
volumes: []
# - name: traefik-config
#   mountPath: /config
#   configMap:
#     name: traefik-config
# - name: public-cert
#   mountPath: "/certs"
#   type: secret
# - name: '{{ printf "%s-configs" .Release.Name }}'
#   mountPath: "/config"
#   type: configMap
# Additional volumeMounts to add to the Traefik container
additionalVolumeMounts:
# - name: traefik-logs
#   mountPath: /var/log/traefik
#   nfs:
#     server: 192.168.1.157
#     path: /kluster/traefik
# # For instance when using a logshipper for access logs
# - name: traefik-logs
#   # claimName: traefik-logs
#   mountPath: /var/log/traefik
- name: traefik-certificate
  # claimName: traefik-certificate
  mountPath: /certs
- name: traefik-config
  mountPath: /config
globalArguments:
- "--configfile=/config/traefik.toml"
additionalArguments: []
# Environment variables to be passed to Traefik's binary
env:
- name: TZ
  value: "Europe/Berlin"
# - name: SOME_VAR
#   value: some-var-value
# - name: SOME_VAR_FROM_CONFIG_MAP
#   valueFrom:
#     configMapRef:
#       name: configmap-name
#       key: config-key
# - name: SOME_SECRET
#   valueFrom:
#     secretKeyRef:
#       name: secret-name
#       key: secret-key
# Configure ports
ports: {} # leave unconfigured to use the values from the toml file
envFrom: []
# - configMapRef:
#     name: config-map-name
# - secretRef:
#     name: secret-name
tlsOptions: {}
# Options for the main traefik service, where the entrypoints traffic comes
# from.
service:
  enabled: true
  type: LoadBalancer
  # Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config)
  annotations: {}
  # Additional annotations for TCP service only
  annotationsTCP: {}
  # Additional annotations for UDP service only
  annotationsUDP: {}
  # Additional service labels (e.g. for filtering Service by custom labels)
  labels: {}
  # Additional entries here will be added to the service spec.
  # Cannot contain type, selector or ports entries.
  spec:
    # externalTrafficPolicy: Local
    loadBalancerIP: 192.168.3.1