14 Commits

SHA1 Message Date
55968ed454 Update owncloud/ocis Docker tag to v7 2024-12-30 12:01:41 +00:00
f8e9028810 monitoring fixes 2024-12-28 14:21:54 +01:00
ee20223507 monitoring cleanup 2024-12-23 22:40:35 +01:00
d6faeb3e4c immich update 2024-12-23 20:59:59 +01:00
c248c10fb9 Merge pull request 'Update Helm release cloudnative-pg to v0.23.0' (#272) from renovate/cloudnative-pg-0.x into main (Reviewed-on: #272) 2024-12-23 19:57:59 +00:00
fd2fd109e4 Merge pull request 'Update Helm release grafana to v8.8.2' (#266) from renovate/grafana-8.x into main (Reviewed-on: #266) 2024-12-23 19:57:42 +00:00
1e6db5d1fa Merge pull request 'Update Helm release redis to v20.6.1' (#267) from renovate/redis-20.x into main (Reviewed-on: #267) 2024-12-23 19:57:11 +00:00
2436bc6ea4 Merge pull request 'Update Helm release metallb to v0.14.9' (#269) from renovate/metallb-0.x into main (Reviewed-on: #269) 2024-12-23 19:56:54 +00:00
ac6ede67d9 Merge pull request 'Update vikunja/vikunja Docker tag to v0.24.6' (#271) from renovate/vikunja-vikunja-0.x into main (Reviewed-on: #271) 2024-12-23 19:56:17 +00:00
19296874a7 Update Helm release cloudnative-pg to v0.23.0 2024-12-23 17:31:34 +00:00
122c9012df Update vikunja/vikunja Docker tag to v0.24.6 2024-12-23 05:01:30 +00:00
7d8a5d9ab3 Update Helm release redis to v20.6.1 2024-12-22 03:02:03 +00:00
e5465adc39 Update Helm release metallb to v0.14.9 2024-12-17 16:31:31 +00:00
9d304e56e1 Update Helm release grafana to v8.8.2 2024-12-17 03:01:49 +00:00
28 changed files with 775 additions and 305 deletions

View File

@@ -0,0 +1,17 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: grafana-admin-secret
  namespace: grafana
spec:
  encryptedData:
    password: AgAU6g/CwKj+1gPpt4DLvLsS0YCvJdVHWw4W4bRhibE9brVvcJtGB3D9MTJrSLVVwusaE6OR59og7oW5ge3yTd/9bbclXYLrxEi7OwvkQjCvo8MfD8yhJO9nV4Xs9Mjk2Z4SHGYuq6wvcssuJrpz5f0XEC7ocTRA+u0UaE+/b4FrYF71uyKGvj8GSXgLZUjGPFsGfPzwJn7cLBmlclVHx1xGbFpUc042m5Mulpn0QolFQnOwZiW4PL8pQyz1MXVRwCsz0RJd5apZL3XJ4X7BLMoAp+diHQ2xi3zoU9VScp+J2QgvFdRKgDa6v7Jz1f+HCwq5W/DoegwFXBrcMIfF2YrnvTnc1PCVwD9IHOeylO7J2hfi8teQiqTvvRlVgdBTLqoqlVovemf5k6ke6JfjTwnsJjTNnL7MKN5Qt0o7N2XRZ3ba9jp8cKbI7fyFQKaU2QEf2PIkp82kEnixmpA1aATgeA3W4E5Km7sKHUEB81+pwnOe54tzD2ShgQX/+UiswhWYTT+gdZKL1udBBemUDC0z9PSJNTPTy+hq+G4CIzVQUYxlioM3c+3geF7YLU8yXisj84pk44GN9KX3z5x+M2+LZL7agAWPUjxtrP2V+id7dNJQfCm0aSMeo57dVfb4zlBUAAgKIKjX+j1KqCVqE9zEO2F/QX7mY6MJTP2me3wmY7JAVRJ7d6bbkyyoDhs8JErLYLp0A+Eh+qx8nWgM9ErPVSA0
    user: AgB8ZLG2EuERjg1nKdH/xadbUuIR2c8a9gF5fE8ctrp4DNDLLuuqmjyoHRiWpkrtfnE1yKg1rPP+asV9Lj5iVmE9J+OB3QUOeFS4MHciBNj7pa68zfFgnHP4kxMX6aXyKRQrYruYjHwfzCpOM1zyTEphuGlnokjQXxjF/mZsoM2NWn7WGReqfxqH95tJXfs9AUC5vVv/PHqd+KKRZH7+G1AnWVJ7RFQHedR7wyftO4/rkm8deMuZWtOLl25fAOyOr7+hSqT69s9/uTKSLJXjobSqtulqsR+v5lkwx2ThNKzmcEcuoenKG6lk8XLRSIscccZH3JTPh6IknQWUOC4nmYj+XUxE8Go0RX/4eL+D/6FrYrtp0gr3HOCLAGU4vAHMeKfJoyqykJVnvY6QY6bFgaziyOlWaoEHpg6g0vHHDwyX7HIDcQfJZGOLH9dhrWJ2sOkzyuuxfqWEgz/M2eBW4EUAudHwfTLPocSMUI+D6fjeciMojet5uxWMP7ZHh/E061f5+Vfk6CKYd9Kpi69Xah8KEyyHYP5NImkdIwjgllaEAd/FBE2+QJyTVZlUQC7y9ObagDMCUFaFbTS5QOLh5BOJDL5buEYFWG0IhoH47SC/pKeEOQH//uvoo27K9zvxTOQN1YOTrxCozmexMOsTIdhvU0dOnJDBrThSHKYLCeIokDOgUUT52FqDH51RoLoK3UkyGbMoq+M=
  template:
    metadata:
      creationTimestamp: null
      name: grafana-admin-secret
      namespace: grafana
    type: Opaque
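
A SealedSecret like this is typically generated by piping a plain Secret through kubeseal; a sketch, where the literal user/password values and the output filename are placeholders, not the real credentials:

    kubectl create secret generic grafana-admin-secret \
      --namespace grafana \
      --from-literal=user=admin \
      --from-literal=password='CHANGE-ME' \
      --dry-run=client -o yaml \
      | kubeseal --format yaml > grafana-admin-secret.sealedsecret.yaml

With the controller's default strict scope, the ciphertext is bound to the secret's name and namespace, which is presumably why moving Grafana out of monitoring required freshly sealed copies here rather than editing the namespace of the old ones (the monitoring-scoped secrets are deleted further down).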

View File

@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: grafana-auth
  namespace: grafana
spec:
  encryptedData:
    client_secret: AgCEdC1/ERlPQyQP+bd9gcW33Yrvl4uRbx+RF5AY4vYAquOzxmLTygMl/WZlB5wlCE5idIHgto6/fUWVZrQbmfClRqsW2pFoddKQAtS9cQNXwMjLCm7e0lXk9GM9O3ZwktmklFbCu8XewHmefGHhoJ28vPxPMaINv1fM4zYKvNz5RHf0dJfTHgxb68wRYjAbE/eJpRcVE3a29Yw6Gfa8Mb+cFI7RTHvjuv9LBgWqM6b3qvvJ4wYR2WKuiQrnJ5xAtHpMAI/2R80qq151wlaZueDZ1PwjRBHURkmPTmwZnrMrmIugNge7Tpww+ArZlG9kDfSu1aTJidbXbcpN6fyt1qARTCYrBlbn60PTYLnPL/NObvMCpjS6DsYsYz7MJ7WoOupu46Ib5paZHmak+CilC6lb9LjJj4EKfRsagZmWT07JavhHBW/tqjB3GToccIz4fOAOdA9aU51J4wCL2ctp2SgzCEKe2EaBK/f9nDd9ASmmon9PDwRDVtG8yTukrNcZHNzodi09Af81DB0RNa36Z3Sjt5xu94paN+mjiOWGf2JduVEq+60NbPvDbPE9e1aVH3DdQcij2WGZaTE8dAGLSsLoOkIq3m2E+Mbk1Re1gI9H18xJM72ivb5uDe7pzReyvO5DY4Pfq8JgQhPxWcDq9ScmWS6Bb+jdCKytFq5NafSAl+akPbbwN+1GFu33if/P5D9I2TwOA8V1wyVU
  template:
    metadata:
      creationTimestamp: null
      name: grafana-auth
      namespace: grafana
    type: Opaque

View File

@@ -35,13 +35,17 @@ datasources:
   datasources.yaml:
     apiVersion: 1
     datasources:
-    - name: Thanos
-      type: prometheus
-      url: http://thanos-querier.prometheus.svc:10902
-      isDefault: true
     - name: Prometheus
       type: prometheus
-      url: http://prometheus.prometheus.svc:9090
+      url: http://prometheus-server.monitoring.svc:80
       isDefault: true
+    - name: Thanos
+      type: prometheus
+      url: http://thanos-querier.monitoring.svc:10902
+      isDefault: false
+    - name: Loki
+      type: loki
+      url: http://loki.monitoring.svc:3100
+      isDefault: false
 dashboardProviders:
@@ -91,3 +95,4 @@ grafana.ini:
     tls_skip_verify_insecure: true
     auto_login: true
     use_pkce: true
+    role_attribute_path: contains(groups[*], 'apps_admin') && 'Admin' || 'Editor'
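
The added role_attribute_path is a JMESPath expression evaluated against the OAuth identity: users whose groups claim contains apps_admin get Grafana's Admin role, everyone else falls back to Editor. A minimal sketch of the surrounding grafana.ini block — the auth.generic_oauth section name and the enabled flag are assumptions inferred from the auto_login/use_pkce keys above, not shown in the hunk:

    grafana.ini:
      auth.generic_oauth:
        enabled: true  # assumed, not shown in the hunk
        use_pkce: true
        role_attribute_path: contains(groups[*], 'apps_admin') && 'Admin' || 'Editor'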

View File

@@ -1,7 +1,7 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
-namespace: monitoring
+namespace: grafana
 resources:
 - namespace.yaml
@@ -17,5 +17,5 @@ helmCharts:
 - releaseName: grafana
   name: grafana
   repo: https://grafana.github.io/helm-charts
-  version: 8.7.0
+  version: 8.8.2
   valuesFile: grafana.values.yaml
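
Because this kustomization inflates a Helm chart via helmCharts, rendering it locally needs Helm support switched on; a sketch, with the repo-relative path assumed from the Argo CD Application change further down:

    kustomize build --enable-helm apps/grafana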

View File

@@ -14,16 +14,16 @@ namespace: immich
 helmCharts:
 - name: immich
   releaseName: immich
-  version: 0.8.5
+  version: 0.9.0
   valuesFile: values.yaml
   repo: https://immich-app.github.io/immich-charts
 images:
 - name: ghcr.io/immich-app/immich-machine-learning
-  newTag: v1.122.2
+  newTag: v1.123.0
 - name: ghcr.io/immich-app/immich-server
-  newTag: v1.122.2
+  newTag: v1.123.0
 patches:

View File

@@ -1,17 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: grafana-admin-secret
  namespace: monitoring
spec:
  encryptedData:
    password: AgAwMLnsYN1y8JQSqgGQbNG/8jKensTDsEw6ogITdkhDRlJcg8HQ5t7a6xLzNCrLHLJiQW8YOoyLT4lvFkBRMOa2EYcrDvBiRD0PjygWLIscKa7dA+jpAUf/icD9zsiDnTym2yf+VUANcmEgE6DiNvlcsrcmYqiR4pKVUTDlKPNOjOpTJ3nXETb3/sbt69E0JSGwtkvusYQSXKLU9KLbciihv+ycdkdlC9xy9myd4+vYZYXSh/eAvyZeb/hsmdSX7yaASmupMvet6Qsdt99PNzFQxtbQH+LQvYalVZ8bjWZQvCN/p0bA4H15otKBfe8rtEwVthgvyEvo6TK0Mg0pFY/b3AOGFmImnT3rDmgG6S8KTZH0Jce17ksFqvELQmHjqHuYpQsPDl44glM8kWRJ9Mf/Z424LRwZlJNVcOkuVl4qFqPUjzd2rWIyF0RaD0BE012C0ThJxKn2l17lVJbNtdUiR3qNpW01ot2m0CgKd2kXbjDmgRgAll4WgrukfCIn9ZnE0gVCFLJuK3MOQAaipFYy/bDO0izwl9T8nldgcI8OfiC3NTk2O+Es5jJRXu0oJGaC3HrTB7wXiwOoELvAsxLTPxKBiN9mCHCMtZX0PEtrio0dFRQ6Pi5xPng0KVT0I9dvGNsPdhPETNOB913WEvbgP8Gt3cj016nCzk51eUsYbXPpNL2B4kmbIhecqW/8kwKQPwYjVlBSXj3NxjzwMY6PvOl1
    user: AgBqmjCYGMqy5zBE+vhtsynOvhWdHWDJDyl1D+laBtLjXTJwzRbNTdunHYo1ekwyqQ6Cr5pi4YMiLxAl1LIHF+Lfsp2QlY+ResAGzp9WgSBtNQDX3EmLDQofeWxMUDdMtMsE9wiKLCfNGDkRDsGquXTz+YFq03m1vH9cB8Bp+1ClWOTui+/Ce0MZlWsJZX1W8WXH7XTirtwUo0s53pc4AplUUH97ZEK3KSIxWa3gLCn0sAPDDLPX+JVA2xtpMq1XuVFiFifjzEtG2h0dejiF35FtSAR+rR4YmEfimk3QpRDfOqV5QUxvjCG+dTV49upSevF2mvbHW+o+lB6vEc6l9cZXvlbnMdaep3NmOsJcJ8wQIdFpFK4iVzFOTKSEbzLPlZ/J+sjS5vDXsfthorIO2faMA1iIf+I663zNxQU5btaK4TNYOZQlrFVjAmioRLkDhGZ6tDUPX/zMv+Crt+0HCwyEyhmvFZckDvezTZrxARSXXMKBVcvjHCyUNkz7ubZRiMU0PGM7fYuHr659e+XMRvj+LFA68ZaEIzCQpCFJenWWYAXgUdRG4LQ1LP2MwvRHpkOYSoRkHIpX7jOfhX82A60h/ta/CdbWifqNyL9OecvE3FKsZu/Kr0taw9W6nm6FBhQLgFkOnFrqp9dWnxfHruXuDBgcn0iE8nR7Ht2zS7hfQPeR4a3Y0xK3Plqbzdrb9HKnWQQhf14=
  template:
    metadata:
      creationTimestamp: null
      name: grafana-admin-secret
      namespace: monitoring
    type: Opaque

View File

@@ -1,16 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: grafana-auth
  namespace: monitoring
spec:
  encryptedData:
    client_secret: AgCcKsnS3u2eI+fNVC9hAZ3QRFOHFErAzs5aQgX51CSdJwM03SZUoTyrDi5JPcHUVyS3MbevFH5piMhDTARMI3bLOjYlcwMbpf77JCPa7o95Y9asA/FW3lXicYt3biN9xBXJBz7Ws3fVRtEzyf6DmbGedT9gaX8aPwrUVbP19RdyJiuu76oB1A/jdUkX4K+X6kVvmoP/BWdypk/kdQJrzBNt00DIXF4NHfYey36AuhpBtqYZs4faA/tBXMXLE4RxPNtcHwNfVjnRj3v3qzNufD1fnweJvLq2UfLMrQjoR9XDVnM0zkpautylkI7yrvcoEH7ljnf6b1FMogOEZUfH1BIdqTd/WwrrlCqE58OPfJWthIfN+pQ8LvdHsGo3jc9gXvfXS2cStyhP06eTZ4D79kG+RtDQGOsD/Wpx7EcM6hbB3+dIjcs3wEAIGjpIVtY9JayW8YeRnFApMuhDST1+hscm+LdoGvaSTlAuGzv9BbVrPX/Fo9XKeYHlbG/x71Er+vF8WbW0wUa46MHLvbEy376XIdJDYi+vjl4eqznZ6YhvPbawhoKXT8ZcKUcUAjVcMue/O/jCSPZplbn3vdSCeqPTiqVqDw9PTMIeWFUepgPMxiGpFRAqdwIecFBnYItq0dXoGlFrZpo0S6AECgZjxzUR5EgdkdPlDDs2CN+d9yP7f2S+gmL7AIlQr74NW1GrTGw2x/rD4IJhunh7
  template:
    metadata:
      creationTimestamp: null
      name: grafana-auth
      namespace: monitoring
    type: Opaque

View File

@@ -21,7 +21,7 @@ helmCharts:
 - name: redis
   releaseName: redis
   repo: https://charts.bitnami.com/bitnami
-  version: 20.5.0
+  version: 20.6.1
   valuesInline:
     auth:
       enabled: false

View File

@@ -15,4 +15,4 @@ resources:
 images:
 - name: todos
   newName: vikunja/vikunja
-  newTag: 0.24.5
+  newTag: 0.24.6

View File

@@ -10,6 +10,6 @@ namespace: metallb-system
 helmCharts:
 - name: metallb
   repo: https://metallb.github.io/metallb
-  version: 0.14.8
+  version: 0.14.9
   releaseName: metallb
   valuesFile: values.yaml

View File

@@ -0,0 +1,28 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
- namespace.yaml
# prometheus-operator crds
- thanos-objstore-config.sealedsecret.yaml
# - loki-objstore-config.sealedsecret.yaml
images:
- name: thanos
  newName: quay.io/thanos/thanos
  newTag: v0.37.2
helmCharts:
- name: loki
  releaseName: loki
  repo: https://grafana.github.io/helm-charts
  version: 6.24.0
  valuesFile: loki.values.yaml
- name: prometheus
  releaseName: prometheus
  repo: https://prometheus-community.github.io/helm-charts
  version: 26.0.1
  valuesFile: prometheus.values.yaml

View File

@@ -0,0 +1,86 @@
loki:
  commonConfig:
    replication_factor: 1
  schemaConfig:
    configs:
      - from: "2024-04-01"
        store: tsdb
        object_store: filesystem
        schema: v13
        index:
          prefix: loki_index_
          period: 24h
  auth_enabled: false
  pattern_ingester:
    enabled: true
  limits_config:
    allow_structured_metadata: true
    volume_enabled: true
    retention_period: 672h # 28 days retention
  ruler:
    enable_api: true
  storage:
    bucketNames:
      # don't care since we use the filesystem
      chunks: NOTUSED
      ruler: NOTUSED
      admin: NOTUSED
    type: filesystem
    filesystem:
      chunks_directory: /var/loki/chunks
      rules_directory: /var/loki/rules
      admin_api_directory: /var/loki/admin

minio:
  enabled: false

deploymentMode: SingleBinary

singleBinary:
  replicas: 1
  persistence:
    # -- Enable StatefulSetAutoDeletePVC feature
    enableStatefulSetAutoDeletePVC: true
    # -- Enable persistent disk
    enabled: true
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: nfs-client

# -- Section for configuring optional Helm test
helm:
  enabled: false

# Zero out replica counts of other deployment modes
backend:
  replicas: 0
read:
  replicas: 0
write:
  replicas: 0
ingester:
  replicas: 0
querier:
  replicas: 0
queryFrontend:
  replicas: 0
queryScheduler:
  replicas: 0
distributor:
  replicas: 0
compactor:
  replicas: 0
indexGateway:
  replicas: 0
bloomCompactor:
  replicas: 0
bloomGateway:
  replicas: 0
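
A quick in-cluster sanity check for this single-binary Loki — a sketch, assuming the chart's default service name loki, which matches the datasource URL added to Grafana above:

    curl -s http://loki.monitoring.svc:3100/ready
    curl -sG http://loki.monitoring.svc:3100/loki/api/v1/labels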

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder
  labels:
    pod-security.kubernetes.io/enforce: privileged
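
The name placeholder is deliberate: the kustomization above sets namespace: monitoring, and kustomize's namespace transformer also renames Namespace objects, so the rendered manifest should come out roughly as:

    apiVersion: v1
    kind: Namespace
    metadata:
      name: monitoring
      labels:
        pod-security.kubernetes.io/enforce: privileged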

View File

@@ -0,0 +1,574 @@
podSecurityPolicy:
  enabled: true

server:
  extraArgs:
    log.level: debug
    storage.tsdb.min-block-duration: 2h # Don't change this, see docs/components/sidecar.md
    storage.tsdb.max-block-duration: 2h # Don't change this, see docs/components/sidecar.md
  retention: 180d
  service:
    annotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "9090"
  statefulSet:
    enabled: true
  podAnnotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "10902"
  # sidecarContainers:
  #   thanos-sidecar:
  #     image: thanos
  #     resources:
  #       requests:
  #         memory: "512Mi"
  #     env:
  #       - name: GOOGLE_APPLICATION_CREDENTIALS
  #         value: /etc/secret/sa
  #     args:
  #       - "sidecar"
  #       - "--log.level=debug"
  #       - "--tsdb.path=/data/"
  #       - "--prometheus.url=http://127.0.0.1:9090"
  #       - "--objstore.config={type: GCS, config: {bucket: BUCKET_REPLACE_ME}}"
  #       - "--reloader.config-file=/etc/prometheus-config/prometheus.yml"
  #       - "--reloader.config-envsubst-file=/etc/prometheus-shared/prometheus.yml"
  #       - "--reloader.rule-dir=/etc/prometheus-config/rules"
  #     ports:
  #       - name: sidecar-http
  #         containerPort: 10902
  #       - name: grpc
  #         containerPort: 10901
  #       - name: cluster
  #         containerPort: 10900
  #     volumeMounts:
  #       - name: storage-volume
  #         mountPath: /data
  #       - name: thanos-storage-secret
  #         mountPath: /etc/secret
  #       - name: config-volume
  #         mountPath: /etc/prometheus-config
  #         readOnly: false
  #       - name: prometheus-config-shared
  #         mountPath: /etc/prometheus-shared/
  #         readOnly: false
  # # configPath: /etc/prometheus-shared/prometheus.yml
  # replicaCount: 1
  # persistentVolume:
  #   size: 20Gi
  #   storageClass: nfs-client
  # extraVolumes: # spec.template.spec.volumes
  #   - name: prometheus-config-shared
  #     emptyDir: {}
  # extraVolumeMounts: # spec.template.spec.containers.volumeMounts for prometheus container
  #   - name: prometheus-config-shared
  #     mountPath: /etc/prometheus-shared/
  # resources:
  #   requests:
  #     memory: 1Gi
  # global:
  #   scrape_interval: 5s
  #   scrape_timeout: 4s
  #   external_labels:
  #     prometheus_group: KLUSTER
  #     prometheus_replica: '$(HOSTNAME)'
  #   evaluation_interval: 5s
  # extraSecretMounts:
  #   - name: thanos-storage-secret
  #     mountPath: /etc/secret/
  #     subPath: sa
  #     readOnly: false
  #     secretName: thanos-objstore-config

# as the thanos sidecar is taking care of the config reload
# we can disable the prometheus configmap reload
configmapReload:
  prometheus:
    enabled: false
## Prometheus server ConfigMap entries
##
serverFiles:
  ## Alerts configuration
  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
  alerting_rules.yml: {}
  # groups:
  #   - name: Instances
  #     rules:
  #       - alert: InstanceDown
  #         expr: up == 0
  #         for: 5m
  #         labels:
  #           severity: page
  #         annotations:
  #           description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.'
  #           summary: 'Instance {{ $labels.instance }} down'
  ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml
  alerts: {}

  ## Records configuration
  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
  recording_rules.yml: {}
  ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml
  rules: {}

  prometheus.yml:
    rule_files:
      - /etc/config/recording_rules.yml
      - /etc/config/alerting_rules.yml
      ## Below two files are DEPRECATED and will be removed from this default values file
      - /etc/config/rules
      - /etc/config/alerts
    scrape_configs:
      - job_name: prometheus
        static_configs:
          - targets:
              - localhost:9090

      # A scrape configuration for running Prometheus on a Kubernetes cluster.
      # This uses separate scrape configs for cluster components (i.e. API server, node)
      # and services to allow each to use different authentication configs.
      #
      # Kubernetes labels will be added as Prometheus labels on metrics via the
      # `labelmap` relabeling action.

      # Scrape config for API servers.
      #
      # Kubernetes exposes API servers as endpoints to the default/kubernetes
      # service so this uses `endpoints` role and uses relabelling to only keep
      # the endpoints associated with the default/kubernetes service using the
      # default named port `https`. This works for single API server deployments as
      # well as HA API server deployments.
      - job_name: 'kubernetes-apiservers'
        kubernetes_sd_configs:
          - role: endpoints

        # Default to scraping over https. If required, just disable this or change to
        # `http`.
        scheme: https

        # This TLS & bearer token file config is used to connect to the actual scrape
        # endpoints for cluster components. This is separate to discovery auth
        # configuration because discovery & scraping are two separate concerns in
        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
        # the cluster. Otherwise, more config options have to be provided within the
        # <kubernetes_sd_config>.
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          # If your node certificates are self-signed or use a different CA to the
          # master CA, then disable certificate verification below. Note that
          # certificate verification is an integral part of a secure infrastructure
          # so this should only be disabled in a controlled environment. You can
          # disable certificate verification by uncommenting the line below.
          #
          insecure_skip_verify: true
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

        # Keep only the default/kubernetes service endpoints for the https port. This
        # will add targets for each API server which Kubernetes adds an endpoint to
        # the default/kubernetes service.
        relabel_configs:
          - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
            action: keep
            regex: default;kubernetes;https
      - job_name: 'kubernetes-nodes'

        # Default to scraping over https. If required, just disable this or change to
        # `http`.
        scheme: https

        # This TLS & bearer token file config is used to connect to the actual scrape
        # endpoints for cluster components. This is separate to discovery auth
        # configuration because discovery & scraping are two separate concerns in
        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
        # the cluster. Otherwise, more config options have to be provided within the
        # <kubernetes_sd_config>.
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          # If your node certificates are self-signed or use a different CA to the
          # master CA, then disable certificate verification below. Note that
          # certificate verification is an integral part of a secure infrastructure
          # so this should only be disabled in a controlled environment. You can
          # disable certificate verification by uncommenting the line below.
          #
          insecure_skip_verify: true
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

        kubernetes_sd_configs:
          - role: node

        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/$1/proxy/metrics
      - job_name: 'kubernetes-nodes-cadvisor'

        # Default to scraping over https. If required, just disable this or change to
        # `http`.
        scheme: https

        # This TLS & bearer token file config is used to connect to the actual scrape
        # endpoints for cluster components. This is separate to discovery auth
        # configuration because discovery & scraping are two separate concerns in
        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
        # the cluster. Otherwise, more config options have to be provided within the
        # <kubernetes_sd_config>.
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          # If your node certificates are self-signed or use a different CA to the
          # master CA, then disable certificate verification below. Note that
          # certificate verification is an integral part of a secure infrastructure
          # so this should only be disabled in a controlled environment. You can
          # disable certificate verification by uncommenting the line below.
          #
          insecure_skip_verify: true
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

        kubernetes_sd_configs:
          - role: node

        # This configuration will work only on kubelet 1.7.3+
        # As the scrape endpoints for cAdvisor have changed
        # if you are using an older version you need to change the replacement to
        # replacement: /api/v1/nodes/$1:4194/proxy/metrics
        # more info here https://github.com/coreos/prometheus-operator/issues/633
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor

        # Metric relabel configs to apply to samples before ingestion.
        # [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
        # metric_relabel_configs:
        #   - action: labeldrop
        #     regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
      # Scrape config for service endpoints.
      #
      # The relabeling allows the actual service scrape endpoint to be configured
      # via the following annotations:
      #
      # * `prometheus.io/scrape`: Only scrape services that have a value of
      #   `true`, except if `prometheus.io/scrape-slow` is set to `true` as well.
      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
      #   to set this to `https` & most likely set the `tls_config` of the scrape config.
      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
      #   service then set this appropriately.
      # * `prometheus.io/param_<parameter>`: If the metrics endpoint uses parameters
      #   then you can set any parameter
      - job_name: 'kubernetes-service-endpoints'
        honor_labels: true

        kubernetes_sd_configs:
          - role: endpoints

        relabel_configs:
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
            action: drop
            regex: true
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
            action: replace
            target_label: __scheme__
            regex: (https?)
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
            action: replace
            target_label: __address__
            regex: (.+?)(?::\d+)?;(\d+)
            replacement: $1:$2
          - action: labelmap
            regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+)
            replacement: __param_$1
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: namespace
          - source_labels: [__meta_kubernetes_service_name]
            action: replace
            target_label: service
          - source_labels: [__meta_kubernetes_pod_node_name]
            action: replace
            target_label: node
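      # As a hedged illustration (name and port are hypothetical), a Service
      # opts in to the job above with annotations like:
      #
      #   apiVersion: v1
      #   kind: Service
      #   metadata:
      #     name: my-exporter
      #     annotations:
      #       prometheus.io/scrape: "true"
      #       prometheus.io/port: "8080"
      #       prometheus.io/path: /metrics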
      # Scrape config for slow service endpoints; same as above, but with a larger
      # timeout and a larger interval
      #
      # The relabeling allows the actual service scrape endpoint to be configured
      # via the following annotations:
      #
      # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true`
      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
      #   to set this to `https` & most likely set the `tls_config` of the scrape config.
      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
      #   service then set this appropriately.
      # * `prometheus.io/param_<parameter>`: If the metrics endpoint uses parameters
      #   then you can set any parameter
      - job_name: 'kubernetes-service-endpoints-slow'
        honor_labels: true

        scrape_interval: 5m
        scrape_timeout: 30s

        kubernetes_sd_configs:
          - role: endpoints

        relabel_configs:
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
            action: replace
            target_label: __scheme__
            regex: (https?)
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
            action: replace
            target_label: __address__
            regex: (.+?)(?::\d+)?;(\d+)
            replacement: $1:$2
          - action: labelmap
            regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+)
            replacement: __param_$1
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: namespace
          - source_labels: [__meta_kubernetes_service_name]
            action: replace
            target_label: service
          - source_labels: [__meta_kubernetes_pod_node_name]
            action: replace
            target_label: node
      - job_name: 'prometheus-pushgateway'
        honor_labels: true

        kubernetes_sd_configs:
          - role: service

        relabel_configs:
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
            action: keep
            regex: pushgateway

      # Example scrape config for probing services via the Blackbox Exporter.
      #
      # The relabeling allows the actual service scrape endpoint to be configured
      # via the following annotations:
      #
      # * `prometheus.io/probe`: Only probe services that have a value of `true`
      - job_name: 'kubernetes-services'
        honor_labels: true

        metrics_path: /probe
        params:
          module: [http_2xx]

        kubernetes_sd_configs:
          - role: service

        relabel_configs:
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
            action: keep
            regex: true
          - source_labels: [__address__]
            target_label: __param_target
          - target_label: __address__
            replacement: blackbox
          - source_labels: [__param_target]
            target_label: instance
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            target_label: namespace
          - source_labels: [__meta_kubernetes_service_name]
            target_label: service
      # Example scrape config for pods
      #
      # The relabeling allows the actual pod scrape endpoint to be configured via the
      # following annotations:
      #
      # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`,
      #   except if `prometheus.io/scrape-slow` is set to `true` as well.
      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
      #   to set this to `https` & most likely set the `tls_config` of the scrape config.
      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
      - job_name: 'kubernetes-pods'
        honor_labels: true

        kubernetes_sd_configs:
          - role: pod

        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
            action: drop
            regex: true
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
            action: replace
            regex: (https?)
            target_label: __scheme__
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
            action: replace
            regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})
            replacement: '[$2]:$1'
            target_label: __address__
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
            action: replace
            regex: (\d+);((([0-9]+?)(\.|$)){4})
            replacement: $2:$1
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
            replacement: __param_$1
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: namespace
          - source_labels: [__meta_kubernetes_pod_name]
            action: replace
            target_label: pod
          - source_labels: [__meta_kubernetes_pod_phase]
            regex: Pending|Succeeded|Failed|Completed
            action: drop
          - source_labels: [__meta_kubernetes_pod_node_name]
            action: replace
            target_label: node
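      # As a hedged illustration (the port is hypothetical), a pod template
      # opts in to the job above via its own annotations:
      #
      #   metadata:
      #     annotations:
      #       prometheus.io/scrape: "true"
      #       prometheus.io/port: "9102"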
      # Example scrape config for pods which should be scraped slower. A useful example
      # would be stackdriver-exporter which queries an API on every scrape of the pod
      #
      # The relabeling allows the actual pod scrape endpoint to be configured via the
      # following annotations:
      #
      # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
      #   to set this to `https` & most likely set the `tls_config` of the scrape config.
      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
      - job_name: 'kubernetes-pods-slow'
        honor_labels: true

        scrape_interval: 5m
        scrape_timeout: 30s

        kubernetes_sd_configs:
          - role: pod

        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
            action: replace
            regex: (https?)
            target_label: __scheme__
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
            action: replace
            regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})
            replacement: '[$2]:$1'
            target_label: __address__
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
            action: replace
            regex: (\d+);((([0-9]+?)(\.|$)){4})
            replacement: $2:$1
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
            replacement: __param_$1
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: namespace
          - source_labels: [__meta_kubernetes_pod_name]
            action: replace
            target_label: pod
          - source_labels: [__meta_kubernetes_pod_phase]
            regex: Pending|Succeeded|Failed|Completed
            action: drop
          - source_labels: [__meta_kubernetes_pod_node_name]
            action: replace
            target_label: node
# Configuration of subcharts defined in Chart.yaml

## alertmanager sub-chart configurable values
## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager
##
alertmanager:
  enabled: false

## kube-state-metrics sub-chart configurable values
## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics
##
kube-state-metrics:
  ## If false, kube-state-metrics sub-chart will not be installed
  ##
  enabled: true

## prometheus-node-exporter sub-chart configurable values
## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter
##
prometheus-node-exporter:
  ## If false, node-exporter will not be installed
  ##
  enabled: true

  rbac:
    pspEnabled: false

  containerSecurityContext:
    allowPrivilegeEscalation: false

## prometheus-pushgateway sub-chart configurable values
## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway
##
prometheus-pushgateway:
  ## If false, pushgateway will not be installed
  ##
  enabled: false

View File

@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: thanos-objstore-config
  namespace: monitoring
spec:
  encryptedData:
    thanos.yaml: AgAqlul2V1idfgbWvq/0ljSFlxOOsQmwlGd+jRvDDyi1nlR8woHrp7lW6AxJ/8mBtb5htCuJzLgx+HVrN/EN+fRn5xG3D5+8xs4jWBOQ49MgLSAjJavFPcVY5xiBpGaw/N8aotlbfv6Wa2/+cmiAzVDPwnOj5zCS/EU58Tu2YFeVSbMUlu0NFAeyBW0DVT2enuVLToP4Ge4T0U9F99NHOh2zlVG82iI+4RxCu/WBkOU/urVleGwCYkcr/ItmXiwRXbwnWUtEUf28Q4ArpuZXFkKZUMoIwOjkXgOn/ySBLVvf0yy1+WOcYAIX9ouxu6i4T1GAZO9RnKeMJOIyebI3EOMA2dxQFpQg2/XhhHz2Ds2oDX/yr7vXbZJGyiCvTnnFUvFALKWIjRXXWphdqHDk6iP8tFIKVFsn7UxgMVFRcs6DmcMpBgFOcjpHr4HFZap5G9hI3cscmkNfwU+JOXkDEGRpZkkECza4wlQln8Wptq1qa+I+DSclqLOcvoEvNCJCIIgh5tINJ0KiZcrBvymUZZ9VduH4TFHR/UQK7M7It892TDNUlIp2UDWiuQ2DJysOJXmvSiNo8PGWSyDJwKJPhaWqXz9RUsb4D8gq/a+0qC7DOICrJEUj7WL8dwaKoQa32Cf+wopwrjFWSE7pAfiBJo+Dqa9jHIDv2hVsdU8NXqiFK35XHyUT4i0KWc+UZg4ObotGxYMvRtJuc3S7ZGTJ4YKDP5iThuNSuNd1pd1YjirpvVtL2o5BYh2i55F3DfVREofYpBCjK1e43mHOwEUYZ7Ff6p1+S0PXZnkL53xHMiiW3yr0v1g2ZYk7vzkENb9epzm24fNX/4ZiJdb0glEJmB674bgDSeh9PA5q8nJIKk6vsbrzfaAYWIn5Ai9MPbAVfg9pPkMyy9ydd+SqecujkWm++4dHqB1WJUg=
  template:
    metadata:
      creationTimestamp: null
      name: thanos-objstore-config
      namespace: monitoring
    type: Opaque
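
The sealed thanos.yaml follows the Thanos object-storage configuration format; a sketch of the plaintext shape only — the provider choice and every value here are assumptions, since the real settings live in the ciphertext above:

    type: S3
    config:
      bucket: thanos-metrics
      endpoint: s3.example.com
      access_key: EXAMPLEKEY
      secret_key: EXAMPLESECRET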

View File

@@ -9,6 +9,6 @@ namespace: pg-ha
 helmCharts:
 - name: cloudnative-pg
   releaseName: pg-controller
-  version: 0.22.1
+  version: 0.23.0
   valuesFile: values.yaml
   repo: https://cloudnative-pg.io/charts/

View File

@@ -1,20 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: prometheus
resources:
- namespace.yaml
# prometheus-operator crds
- https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.70.0/bundle.yaml
- prometheus.yaml
- thanos-objstore-config.sealedsecret.yaml
# thanos deployment from kube-thanos project
- thanos-store.statefulset.yaml
- thanos-query.deployment.yaml
images:
- name: thanos
  newName: quay.io/thanos/thanos
  newTag: v0.37.2

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder

View File

@@ -1,78 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/metrics
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["get"]
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: prometheus # needs to be the same as in the kustomization.yaml
---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: prometheus
spec:
  securityContext:
    runAsUser: 65534 # same as the thanos sidecar
  resources:
    requests:
      memory: 400Mi
  retention: 730d
  retentionSize: 3GiB
  serviceAccountName: prometheus
  enableAdminAPI: false
  serviceMonitorNamespaceSelector: {}
  serviceMonitorSelector: {}
  thanos:
    version: v0.34.1
    objectStorageConfig:
      # loads the config from a secret named thanos-objstore-config in the same namespace
      key: thanos.yaml
      name: thanos-objstore-config
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus
spec:
  type: ClusterIP
  ports:
  - port: 9090
    targetPort: 9090
    protocol: TCP
  selector:
    prometheus: prometheus

View File

@@ -1,16 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: thanos-objstore-config
  namespace: prometheus
spec:
  encryptedData:
    thanos.yaml: AgByW/LKzPh0QeNsHR8Us4bJ/0chIQErhfh5plY1tjqiZyNLlxZ+NygYYzVggW02k4gAsKs68trbLBbeTTEhpKYP8hUphNb13lrgp07wYpOQjUF57i6RjPM2QNJpO0qLSk/nOPIOtR3XKn+nXxdJDmh3j5y0zxVz5O7MLh7adwOaHlyWTLMJjI1cda8YljDp2FYs24lHHMw4gXAYUecGDJNQqw5Xy9IiGh8kBbcKe3j6bVCj1yxPbHszmvZ2s+Q+mnndXnoeLMhwjZhMF8/PETxmSZ2bs41k3lHm/2rcPQCJsl9CuJEGKhu6ndKrVhtury4/US/FheEOoGF0YZk/AQMHII/mxy8haPNxtQTDs4rfYz/BA8cMMZll44wxOY9gAOmhm3sG6GI9wcB1Z65p98xSuDaInknO80l07vwMAAvmrZbT53Fmefrxl+jE1pImcGEsL0MfP621nTXlOBW9keF+6aUOubrwjPKKSXdqZU21acNbaIeRQSJyaOBStAKLfnPFmaryGisgNu0hCk/WmszZ0/s/ilvdMdAD6kKoiKL/NWfXtHATh/fnd76bKfSzNQk6e+WWfomToYVU0HRgAaWnIzjB9Q4tjxkbRwteEodU+K1BvD4xQ0sfQB2vHlDjQGC3pjIUFCWG0SzQGb7oe6+X2CJpcNIBHwF661iELJpJkg8dLsPtwb+8Rj6BL+ZtyVKYv18nDNON0WVpwJb/IHHSmxfYD5b/q6fATCFj55IXK5Nr4VO65a2Sv5Iv0/TTUVkwb8dkMmwfs5qcQiZ4oKWx8Ol6GkjDZrFARUtHQ/9KiZ9xDj3tPic2TeQfKr27sgc4lEL8RSxaRKHkkxIAioea3YgFfBm7ZfoxMlzJnQ1vI2vDvJcRXhWKSGdXiKOddwLSVMZFsSRRi9AxH87Sjt7j1wvsA7xgBqc=
  template:
    metadata:
      creationTimestamp: null
      name: thanos-objstore-config
      namespace: prometheus
    type: Opaque

View File

@@ -1,55 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: thanos-querier
  labels:
    app: thanos-querier
spec:
  replicas: 1
  selector:
    matchLabels:
      app: thanos-querier
  template:
    metadata:
      labels:
        app: thanos-querier
    spec:
      containers:
      - name: thanos
        image: thanos
        args:
        - query
        - --log.level=debug
        - --query.replica-label=replica
        - --endpoint=dnssrv+_grpc._tcp.thanos-store:10901
        - --endpoint=dnssrv+_grpc._tcp.prometheus:9090
        ports:
        - name: http
          containerPort: 10902
        - name: grpc
          containerPort: 10901
        livenessProbe:
          httpGet:
            port: http
            path: /-/healthy
        readinessProbe:
          httpGet:
            port: http
            path: /-/ready
---
apiVersion: v1
kind: Service
metadata:
  name: thanos-querier
spec:
  selector:
    app: thanos-querier
  ports:
  - name: http
    protocol: TCP
    port: 10902
    targetPort: http
  - name: grpc
    protocol: TCP
    port: 10901
    targetPort: grpc

View File

@@ -1,71 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: thanos-store
  labels:
    app: thanos-store
spec:
  replicas: 1
  selector:
    matchLabels:
      app: thanos-store
  template:
    metadata:
      labels:
        app: thanos-store
        thanos-store-api: "true"
    spec:
      containers:
      - name: thanos
        image: thanos
        args:
        - store
        - --log.level=debug
        - --data-dir=/data
        - --grpc-address=0.0.0.0:10901
        - --http-address=0.0.0.0:10902
        - --objstore.config-file=/etc/secret/thanos.yaml
        - --index-cache-size=500MB
        - --chunk-pool-size=500MB
        ports:
        - name: http
          containerPort: 10902
        - name: grpc
          containerPort: 10901
        livenessProbe:
          httpGet:
            port: 10902
            path: /-/healthy
        readinessProbe:
          httpGet:
            port: 10902
            path: /-/ready
        volumeMounts:
        - name: thanos-objstore-config
          mountPath: /etc/secret
          readOnly: true
        - name: thanos-data
          mountPath: /data
      volumes:
      - name: thanos-objstore-config
        secret:
          secretName: thanos-objstore-config
      - name: thanos-data
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: thanos-store
  name: thanos-store
spec:
  ports:
  - name: grpc
    port: 10901
    targetPort: 10901
  - name: http
    port: 10902
    targetPort: 10902
  selector:
    app: thanos-store

View File

@@ -1,22 +1,20 @@
 apiVersion: argoproj.io/v1alpha1
 kind: Application
 metadata:
-  name: prometheus-application
+  name: grafana-application
   namespace: argocd
 spec:
-  project: infrastructure
+  project: apps
   source:
-    repoURL: git@github.com:moll-re/bootstrap-k3s-infra.git
+    repoURL: ssh://git@git.kluster.moll.re:2222/remoll/k3s-infra.git
     targetRevision: main
-    path: infrastructure/prometheus
+    path: apps/grafana
   destination:
     server: https://kubernetes.default.svc
-    namespace: monitoring
+    namespace: grafana
   syncPolicy:
     automated:
       prune: true
       selfHeal: true
-    syncOptions:
-      - Replace=true
-      # because the prom crds exceed the default 256Ki limit

View File

@@ -20,7 +20,7 @@ resources:
 - traefik/
 - external-dns/
 - external-services/
-- prometheus/application.yaml
+- monitoring/application.yaml
 - authelia/
 # simple apps
@@ -35,7 +35,7 @@ resources:
 - linkding/
 - media/
 - minecraft/application.yaml
-- monitoring/
+- grafana/
 - ntfy/
 - paperless/
 - recipes/

View File

@@ -3,12 +3,13 @@ kind: Application
 metadata:
   name: monitoring-application
   namespace: argocd
 spec:
-  project: apps
+  project: infrastructure
   source:
-    repoURL: ssh://git@git.kluster.moll.re:2222/remoll/k3s-infra.git
+    repoURL: git@github.com:moll-re/bootstrap-k3s-infra.git
     targetRevision: main
-    path: apps/monitoring
+    path: infrastructure/monitoring
   destination:
     server: https://kubernetes.default.svc
     namespace: monitoring
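
With both Applications reshuffled between projects, a quick way to confirm Argo CD picked up the renamed apps is to query the Application CRD directly; a sketch:

    kubectl get applications.argoproj.io -n argocd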