307 Commits

SHA1 Message Date
ab96719964 small fixes 2024-05-15 17:57:15 +02:00
0215ecaf87 add (broken) deployment 2024-05-13 14:27:34 +02:00
0eaa9fe774 empty line removed 2024-05-13 14:26:53 +02:00
192e2e869f minecraft 2024-05-13 14:25:49 +02:00
0fd9936db5 gitea runner improvements 2024-05-13 14:25:49 +02:00
1a9d0fc00c Merge pull request 'Update jellyfin/jellyfin Docker tag to v10.9.0' (#94) from renovate/jellyfin-jellyfin-10.x into main
Reviewed-on: #94
2024-05-12 11:07:57 +00:00
a8dfca3c43 Update jellyfin/jellyfin Docker tag to v10.9.0 2024-05-11 19:01:08 +00:00
42e2bc35a5 Merge pull request 'Update ghcr.io/gethomepage/homepage Docker tag to v0.8.13' (#90) from renovate/ghcr.io-gethomepage-homepage-0.x into main
Reviewed-on: #90
2024-05-10 08:46:45 +00:00
7e2e5a56db Merge branch 'main' into renovate/ghcr.io-gethomepage-homepage-0.x 2024-05-10 08:45:47 +00:00
01279dd023 Merge pull request 'Update octodns/octodns Docker tag to v2024.05' (#91) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #91
2024-05-08 13:29:51 +00:00
d6ce07a8a0 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.6.0' (#92) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #92
2024-05-08 13:28:59 +00:00
6eb617086a Update ghcr.io/mealie-recipes/mealie Docker tag to v1.6.0 2024-05-07 12:00:58 +00:00
8137bf8f1b Update apps/immich/kustomization.yaml 2024-05-06 17:59:00 +00:00
5f1dcaabba Update octodns/octodns Docker tag to v2024.05 2024-05-06 15:30:45 +00:00
37bdb32f43 Update ghcr.io/gethomepage/homepage Docker tag to v0.8.13 2024-05-06 05:30:44 +00:00
ca15a6497c Add apps/media/ingress.yaml 2024-05-04 12:10:12 +00:00
095d2d6392 remove limits 2024-05-04 12:47:10 +02:00
b2993c9395 Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.5' (#86) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #86
2024-05-04 09:06:57 +00:00
d7b0f658de Merge pull request 'Update actualbudget/actual-server Docker tag to v24.5.0' (#89) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #89
2024-05-04 09:06:26 +00:00
391c71729b Update actualbudget/actual-server Docker tag to v24.5.0 2024-05-03 17:00:53 +00:00
bee5dd0c0b Update owncloud/ocis Docker tag to v5.0.3 2024-05-02 16:30:46 +00:00
25ab46e69a change base image for k8s conformity 2024-05-02 17:26:36 +02:00
123412e073 small naming mistake 2024-05-02 17:15:30 +02:00
39818887fa add gitea actions 2024-05-02 17:12:43 +02:00
0700609568 Update homeassistant/home-assistant Docker tag to v2024.5 2024-05-01 19:30:44 +00:00
198b24132e Update Helm release metallb to v0.14.5 2024-04-27 09:13:11 +00:00
f6e45d089b Update docker.io/bitnami/sealed-secrets-controller Docker tag to v0.26.2 2024-04-27 09:12:35 +00:00
23eab57208 Update Helm release cloudnative-pg to v0.21.0 2024-04-25 12:01:00 +00:00
a94521f197 update ocis 2024-04-21 12:59:19 +02:00
38f58d86c9 new versiob 2024-04-21 12:26:44 +02:00
76d1c51157 improve thanos/prometheus retention 2024-04-20 19:04:44 +02:00
7aaeeded89 update and improve grafana 2024-04-20 18:37:19 +02:00
9b93016f93 bump immich version 2024-04-20 18:34:52 +02:00
aaf624bb42 bump immich version 2024-04-20 18:27:01 +02:00
8536d91288 Update Helm release immich to v0.6.0 2024-04-20 16:26:29 +00:00
3f62bee199 reduce gitea load by ditching redis 2024-04-20 18:22:16 +02:00
f9f39818a1 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.1' (#77) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #77
2024-04-17 16:29:25 +00:00
a73e6dc4db Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.1 2024-04-17 14:01:07 +00:00
1df7abf987 Merge pull request 'Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12' (#76) from renovate/ghcr.io-gethomepage-homepage-0.x into main
Reviewed-on: #76
2024-04-17 12:54:36 +00:00
0e1bb58c24 Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12 2024-04-17 12:54:36 +00:00
fcd2d2eaa2 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.0' (#75) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #75
2024-04-17 12:54:14 +00:00
455790d3c6 Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.0 2024-04-16 16:31:00 +00:00
cdbcdba25d Update Helm release gitea to v10.1.4 2024-04-16 10:58:27 +00:00
9dcb06678b remove old filesync deployments (nextcloud) 2024-04-16 12:56:54 +02:00
a4fe0a7fe4 add homepage as a deployment 2024-04-16 12:43:33 +02:00
ece9faa60c Merge pull request 'Update octodns/octodns Docker tag to v2024.04' (#72) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #72
2024-04-16 07:54:27 +00:00
d4bea2994c Merge pull request 'Update Helm release traefik to v27' (#66) from renovate/traefik-27.x into main
Reviewed-on: #66
2024-04-16 07:53:47 +00:00
0ec3bf9ea8 Update Helm release traefik to v27 2024-04-12 08:01:16 +00:00
0c5760b22b Update octodns/octodns Docker tag to v2024.04 2024-04-10 16:30:48 +00:00
e144722d59 fix cnpg syncing issues 2024-04-10 14:01:57 +02:00
bf6e7aa10c mabye like that? 2024-04-06 14:33:57 +02:00
ae53c44428 fix servicemonitors 2024-04-06 14:24:06 +02:00
05d5b02347 Update actualbudget/actual-server Docker tag to v24.4.0 2024-04-06 12:22:05 +00:00
337237a0f8 Update ghcr.io/mealie-recipes/mealie Docker tag to v1.4.0 2024-04-06 12:21:39 +00:00
ccc4b13c35 Update adguard/adguardhome Docker tag to v0.107.48 2024-04-06 12:13:15 +00:00
a6a9c7c217 update home assistant and fix prometheus 2024-04-06 14:12:07 +02:00
bc0f29f028 update immich 2024-04-03 14:11:08 +02:00
e2c9d73728 update to dashboards 2024-04-01 13:23:46 +02:00
442c07f031 bad configmap 2024-04-01 13:11:11 +02:00
8fd9fa6f11 better dashboards 2024-04-01 12:21:50 +02:00
516d7e8e09 like that? 2024-04-01 11:57:06 +02:00
acf9d34b10 Merge branch 'main' of ssh://git.kluster.moll.re:2222/remoll/k3s-infra 2024-04-01 11:47:11 +02:00
3ffead0a14 try fixing grafana 2024-04-01 11:47:01 +02:00
b6bdc09efc Update docker.io/bitnami/sealed-secrets-controller Docker tag to v0.26.1 2024-04-01 09:33:23 +00:00
49b21cde52 proper backup config 2024-03-31 19:37:18 +02:00
deed24aa01 try fixing homeassistant again 2024-03-31 19:28:19 +02:00
9cfb98248d update immich 2024-03-31 19:08:14 +02:00
7bc4beefce Update Helm release cloudnative-pg to v0.20.2 2024-03-31 15:19:09 +00:00
ce9ff68c26 Update binwiederhier/ntfy Docker tag to v2.10.0 2024-03-31 15:18:06 +00:00
8249e7ef01 Update adguard/adguardhome Docker tag to v0.107.46 2024-03-31 15:15:00 +00:00
14e65df483 Update Helm release metallb to v0.14.4 2024-03-31 15:14:18 +00:00
f6fef4278b enable wal for grafana? 2024-03-29 00:55:34 +01:00
ef50df8386 slight mistake 2024-03-28 19:45:27 +01:00
b6df7604ed add missing references 2024-03-28 19:22:59 +01:00
a03d869d0c added dashboards 2024-03-28 19:20:28 +01:00
1063349fbe use sealedsecret 2024-03-28 19:17:19 +01:00
b88c212b57 now with correct secret 2024-03-28 19:10:01 +01:00
38a522a8d6 cleaner monitoring 2024-03-28 19:07:42 +01:00
046936f8f6 fix 2024-03-28 14:04:07 +01:00
309cbc08f5 so? 2024-03-28 13:55:57 +01:00
08b4c7eb5e switch ocis to nfs-provisioner 2024-03-28 13:52:44 +01:00
58e632e0b8 migrate mealie pvc 2024-03-28 13:21:50 +01:00
30d02edebc update rss 2024-03-28 13:19:53 +01:00
e30bfe64ae dum dum 2024-03-28 12:59:51 +01:00
764a3eafb7 switch some apps over to nfs-client 2024-03-28 12:40:48 +01:00
eff07665de add nfs-provisioner with sensible path template 2024-03-28 12:29:16 +01:00
571aebe78d now? 2024-03-27 14:15:13 +01:00
91a2ae5fe8 annoying 2024-03-27 14:13:22 +01:00
f12c21ef18 update vikunja 2024-03-27 14:03:55 +01:00
2a96b288bf or like that? 2024-03-27 09:39:58 +01:00
6f3a5aeab2 okey 2024-03-27 09:37:51 +01:00
b001bd3efc maybe like that? 2024-03-27 09:36:22 +01:00
b54794df35 dum-dum 2024-03-27 09:19:00 +01:00
51c8f7c092 fix the db location 2024-03-27 09:15:25 +01:00
cfb1a87a5b now with correct api path 2024-03-27 09:07:01 +01:00
10483431c6 trying todos like that 2024-03-27 09:04:40 +01:00
3a9450da9d now? 2024-03-27 08:34:48 +01:00
374e23ba1e trying to fix immich 2024-03-27 08:32:46 +01:00
66f703f5e1 update to correct location 2024-03-27 08:25:53 +01:00
4b05b53d72 small fixes 2024-03-27 00:38:34 +01:00
cfbc7fcd0d disable typesense 2024-03-27 00:31:41 +01:00
ffed2aea50 add media back 2024-03-27 00:27:57 +01:00
e674bf5b94 slim down the file sync 2024-03-27 00:12:50 +01:00
133af74ae0 missing namespace resource 2024-03-27 00:05:55 +01:00
f648064304 remove nfs-client 2024-03-26 23:50:27 +01:00
c7180f793a trying like that 2024-03-26 22:58:17 +01:00
4fcdaad297 move prometheus to its own config 2024-03-26 22:13:02 +01:00
f4b99ca037 now perhaps? 2024-03-26 11:16:33 +01:00
588bf774f9 or like that? 2024-03-26 10:58:44 +01:00
e18c661dbd typo 2024-03-26 10:57:18 +01:00
7d65ffea6a remove ocis:// 2024-03-26 10:56:34 +01:00
e460b5324a try differently configured todos 2024-03-26 10:55:25 +01:00
6fe166e60c manage todos 2024-03-24 15:31:59 +01:00
6ceb3816fb cleanup with regards to upcoming migration 2024-03-23 11:45:11 +01:00
19b63263e6 whoopsie 2024-03-22 14:57:17 +01:00
20d46d89d2 also manage ocis 2024-03-22 14:54:30 +01:00
7aee6c7cf0 basic auth maybe? 2024-03-22 14:53:29 +01:00
443da20ff9 steps towards a completely managed cluster 2024-03-20 23:45:08 +01:00
84a47b15b6 increase renovate frequency 2024-03-12 21:28:35 +01:00
40259ee57e Update apps/immich/kustomization.yaml 2024-03-12 14:01:08 +00:00
619368a2fd Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.3' (#54) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #54
2024-03-12 09:04:37 +00:00
3288966b95 Merge pull request 'Update octodns/octodns Docker tag to v2024.03' (#55) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #55
2024-03-12 09:04:16 +00:00
d12d50b906 Update apps/immich/kustomization.yaml 2024-03-12 09:03:55 +00:00
c7f0221062 Update octodns/octodns Docker tag to v2024.03 2024-03-12 09:02:04 +00:00
7819867091 Update homeassistant/home-assistant Docker tag to v2024.3 2024-03-12 09:01:41 +00:00
dd4c3d7a36 Update apps/immich/kustomization.yaml 2024-03-12 08:37:11 +00:00
e66905402e Merge pull request 'Update Helm release immich to v0.4.0' (#47) from renovate/immich-0.x into main
Reviewed-on: #47
2024-03-12 08:35:56 +00:00
1bdb4522c3 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.3.2' (#53) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #53
2024-03-12 08:32:10 +00:00
b5845479c2 Update ghcr.io/mealie-recipes/mealie Docker tag to v1.3.2 2024-03-10 19:01:42 +00:00
f2f31c4f4e Merge pull request 'Update binwiederhier/ntfy Docker tag to v2.9.0' (#52) from renovate/binwiederhier-ntfy-2.x into main
Reviewed-on: #52
2024-03-10 09:57:10 +00:00
ded829500c Update binwiederhier/ntfy Docker tag to v2.9.0 2024-03-09 11:04:03 +00:00
f762f5451b Merge pull request 'Update adguard/adguardhome Docker tag to v0.107.45' (#51) from renovate/adguard-adguardhome-0.x into main
Reviewed-on: #51
2024-03-08 07:42:50 +00:00
709f21998e Update adguard/adguardhome Docker tag to v0.107.45 2024-03-07 18:01:21 +00:00
47f091be83 Merge pull request 'Update actualbudget/actual-server Docker tag to v24.3.0' (#48) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #48
2024-03-07 17:31:17 +00:00
da8be916bf fix bad naming 2024-03-07 13:21:05 +01:00
ad67acb9e7 again 2024-03-07 13:17:50 +01:00
5a7b5a82d7 maybe the service was misconfigured 2024-03-07 13:16:14 +01:00
2c32db61ec why? 2024-03-07 13:13:54 +01:00
141b80d15c man... 2024-03-07 13:11:08 +01:00
bf1d4badbe or directly use the dns name 2024-03-07 13:08:29 +01:00
be48049e22 fix bad syntax 2024-03-07 13:01:21 +01:00
3a629284f3 perhaps now 2024-03-07 12:59:04 +01:00
28c92e727f last chance 2024-03-06 14:48:14 +01:00
9a65c531f1 now? 2024-03-06 14:37:23 +01:00
52a086df73 come on 2024-03-06 14:34:19 +01:00
b728e21a15 expose grpc of store 2024-03-06 14:31:04 +01:00
da32c9c2ce neew 2024-03-06 14:25:47 +01:00
846390600e let's try with query as well 2024-03-06 14:24:07 +01:00
18d7a6b4cb or maybe like that? 2024-03-06 11:34:15 +01:00
31c8e91502 actually don't specify data 2024-03-06 11:31:15 +01:00
f0adf6b5db change user of prometheus to make thanos happy 2024-03-06 08:14:55 +01:00
b24ae9c698 with correct image 2024-03-05 16:44:42 +01:00
f3c108e362 fix 2024-03-05 16:41:54 +01:00
d2a8d92864 also use thanos object store 2024-03-05 16:39:15 +01:00
10816c4bd9 Update actualbudget/actual-server Docker tag to v24.3.0 2024-03-03 20:01:34 +00:00
aca0d4ba21 Update Helm release immich to v0.4.0 2024-03-03 20:01:27 +00:00
1ad56fd27e Merge pull request 'Update Helm release traefik to v26.1.0' (#42) from renovate/traefik-26.x into main
Reviewed-on: #42
2024-03-03 19:33:13 +00:00
773a155627 Update Helm release traefik to v26.1.0 2024-03-03 19:33:13 +00:00
61945b3507 Merge pull request 'Update Helm release metallb to v0.14.3' (#34) from renovate/metallb-0.x into main
Reviewed-on: #34
2024-03-03 19:32:16 +00:00
4aa21cb0cd Update Helm release metallb to v0.14.3 2024-03-03 19:32:16 +00:00
d233ab96eb Merge pull request 'Update Helm release gitea to v10.1.3' (#46) from renovate/gitea-10.x into main
Reviewed-on: #46
2024-03-03 19:31:04 +00:00
df581e0110 Update Helm release gitea to v10.1.3 2024-03-03 19:31:04 +00:00
8a114b9384 remove homarr 2024-03-03 20:30:06 +01:00
ab6506f4f2 update immich 2024-02-21 18:35:13 +01:00
87242d293a Merge pull request 'Update Helm release homarr to v1.0.6' (#38) from renovate/homarr-1.x into main
Reviewed-on: #38
2024-02-13 10:34:15 +00:00
11d46ec295 Merge pull request 'Update Helm release gitea to v10.1.1' (#35) from renovate/gitea-10.x into main
Reviewed-on: #35
2024-02-13 10:33:42 +00:00
1b3702c4c8 Update Helm release gitea to v10.1.1 2024-02-13 10:33:42 +00:00
9b68b4a915 lets be more generous with memory 2024-02-11 18:15:11 +01:00
18889d7391 add other recipes 2024-02-11 11:28:30 +01:00
a38ad1d7e6 bye bye 2024-02-10 19:35:22 +01:00
edcb9158f5 what now? 2024-02-10 19:21:04 +01:00
71b1c252f3 turns out it was important 2024-02-10 19:17:28 +01:00
b30f44d2c6 last chance 2024-02-10 19:16:08 +01:00
85abf0fda6 with services? 2024-02-10 19:04:08 +01:00
5e21ceaad3 lets try this 2024-02-10 18:58:20 +01:00
3f5c1a5a5c add configmap 2024-02-10 10:56:59 +01:00
0195833fc3 service account not needed 2024-02-10 10:54:41 +01:00
64835e16de slight fix 2024-02-10 10:53:20 +01:00
4e11a33855 correct backend 2024-02-10 10:46:38 +01:00
bad024861a add recipes 2024-02-10 10:45:53 +01:00
fe5d6a9014 Merge pull request 'Update adguard/adguardhome Docker tag to v0.107.44' (#39) from renovate/adguard-adguardhome-0.x into main
Reviewed-on: #39
2024-02-08 09:24:43 +00:00
f2898d7e0b Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.2' (#40) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #40
2024-02-08 09:24:05 +00:00
f67f0c8889 Update homeassistant/home-assistant Docker tag to v2024.2 2024-02-07 21:02:14 +00:00
0ccb17d8e1 Update adguard/adguardhome Docker tag to v0.107.44 2024-02-07 11:01:45 +00:00
bb6d417937 Merge pull request 'Update actualbudget/actual-server Docker tag to v24.2.0' (#36) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #36
2024-02-07 10:09:46 +00:00
4e2ebe2540 Merge pull request 'Update octodns/octodns Docker tag to v2024' (#37) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #37
2024-02-07 10:09:26 +00:00
c5310b0f00 Update Helm release homarr to v1.0.6 2024-02-04 17:01:35 +00:00
46ef973f70 Update octodns/octodns Docker tag to v2024 2024-02-03 22:02:18 +00:00
c12d2dc7a6 whoopsie 2024-02-03 22:27:29 +01:00
e28c6ffd52 add physics 2024-02-03 22:19:09 +01:00
7ba6860ea0 Update actualbudget/actual-server Docker tag to v24.2.0 2024-02-03 21:01:51 +00:00
33c23ee42b Merge pull request 'Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1' (#31) from renovate/ghcr.io-immich-app-immich-machine-learning-1.x into main
Reviewed-on: #31
2024-02-03 20:58:07 +00:00
b2f8c8bced Merge branch 'main' into renovate/ghcr.io-immich-app-immich-machine-learning-1.x 2024-02-03 20:57:54 +00:00
d5277d3d6a Merge pull request 'Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1' (#32) from renovate/ghcr.io-immich-app-immich-server-1.x into main
Reviewed-on: #32
2024-02-03 20:56:19 +00:00
e3c90f5ede Merge branch 'main' into renovate/ghcr.io-immich-app-immich-server-1.x 2024-02-03 20:55:47 +00:00
eb5bda63db Merge pull request 'Update Helm release grafana to v7.3.0' (#26) from renovate/grafana-7.x into main
Reviewed-on: #26
2024-02-03 20:54:45 +00:00
a10a216f0e Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1 2024-01-31 20:01:05 +00:00
3cf9fd0b87 Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1 2024-01-31 20:01:03 +00:00
ea1fa1637f Update Helm release grafana to v7.3.0 2024-01-30 15:00:50 +00:00
96abe2a0f5 auto admin 2024-01-23 18:16:40 +01:00
9623f33b59 Merge pull request 'Update Helm release gitea to v10' (#16) from renovate/gitea-10.x into main
Reviewed-on: #16
2024-01-22 10:30:17 +00:00
b065fc7e59 idioto 2024-01-22 11:27:58 +01:00
617ed5601c allow renovate to fetch release notes 2024-01-22 11:11:34 +01:00
7e21ce4181 Update Helm release gitea to v10 2024-01-22 10:00:35 +00:00
eeaed091ab Merge pull request 'Update Helm release metallb to v0.13.12' (#30) from renovate/metallb-0.x into main
Reviewed-on: #30
2024-01-16 08:59:45 +00:00
ee52d2b777 Update Helm release metallb to v0.13.12 2024-01-15 19:00:31 +00:00
384e9fbaec no service account needed 2024-01-15 19:12:19 +01:00
606aded35f argo manage metallb 2024-01-15 19:03:49 +01:00
a3aa8888e9 or like that? 2024-01-14 17:31:24 +01:00
aaeb43e9c3 let's check if we get ips like that 2024-01-14 17:27:37 +01:00
a9b1d02a7e keeping some ips here 2024-01-14 17:22:57 +01:00
76b49270eb fix type 2024-01-14 12:58:42 +01:00
9b57715f92 bad yaml 2024-01-14 12:56:23 +01:00
85a96cf87b bump version 2024-01-14 12:54:33 +01:00
78b4be8fbd next try 2024-01-14 12:51:14 +01:00
7bc10b57ce lets try adding thanos 2024-01-14 12:41:03 +01:00
de26a052e8 QOL improvements 2024-01-11 22:05:05 +01:00
28ff769757 Deploy full on octodns 2024-01-11 21:57:02 +01:00
6a58ea337e forgot secret 2024-01-11 21:38:24 +01:00
2af279c161 still crashes, now due to auth 2024-01-11 21:37:29 +01:00
c26997ff83 single run only 2024-01-11 18:39:13 +01:00
a354464f6e try with local directory 2024-01-11 18:26:37 +01:00
268a9f3a7a correct env vars and labels 2024-01-11 18:12:12 +01:00
4ddeaf6c99 try this 2024-01-11 18:08:35 +01:00
b6f9a818af Execute 2nd command as well 2024-01-11 18:04:55 +01:00
f4670aa471 Add ddns 2024-01-11 17:59:56 +01:00
72a2914c24 correct git target 2024-01-11 17:52:29 +01:00
1d5bc8a9c1 why? 2024-01-11 17:51:01 +01:00
892c412fd9 let's tune it down 2024-01-11 17:46:25 +01:00
b6f7ead955 whoopsie 2024-01-11 17:44:58 +01:00
f033ba16eb correct version 2024-01-11 17:43:31 +01:00
f3ae2c424b use octodns 2024-01-11 17:42:35 +01:00
36035ee84d bump immich version 2024-01-11 10:08:12 +01:00
50679b400a Merge pull request 'Update actualbudget/actual-server Docker tag to v24' (#28) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #28
2024-01-10 16:08:35 +00:00
a68fb5f0a7 Update actualbudget/actual-server Docker tag to v24 2024-01-10 13:00:43 +00:00
5792367b8b Add finance to auto deploy 2024-01-10 13:15:42 +01:00
3699b79f1a let's try these monitorings 2024-01-08 15:48:38 +01:00
e473abda12 Merge pull request 'Update Helm release grafana to v7.0.21' (#25) from renovate/grafana-7.x into main
Reviewed-on: #25
2024-01-08 13:01:14 +00:00
f67f586006 Update Helm release grafana to v7.0.21 2024-01-08 10:00:33 +00:00
61e1276f02 maybe like that 2024-01-07 12:30:51 +01:00
111fd35fc3 needed? 2024-01-07 12:18:06 +01:00
cc4148fb8a correct crds 2024-01-07 12:16:47 +01:00
f1e624985f come on 2024-01-07 12:15:10 +01:00
c8d7d3c854 use traefik 2024-01-07 12:12:46 +01:00
4880503609 Is actually a token 2024-01-07 12:06:53 +01:00
f905ce1611 maybe it wes a token actually? 2024-01-07 12:05:42 +01:00
ecfc65ecdd try like this? 2024-01-07 11:59:41 +01:00
7da1d705a4 update authorization 2024-01-07 11:51:20 +01:00
299cbea97e change ingress slightly 2024-01-07 11:41:05 +01:00
b633d61920 update whoami 2024-01-07 11:39:10 +01:00
bfb8244e59 made a dum dum 2024-01-07 11:37:38 +01:00
33c2df9fa3 add external dns 2024-01-07 11:35:52 +01:00
3d84d6bed1 does servicemonitor accept this? 2024-01-04 18:29:18 +01:00
cf6a931097 fix port names 2024-01-04 18:27:03 +01:00
53c3865072 fix label syntax 2024-01-04 18:23:32 +01:00
d09a3509af trying to monitor syncthing 2024-01-04 18:21:26 +01:00
8c0abc16c4 Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024' (#24) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #24
2024-01-04 08:45:45 +00:00
399969677f Merge pull request 'Update Helm release immich to v0.3.1' (#22) from renovate/immich-0.x into main
Reviewed-on: #22
2024-01-04 08:44:55 +00:00
762756310a Update homeassistant/home-assistant Docker tag to v2024 2024-01-03 21:00:38 +00:00
ec964be7c3 whoopsie 2023-12-31 18:49:54 +01:00
0603da76b2 update gitea metric collection 2023-12-31 18:40:57 +01:00
a437c4228e update some scraping config 2023-12-31 18:26:45 +01:00
d5aab95186 try as a string 2023-12-31 17:58:15 +01:00
3acb329730 try again 2023-12-31 17:55:22 +01:00
73ce4e340f try again 2023-12-31 17:44:42 +01:00
0d4b6f4605 remove label requiremetns 2023-12-31 17:37:51 +01:00
deeb35bbb6 test monitoring 2023-12-31 17:34:11 +01:00
d4c658a28c match all servicemonitors? 2023-12-31 17:13:58 +01:00
1fcebe033b fix annotations 2023-12-31 17:06:13 +01:00
8fe51863f4 fix tag 2023-12-30 10:48:46 +01:00
c4eda4e75d fix tag 2023-12-30 10:45:23 +01:00
9490015728 maybe like that? 2023-12-30 10:42:23 +01:00
a641df167f remove port names 2023-12-30 10:39:55 +01:00
21d100fb62 update service config 2023-12-30 10:38:59 +01:00
26b06c553a deploy syncthing 2023-12-30 10:30:05 +01:00
d51bfcf7db Merge pull request 'Update Helm release homarr to v1.0.4' (#23) from renovate/homarr-1.x into main
Reviewed-on: #23
2023-12-27 17:27:57 +00:00
788c2436fc Update Helm release homarr to v1.0.4 2023-12-27 17:00:32 +00:00
c9e6d08dcd temporary home page 2023-12-26 14:56:57 +01:00
6b2e9f7165 small updates 2023-12-26 14:54:49 +01:00
8618468534 more ddns verbosity 2023-12-26 14:52:09 +01:00
94d6c0f523 update to match bash syntax 2023-12-26 14:37:43 +01:00
9aca8e9e0b add automatic dns updates 2023-12-26 14:34:57 +01:00
72b7734535 postgres metrics 2023-12-24 14:42:04 +01:00
28f33f8ff7 update misconfigs 2023-12-24 14:09:11 +01:00
4cf26679c6 add prometheus monitoring 2023-12-24 13:44:22 +01:00
1cd4df8b8f update prom cfg 2023-12-24 11:33:32 +01:00
adeb333954 add svc 2023-12-23 20:40:34 +01:00
e6bd080c6e switch to prometheus operator 2023-12-23 20:20:27 +01:00
c9f883eaa6 Update Helm release immich to v0.3.1 2023-12-23 16:00:31 +00:00
014309bad6 add prometheus 2023-12-23 15:39:03 +01:00
c61698fad9 correct vector version 2023-12-22 01:21:35 +01:00
8c21d58529 vectors finally 2023-12-22 00:51:38 +01:00
722b7c3fb6 correct pg version 2023-12-22 00:34:44 +01:00
b852da0321 try bumping the version 2023-12-22 00:19:32 +01:00
9c5affeff6 update immich 2023-12-22 00:06:00 +01:00
b6c2f57acf new db 2023-12-22 00:03:18 +01:00
2e4e033c36 local postgres 2023-12-22 00:00:30 +01:00
285a7541ca fix 2023-12-21 18:02:22 +01:00
dbf58027d8 trying cloudnative postgres 2023-12-21 18:00:20 +01:00
2f9019b6ba fixing pvc 2023-12-21 12:37:29 +01:00
1743ffca74 grafana cleanup 2023-12-21 12:28:48 +01:00
ea7527c143 Merge pull request 'Update Helm release grafana to v7' (#20) from renovate/grafana-7.x into main
Reviewed-on: #20
2023-12-21 11:17:01 +00:00
c27b289866 Update Helm release grafana to v7 2023-12-21 10:00:38 +00:00
4cbd95fd78 Merge pull request 'Update Helm release grafana to v6.61.2' (#19) from renovate/grafana-6.x into main
Reviewed-on: #19
2023-12-21 09:12:50 +00:00
5cfb2a02e3 Merge pull request 'Update Helm release telegraf to v1.8.39' (#18) from renovate/telegraf-1.x into main
Reviewed-on: #18
2023-12-21 09:12:21 +00:00
2f31cd6934 Update Helm release grafana to v6.61.2 2023-12-18 13:00:30 +00:00
4fdd4a39f5 Update Helm release telegraf to v1.8.39 2023-12-18 12:00:33 +00:00
190 changed files with 2107 additions and 3163 deletions

.gitignore

@@ -1,2 +1,6 @@
# Kubernetes secrets
*.secret.yaml
charts/
main.key
# Helm Chart files
charts/

.gitmodules (new file)

@@ -0,0 +1,6 @@
[submodule "infrastructure/external-dns/octodns"]
path = infrastructure/external-dns/octodns
url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git
[submodule "apps/monitoring/dashboards"]
path = apps/monitoring/dashboards
url = ssh://git@git.kluster.moll.re:2222/remoll/grafana-dashboards.git

README.md

@@ -1,11 +1,9 @@
# Kluster setup and IaaC using argoCD
### Initial setup
#### Requirements:
- A running k3s instance run:
- `metalLB` deployed
- A running k3s instance
- `sealedsecrets` deployed
#### Installing argo and the app-of-apps
@@ -29,5 +27,21 @@ The app-of-apps will bootstrap a fully featured cluster with the following compo
- immich
- ...
#### Recap
- install sealedsecrets see [README](./infrastructure/sealedsecrets/README.md)
```bash
kubectl apply -k infrastructure/sealedsecrets
kubectl apply -f infrastructure/sealedsecrets/main.key
kubectl delete pod -n kube-system -l name=sealed-secrets-controller
```
- install argocd
```bash
kubectl apply -k infrastructure/argocd
```
- wait...
### Adding an application
todo

apps/adguard/ingress.yaml

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: adguard-tls-ingress

apps/adguard/kustomization.yaml

@@ -10,7 +10,7 @@ resources:
images:
- name: adguard/adguardhome
newName: adguard/adguardhome
newTag: v0.107.43
newTag: v0.107.48
namespace: adguard

apps/adguard/service.yaml

@@ -24,6 +24,8 @@ metadata:
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
externalTrafficPolicy: Local
ports:
- name: dns-tcp
nodePort: 31306
@@ -46,6 +48,7 @@ metadata:
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
externalTrafficPolicy: Local
ports:
- name: dns-udp
nodePort: 30547

apps/affine/deployment.yaml (new file)

@@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: affine
spec:
selector:
matchLabels:
app: affine
template:
metadata:
labels:
app: affine
spec:
containers:
- name: affine
image: affine
resources:
limits:
memory: "512Mi"
cpu: "1"
env:
- name: AFFINE_SERVER_HOST
value: "affine.kluster.moll.re"
- name: AFFINE_SERVER_PORT
value: "443"
- name: AFFINE_SERVER_HTTPS
value: "true"
- name: AFFINE_CONFIG_PATH
value: "/root/.affine/config"
- name: AFFINE_ADMIN_EMAIL
value: "me@moll.re"
- name: AFFINE_ADMIN_PASSWORD
value: "password"
- name: TELEMETRY_ENABLE
value: "false"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: postgres-credentials
key: url
- name: NODE_OPTIONS
value: "--import=./scripts/register.js"
- name: NODE_ENV
value: "production"
ports:
- containerPort: 3010
volumeMounts:
- name: affine-data
mountPath: /root/.affine/storage
- name: affine-config
mountPath: /root/.affine/config
volumes:
- name: affine-data
persistentVolumeClaim:
claimName: affine-data
- name: affine-config
persistentVolumeClaim:
claimName: affine-config

apps/affine/ingress.yaml (new file)

@@ -0,0 +1,15 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: affine-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`affine.kluster.moll.re`)
kind: Rule
services:
- name: affine-web
port: 3010
tls:
certResolver: default-tls

apps/affine/kustomization.yaml (new file)

@@ -0,0 +1,20 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: affine
resources:
- namespace.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- postgres.yaml
- pvc.yaml
- postgres-credentials.secret.yaml
images:
- name: affine
newName: ghcr.io/toeverything/affine-graphql
newTag: stable

apps/affine/postgres.yaml (new file)

@@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: affine-postgres
spec:
instances: 1
bootstrap:
initdb:
owner: affine
database: affine
secret:
name: postgres-credentials
storage:
size: 1Gi
pvcTemplate:
storageClassName: "nfs-client"
resources:
requests:
storage: "1Gi"

apps/affine/pvc.yaml (new file)

@@ -0,0 +1,23 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: affine-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 15Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: affine-config
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

apps/affine/service.yaml (new file)

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: affine-web
spec:
selector:
app: affine
ports:
- port: 3010
targetPort: 3010

apps/files/deployment.yaml (new file)

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ocis-statefulset
spec:
selector:
matchLabels:
app: ocis
serviceName: ocis-web
replicas: 1
template:
metadata:
labels:
app: ocis
spec:
containers:
- name: ocis
image: ocis
resources:
limits:
memory: "1Gi"
cpu: "1000m"
env:
- name: OCIS_INSECURE
value: "true"
- name: OCIS_URL
value: "https://ocis.kluster.moll.re"
- name: OCIS_LOG_LEVEL
value: "debug"
ports:
- containerPort: 9200
volumeMounts:
- name: config
mountPath: /etc/ocis
# - name: ocis-config-file
# mountPath: /etc/ocis/config.yaml
- name: data
mountPath: /var/lib/ocis
volumes:
# - name: ocis-config
# persistentVolumeClaim:
# claimName: ocis-config
- name: config
secret:
secretName: ocis-config
- name: data
persistentVolumeClaim:
claimName: ocis

apps/files/ingress.yaml (new file)

@@ -0,0 +1,18 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ocis-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`ocis.kluster.moll.re`)
kind: Rule
services:
- name: ocis-web
port: 9200
scheme: https
tls:
certResolver: default-tls

apps/files/kustomization.yaml (new file)

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- service.yaml
- pvc.yaml
- deployment.yaml
- ocis-config.sealedsecret.yaml
namespace: files
images:
- name: ocis
newName: owncloud/ocis
newTag: "5.0.3"

apps/files/namespace.yaml (new file)

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/files/ocis-config.sealedsecret.yaml (diff suppressed: one or more lines are too long)

apps/files/pvc.yaml

@@ -1,13 +1,11 @@
```
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
name: ocis
spec:
storageClassName: nfs-client
storageClassName: "nfs-client"
accessModes:
- ReadWriteMany
- ReadWriteOnce
resources:
requests:
storage: 1Mi
```
storage: 150Gi

apps/files/service.yaml (new file)

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: ocis-web
spec:
selector:
app: ocis
ports:
- port: 9200
targetPort: 9200

apps/finance/actualbudget.deployment.yaml

@@ -1,12 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: finance
name: actualbudget
labels:
app: actualbudget
spec:
# deployment running a single container
selector:
matchLabels:
app: actualbudget
@@ -18,83 +16,19 @@ spec:
spec:
containers:
- name: actualbudget
image: actualbudget/actual-server:latest
image: actualbudget
imagePullPolicy: Always
env:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: actualbudget-data-nfs
- name: data
mountPath: /data
ports:
- containerPort: 5006
name: http
protocol: TCP
volumes:
- name: actualbudget-data-nfs
- name: data
persistentVolumeClaim:
claimName: actualbudget-data-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
# storageClassName: fast
capacity:
storage: "5Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/actualbudget
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
# selector:
# matchLabels:
# directory: "journal-data"
---
apiVersion: v1
kind: Service
metadata:
namespace: finance
name: actualbudget
spec:
selector:
app: actualbudget
ports:
- protocol: TCP
port: 5006
targetPort: 5006
type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: finance
name: actualbudget
spec:
entryPoints:
- websecure
routes:
- match: Host(`actualbudget.kluster.moll.re`)
kind: Rule
services:
- name: actualbudget
port: 5006
tls:
certResolver: default-tls
claimName: data

apps/finance/actualbudget.ingress.yaml (new file)

@@ -0,0 +1,15 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: actualbudget
spec:
entryPoints:
- websecure
routes:
- match: Host(`actualbudget.kluster.moll.re`)
kind: Rule
services:
- name: actualbudget
port: 5006
tls:
certResolver: default-tls

apps/finance/actualbudget.pvc.yaml (new file)

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "data"
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"

apps/finance/actualbudget.service.yaml (new file)

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: actualbudget
spec:
selector:
app: actualbudget
ports:
- protocol: TCP
port: 5006
targetPort: 5006
type: ClusterIP


@@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly-importer
name: firefly-importer
namespace: finance
spec:
selector:
matchLabels:
app: firefly-importer
template:
metadata:
labels:
app: firefly-importer
spec:
containers:
- image: fireflyiii/data-importer:latest
imagePullPolicy: Always
name: firefly-importer
resources: {}
ports:
- containerPort: 8080
env:
- name: FIREFLY_III_ACCESS_TOKEN
value: redacted
- name: FIREFLY_III_URL
value: firefly-http:8080
# - name: APP_URL
# value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
---
apiVersion: v1
kind: Service
metadata:
name: firefly-importer-http
namespace: finance
labels:
app: firefly-importer-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly-importer
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-importer-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`importer.finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-importer-http
port: 8080
tls:
certResolver: default-tls


@@ -1,79 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly
name: firefly
namespace: finance
spec:
selector:
matchLabels:
app: firefly
template:
metadata:
labels:
app: firefly
spec:
containers:
- image: fireflyiii/core:latest
imagePullPolicy: Always
name: firefly
resources: {}
ports:
- containerPort: 8080
env:
- name: APP_ENV
value: "local"
- name: APP_KEY
value: iKejRAlgwx2Y/fxdosXjABbNxNzEuJdl
- name: DB_CONNECTION
value: sqlite
- name: APP_URL
value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
volumeMounts:
- mountPath: /var/www/html/storage/database
name: firefly-database
volumes:
- name: firefly-database
persistentVolumeClaim:
claimName: firefly-database-nfs
---
apiVersion: v1
kind: Service
metadata:
name: firefly-http
namespace: finance
labels:
app: firefly-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-http
port: 8080
tls:
certResolver: default-tls


@@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: firefly-database-nfs
labels:
directory: firefly
spec:
# storageClassName: fast
# volumeMode: Filesystem
accessModes:
- ReadOnlyMany
capacity:
storage: "1G"
nfs:
path: /firefly # inside nfs part.
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: firefly-database-nfs
spec:
resources:
requests:
storage: "1G"
# storageClassName: fast
accessModes:
- ReadOnlyMany
volumeName: firefly-database-nfs

apps/finance/kustomization.yaml (new file)

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: finance
resources:
- namespace.yaml
- actualbudget.pvc.yaml
- actualbudget.deployment.yaml
- actualbudget.service.yaml
- actualbudget.ingress.yaml
images:
- name: actualbudget
newName: actualbudget/actual-server
newTag: 24.5.0

apps/homarr/kustomization.yaml (deleted)

@@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homarr
resources:
- namespace.yaml
- pvc.yaml
- ingress.yaml
helmCharts:
- name: homarr
releaseName: homarr
repo: https://oben01.github.io/charts/
version: 1.0.1
valuesFile: values.yaml

apps/homarr/values.yaml (deleted)

@@ -1,60 +0,0 @@
# -- Default values for homarr
# -- Declare variables to be passed into your templates.
# -- Number of replicas
replicaCount: 1
env:
# -- Your local time zone
TZ: "Europe/Berlin"
# -- Colors and preferences, possible values dark / light
DEFAULT_COLOR_SCHEME: "dark"
# -- Service configuration
service:
# -- Service type
type: ClusterIP
# -- Service port
port: 7575
# -- Service target port
targetPort: 7575
# -- Ingress configuration
ingress:
enabled: false
persistence:
- name: homarr-config
# -- Enable homarr-config persistent storage
enabled: true
# -- homarr-config storage class name
storageClassName: "nfs-client"
# -- homarr-config access mode
accessMode: "ReadWriteOnce"
persistentVolumeReclaimPolicy: Retain
# -- homarr-config storage size
size: "50Mi"
# -- homarr-config mount path inside the pod
mountPath: "/app/data/configs"
- name: homarr-database
# -- Enable homarr-database persistent storage
enabled: true
# -- homarr-database storage class name
storageClassName: "nfs-client"
# -- homarr-database access mode
accessMode: "ReadWriteOnce"
# -- homarr-database storage size
size: "50Mi"
# -- homarr-database mount path inside the pod
mountPath: "/app/database"
- name: homarr-icons
# -- Enable homarr-icons persistent storage
enabled: true
# -- homarr-icons storage class name
storageClassName: "nfs-client"
# -- homarr-icons access mode
accessMode: "ReadWriteOnce"
# -- homarr-icons storage size
size: "50Mi"
# -- homarr-icons mount path inside the pod
mountPath: "/app/public/icons"

apps/homeassistant/deployment.yaml

@@ -1,4 +1,3 @@
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -22,7 +21,7 @@ spec:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: config
- name: config-dir
mountPath: /config
resources:
requests:
@@ -32,6 +31,7 @@ spec:
cpu: "2"
memory: "1Gi"
volumes:
- name: config
- name: config-dir
persistentVolumeClaim:
claimName: homeassistant-nfs
claimName: config

apps/homeassistant/ingress.yaml

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homeassistant-ingress
@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`home.kluster.moll.re`)
- match: Host(`home.kluster.moll.re`) && !Path(`/api/prometheus`)
middlewares:
- name: homeassistant-websocket
kind: Rule
@@ -15,9 +15,8 @@ spec:
port: 8123
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: homeassistant-websocket
@@ -27,6 +26,3 @@ spec:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"

apps/homeassistant/kustomization.yaml

@@ -9,8 +9,10 @@ resources:
- pvc.yaml
- service.yaml
- deployment.yaml
- servicemonitor.yaml
images:
- name: homeassistant/home-assistant
newName: homeassistant/home-assistant
newTag: "2023.12"
newTag: "2024.5"

apps/homeassistant/pvc.yaml

@@ -1,28 +1,11 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: homeassistant-nfs
spec:
# storageClassName: slow
capacity:
storage: "1Gi"
# volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/homeassistant
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homeassistant-nfs
name: config
spec:
storageClassName: ""
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: homeassistant-nfs

apps/homeassistant/service.yaml

@@ -7,4 +7,5 @@ spec:
app: homeassistant
ports:
- port: 8123
targetPort: 8123
targetPort: 8123
name: http

apps/homeassistant/servicemonitor.yaml (new file)

@@ -0,0 +1,13 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: homeassistant-servicemonitor
labels:
app: homeassistant
spec:
selector:
matchLabels:
app: homeassistant
endpoints:
- port: http
path: /api/prometheus

apps/homepage/configmap.yaml (new file)

@@ -0,0 +1,98 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config
labels:
app.kubernetes.io/name: homepage
data:
kubernetes.yaml: "" #|
# mode: cluster
settings.yaml: |
title: "Homepage"
background: https://images.unsplash.com/photo-1547327132-5d20850c62b5?q=80&w=3870&auto=format&fit=crop
cardBlur: sm
#settings.yaml: |
# providers:
# longhorn:
# url: https://longhorn.my.network
custom.css: ""
custom.js: ""
bookmarks.yaml: |
- Developer:
- Github:
- abbr: GH
href: https://github.com/moll-re
services.yaml: |
- Media:
- Jellyfin backend:
href: https://media-backend.kluster.moll.re
ping: media-backend.kluster.moll.re
- Jellyfin vue:
href: https://media.kluster.moll.re
ping: media.kluster.moll.re
- Immich:
href: https://immich.kluster.moll.re
ping: immich.kluster.moll.re
- Productivity:
- OwnCloud:
href: https://ocis.kluster.moll.re
ping: ocis.kluster.moll.re
- ToDo:
href: https://todos.kluster.moll.re
ping: todos.kluster.moll.re
- Finance:
href: https://finance.kluster.moll.re
ping: finance.kluster.moll.re
- Home:
- Home Assistant:
href: https://home.kluster.moll.re
ping: home.kluster.moll.re
- Grafana:
href: https://grafana.kluster.moll.re
ping: grafana.kluster.moll.re
- Recipes:
href: https://recipes.kluster.moll.re
ping: recipes.kluster.moll.re
- Infra:
- Gitea:
href: https://git.kluster.moll.re
ping: git.kluster.moll.re
- ArgoCD:
href: https://argocd.kluster.moll.re
ping: argocd.kluster.moll.re
widgets.yaml: |
# - kubernetes:
# cluster:
# show: true
# cpu: true
# memory: true
# showLabel: true
# label: "cluster"
# nodes:
# show: true
# cpu: true
# memory: true
# showLabel: true
- search:
provider: duckduckgo
- openmeteo:
label: Zürich # optional
latitude: 47.24236
longitude: 8.30439
units: metric # or imperial
cache: 30 # Time in minutes to cache API responses, to stay within limits
format: # optional, Intl.NumberFormat options
maximumFractionDigits: 1
- datetime:
locale: de
format:
dateStyle: long
timeStyle: short
- adguard:
url: http://adguard-home-web.adguard-home:3000
docker.yaml: ""

apps/homepage/deployment.yaml (new file)

@@ -0,0 +1,64 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
labels:
app.kubernetes.io/name: homepage
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: homepage
template:
metadata:
labels:
app.kubernetes.io/name: homepage
spec:
# serviceAccountName: homepage
# automountServiceAccountToken: true
dnsPolicy: ClusterFirst
# enableServiceLinks: true
containers:
- name: homepage
image: homepage
imagePullPolicy: Always
ports:
- name: http
containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /app/config/custom.js
name: config
subPath: custom.js
- mountPath: /app/config/custom.css
name: config
subPath: custom.css
- mountPath: /app/config/bookmarks.yaml
name: config
subPath: bookmarks.yaml
- mountPath: /app/config/docker.yaml
name: config
subPath: docker.yaml
- mountPath: /app/config/kubernetes.yaml
name: config
subPath: kubernetes.yaml
- mountPath: /app/config/services.yaml
name: config
subPath: services.yaml
- mountPath: /app/config/settings.yaml
name: config
subPath: settings.yaml
- mountPath: /app/config/widgets.yaml
name: config
subPath: widgets.yaml
- mountPath: /app/config/logs
name: logs
volumes:
- name: config
configMap:
name: config
- name: logs
emptyDir: {}

apps/homepage/ingress.yaml

@@ -1,7 +1,8 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homarr-ingress
name: homepage-ingressroute
spec:
entryPoints:
- websecure
@@ -9,7 +10,7 @@ spec:
- match: Host(`start.kluster.moll.re`)
kind: Rule
services:
- name: homarr
port: 7575
- name: homepage-web
port: 3000
tls:
certResolver: default-tls

apps/homepage/kustomization.yaml (new file)

@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homepage
resources:
- namespace.yaml
- deployment.yaml
- service.yaml
- configmap.yaml
- ingress.yaml
images:
- name: homepage
newName: ghcr.io/gethomepage/homepage
newTag: v0.8.13

apps/homepage/service.yaml (new file)

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: homepage-web
labels:
app.kubernetes.io/name: homepage
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: homepage

apps/immich/ingress.yaml

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: stripprefix
@@ -7,7 +7,7 @@ spec:
prefixes:
- /api
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: websocket
@@ -18,7 +18,7 @@ spec:
# enable websockets
Upgrade: "websocket"
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: immich-ingressroute

apps/immich/kustomization.yaml

@@ -4,6 +4,7 @@ resources:
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
namespace: immich
@@ -11,6 +12,13 @@ namespace: immich
helmCharts:
- name: immich
releaseName: immich
version: 0.2.0
version: 0.6.0
valuesFile: values.yaml
repo: https://immich-app.github.io/immich-charts
images:
- name: ghcr.io/immich-app/immich-machine-learning
newTag: v1.103.1
- name: ghcr.io/immich-app/immich-server
newTag: v1.103.1

apps/immich/postgres.yaml (new file)

@@ -0,0 +1,29 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: immich-postgres
spec:
instances: 1
imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.2
bootstrap:
initdb:
owner: immich
database: immich
secret:
name: postgres-password
postgresql:
shared_preload_libraries:
- "vectors.so"
storage:
size: 1Gi
pvcTemplate:
storageClassName: ""
resources:
requests:
storage: "1Gi"
volumeName: immich-postgres
monitoring:
enablePodMonitor: true

apps/immich/pvc.yaml

@@ -24,3 +24,17 @@ spec:
requests:
storage: "50Gi"
volumeName: immich-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-postgres
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich-postgres
server: 192.168.1.157
# later used by cnpg

apps/immich/values.yaml

@@ -2,15 +2,11 @@
## You can find it at https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
## Refer there for more detail about the supported values
image:
tag: v1.90.2
# These entries are shared between all the Immich components
env:
REDIS_HOSTNAME: '{{ printf "%s-redis-master" .Release.Name }}'
DB_HOSTNAME: "postgres-postgresql.postgres"
DB_HOSTNAME: "immich-postgres-rw"
DB_USERNAME:
valueFrom:
secretKeyRef:
@@ -26,11 +22,7 @@ env:
secretKeyRef:
name: postgres-password
key: password
TYPESENSE_ENABLED: "{{ .Values.typesense.enabled }}"
TYPESENSE_API_KEY: "{{ .Values.typesense.env.TYPESENSE_API_KEY }}"
TYPESENSE_HOST: '{{ printf "%s-typesense" .Release.Name }}'
IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}'
IMMICH_SERVER_URL: '{{ printf "http://%s-server:3001" .Release.Name }}'
IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
immich:
@@ -52,18 +44,6 @@ redis:
auth:
enabled: false
typesense:
enabled: true
env:
TYPESENSE_DATA_DIR: /tsdata
TYPESENSE_API_KEY: typesense
persistence:
tsdata:
# Enabling typesense persistence is recommended to avoid slow reindexing
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
# Immich components
server:

apps/media/ingress.yaml

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-vue-ingress
@@ -17,7 +17,7 @@ spec:
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-backend-ingress
@@ -26,7 +26,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`media-backend.kluster.moll.re`)
- match: Host(`media-backend.kluster.moll.re`) && !Path(`/metrics`)
middlewares:
- name: jellyfin-websocket
- name: jellyfin-server-headers
@@ -37,7 +37,7 @@ spec:
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: jellyfin-websocket
@@ -48,7 +48,7 @@ spec:
Connection: keep-alive, Upgrade
Upgrade: WebSocket
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: jellyfin-server-headers
@@ -60,4 +60,4 @@ spec:
accessControlAllowMethods: [ "GET","HEAD","OPTIONS" ] # "POST","PUT"
accessControlAllowOriginList:
- "*"
accessControlMaxAge: 100
accessControlMaxAge: 100


@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: jellyfin
labels:
metrics: prometheus
spec:
selector:
matchLabels:
app: jellyfin-server-service
endpoints:
- path: /metrics
targetPort: jellyfin
# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/
# the metrics are available at /metrics but blocked by the ingress

apps/media/kustomization.yaml

@@ -14,7 +14,7 @@ resources:
images:
- name: jellyfin/jellyfin
newName: jellyfin/jellyfin
newTag: 10.8.13
newTag: 10.9.0
- name: ghcr.io/jellyfin/jellyfin-vue
newName: ghcr.io/jellyfin/jellyfin-vue
newTag: stable-rc.0.3.1

apps/media/pvc.yaml

@@ -1,39 +1,21 @@
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: media
name: jellyfin-config-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/jellyfin-config
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: media
name: jellyfin-config-nfs
name: config
spec:
storageClassName: ""
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: jellyfin-config-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: media
name: jellyfin-data-nfs
name: media
spec:
capacity:
storage: "1Ti"
@@ -46,8 +28,7 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: media
name: jellyfin-data-nfs
name: media
spec:
storageClassName: ""
accessModes:
@@ -55,4 +36,4 @@ spec:
resources:
requests:
storage: "1Ti"
volumeName: jellyfin-data-nfs
volumeName: media


@@ -20,13 +20,14 @@ spec:
cpu: "2"
ports:
- containerPort: 8096
name: jellyfin
env:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: jellyfin-config
- name: config
mountPath: /config
- name: jellyfin-data
- name: media
mountPath: /media
livenessProbe:
httpGet:
@@ -35,10 +36,10 @@ spec:
initialDelaySeconds: 100
periodSeconds: 15
volumes:
- name: jellyfin-config
- name: config
persistentVolumeClaim:
claimName: jellyfin-config-nfs
- name: jellyfin-data
claimName: config
- name: media
persistentVolumeClaim:
claimName: jellyfin-data-nfs
claimName: media


@@ -3,6 +3,8 @@ apiVersion: v1
kind: Service
metadata:
name: jellyfin-server
labels:
app: jellyfin-server-service
spec:
selector:
app: jellyfin-server

apps/minecraft/README.md (new file)

@@ -0,0 +1,7 @@
## Sending a command
```
kubectl exec -it -n minecraft deploy/minecraft-server -- /bin/bash
mc-send-to-console /help
# or directly
kubectl exec -it -n minecraft deploy/minecraft-server -- mc-send-to-console /help
```

apps/minecraft/deployment.yaml (new file)

@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: minecraft-server
spec:
selector:
matchLabels:
app: minecraft-server
template:
metadata:
labels:
app: minecraft-server
spec:
containers:
- name: minecraft-server
image: minecraft
resources:
limits:
memory: "4000Mi"
cpu: "2500m"
requests:
memory: "1000Mi"
cpu: "500m"
ports:
- containerPort: 25565
env:
- name: EULA
value: "TRUE"
- name: MODPACK
value: "https://www.curseforge.com/api/v1/mods/711537/files/5076228/download"
- name: VERSION
value: "1.18.2"
# - name: VERSION
# value: "1.16.5"
# - name: MODPACK
# value: "https://mediafilez.forgecdn.net/files/3602/5/VaultHunters-OfficialModpack-1.12.1-Server.zip"
- name: INIT_MEMORY
value: "1G"
- name: MAX_MEMORY
value: "3G"
- name: MOTD
value: "VaultHunters baby!"
- name: ENABLE_RCON
value: "false"
- name: CREATE_CONSOLE_IN_PIPE
value: "true"
- name: ONLINE_MODE
value: "true"
volumeMounts:
- name: minecraft-data
mountPath: /data
volumes:
- name: minecraft-data
persistentVolumeClaim:
claimName: minecraft-data

apps/minecraft/kustomization.yaml (new file)

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minecraft
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
images:
- name: minecraft
newName: itzg/minecraft-server
newTag: java21

apps/minecraft/namespace.yaml (new file)

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/minecraft/pvc.yaml (new file)

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: minecraft-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

apps/minecraft/service.yaml (new file)

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: minecraft-server
spec:
selector:
app: minecraft-server
ports:
- port: 25565
targetPort: 25565
type: LoadBalancer
loadBalancerIP: 192.168.3.4


@@ -0,0 +1,17 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: grafana-admin-secret
namespace: monitoring
spec:
encryptedData:
password: AgBe8isrCWd5MuaQq5CpA+P3fDizCCDo23BVauaBJLuMRIYbVwpfahaJW7Ocj3LTXwdeVVPBrOk2D6vESUXu6I0EWc3y/NFN4ZezScxMcjmeaAb+z1zWwdH0FynTPJYOxv1fis1FDTkXDmGy3FXo5NDK9ET899TtulKFkh7UqSxdrRWbD3pegJgqKGPIqDCTAxZN/ssiccfWGS4lHqQBJkXn8DeampcKwjOCvgaBdilF03GoSfpgsqa2Iw2SfTDEobWBWVMMK/RB3/Oi/YJkGwMW3ECUxvTDam8gb0RFA1xjWXoYTLVVP5fK7q7x63ns51HebloxAP1GBrt138N/iDrfbGfjNP8Lx0NFl5y5bTgYN/z8DVTOFf90xxWe+YYERdwllg0Ci1JLNbA+NszXTD4L/HC7a8XuBfjRzxMTeymNjR76jzfPkH6v1EvesOduTfSrahPgS0qS+eGOier1rHxj3EBRhOScY1ut5Bq4oJMNId9nMVbVa6xyq2HyxuJHXV+j6h5FGHmEXn9gIR7wGp8RhtPhKgVGLrHcbHZ5Th2E7eomz1T2NK/ezNP8ZhcwOj/lyGywlW0vhU798zpWhMf57k2OPeuMlfs8Y8y74epBdyBjsrMR4EDctF8RZR3vraxENiMJ6kk1gqKj04ir6HwL7blqwiybIFFnJrp2j7MzgjS4SQ687qMX5Zf5XT03aEE+9W9Epy73tT7zVQKdENCQlcm5
user: AgAdiOivMn0d+nYjYycMZz9QSiS/9QqwHPJQMHkE7/IOou+CJtBknlETNtdv84KZgBQTucufYqu3LR3djOBpdnQsYbIXDxPFgRZQ11pwu/sO2EGifDk218yyzzfZMvx1FL7JL4LI1rKoiHycZowCwsAjEtlICVOOYv1/Plki+6MHXiAGG4r/yUhugGx3VLLX+Poq8oaTeHndgSsFXJege8SfgYR4TsC7pQgsM1UQEFncGIhJYTD2ashmUxFJ+7CJjHqPR0lFRrZXmFvPwTYTCMT+tnSHnCFWtTht8cEi1NxA4kD/eKEX0rOol15EUZnFUws2WqWI634TbyGwZ7km/Yw4XoDxiQR4ar6ulkqb/djcc3cWDYE7PF1m1c+r3iog85S5CSfZ5EvdCHHrbPN9uO2gmoRQWiR5qI70YMxBSnkeLZWN05O1vUuopdXFDTafY7YskxLEdIGHGqFUpUrJZOvBB0zNBdHGgYxFzb5pNmMCC5LPlOuoKjV4yskh9Tgovz06aAvsPxn2WWx6NOJambeziKB5OmSKvPsFofViyGBekVAWSWtt9yJe6lu5OKpBEiA6xhGhQ4ZryTXu9wvVALuPSIwBFITv85sIxjJb80qhJ51wb12QgzLLcPby0HSanyBI1M4jfsXWpK8gIAbDNO+eD7z3PhD9Y/5hPqYKXZ37Geyq23xiyxG8XDj6cL+Ie6k8XipayI4=
template:
metadata:
creationTimestamp: null
name: grafana-admin-secret
namespace: monitoring
type: Opaque


@@ -1,5 +1,5 @@
kind: IngressRoute
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
metadata:
name: grafana-ingress
spec:


@@ -2,8 +2,6 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-nfs
labels:
directory: grafana
spec:
capacity:
storage: "1Gi"
@@ -18,11 +16,10 @@ kind: PersistentVolumeClaim
metadata:
name: grafana-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: grafana
volumeName: grafana-nfs


@@ -1,570 +1,55 @@
rbac:
create: true
## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
# useExistingRole: name-of-some-(cluster)role
pspEnabled: true
pspUseAppArmor: true
namespaced: false
extraRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
extraClusterRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
serviceAccount:
create: true
name:
nameTest:
## Service account annotations. Can be templated.
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
autoMount: true
replicas: 1
## Create a headless service for the deployment
headlessService: false
## Create HorizontalPodAutoscaler object for deployment type
#
autoscaling:
enabled: false
# minReplicas: 1
# maxReplicas: 10
# metrics:
# - type: Resource
# resource:
# name: cpu
# targetAverageUtilization: 60
# - type: Resource
# resource:
# name: memory
# targetAverageUtilization: 60
## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1
## See `kubectl explain deployment.spec.strategy` for more
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
deploymentStrategy:
type: RollingUpdate
readinessProbe:
httpGet:
path: /api/health
port: 3000
livenessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
image:
repository: grafana/grafana
tag: 9.0.2
sha: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Can be templated.
##
# pullSecrets:
# - myRegistrKeySecretName
testFramework:
enabled: true
image: "bats/bats"
tag: "v1.4.1"
imagePullPolicy: IfNotPresent
securityContext: {}
securityContext:
runAsUser: 472
runAsGroup: 472
fsGroup: 472
containerSecurityContext:
{}
# Extra configmaps to mount in grafana pods
# Values are templated.
extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/grafana/ssl/
# subPath: certificates.crt # (optional)
# configMap: certs-configmap
# readOnly: true
extraEmptyDirMounts: []
# - name: provisioning-notifiers
# mountPath: /etc/grafana/provisioning/notifiers
# Apply extra labels to common labels.
extraLabels: {}
## Assign a PriorityClassName to pods if set
# priorityClassName:
downloadDashboardsImage:
repository: curlimages/curl
tag: 7.73.0
sha: ""
pullPolicy: IfNotPresent
downloadDashboards:
env: {}
envFromSecret: ""
resources: {}
## Pod Annotations
# podAnnotations: {}
## Pod Labels
# podLabels: {}
podPortName: grafana
## Deployment annotations
# annotations: {}
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
enabled: true
type: ClusterIP
port: 80
targetPort: 3000
# targetPort: 4181 To be used with a proxy extraContainer
annotations: {}
labels: {}
portName: service
serviceMonitor:
## If true, a ServiceMonitor CRD is created for a prometheus operator
## https://github.com/coreos/prometheus-operator
##
enabled: false
path: /metrics
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
labels: {}
interval: 1m
scheme: http
tlsConfig: {}
scrapeTimeout: 30s
relabelings: []
extraExposePorts: []
# - name: keycloak
# port: 8080
# targetPort: 8080
# type: ClusterIP
# overrides pod.spec.hostAliases in the grafana deployment's pods
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "my.host.com"
ingress:
enabled: true
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {
kubernetes.io/ingress.class: nginx,
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
enabled: false
# pathType is only for k8s >= 1.18
pathType: Prefix
hosts:
- grafana.kluster.moll.re
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## Or for k8s > 1.19
# - path: /*
# pathType: Prefix
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
tls:
- hosts:
- grafana.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Additional init containers (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
extraInitContainers: []
## Specify additional containers in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
extraContainers: ""
# extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - -provider=github
# - -client-id=
# - -client-secret=
# - -github-org=<ORG_NAME>
# - -email-domain=*
# - -cookie-secret=
# - -http-address=http://0.0.0.0:4181
# - -upstream-url=http://127.0.0.1:3000
# ports:
# - name: proxy-web
# containerPort: 4181
## Volumes that can be used in init containers that will not be mounted to deployment pods
extraContainerVolumes: []
# - name: volume-from-secret
# secret:
# secretName: secret-to-mount
# - name: empty-dir-volume
# emptyDir: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
type: pvc
enabled: true
# storageClassName: default
accessModes:
- ReadWriteOnce
size: 10Gi
# annotations: {}
finalizers:
- kubernetes.io/pvc-protection
# selectorLabels: {}
## Sub-directory of the PV to mount. Can be templated.
# subPath: ""
## Name of an existing PVC. Can be templated.
existingClaim: grafana-nfs
## If persistence is not enabled, this allows mounting the
## local storage in-memory to improve performance
##
inMemory:
enabled: false
## The maximum usage on memory medium EmptyDir would be
## the minimum value between the SizeLimit specified
## here and the sum of memory limits of all containers in a pod
##
# sizeLimit: 300Mi
initChownData:
## If false, data ownership will not be reset at startup
## This allows the grafana server to be run with an arbitrary user
##
enabled: true
## initChownData container image
##
image:
repository: busybox
tag: "1.31.1"
sha: ""
pullPolicy: IfNotPresent
## initChownData resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
# adminPassword: strongpassword
# Use an existing secret for the admin user.
# credentials
admin:
## Name of the secret. Can be templated.
existingSecret: ""
userKey: admin-user
passwordKey: admin-password
existingSecret: grafana-admin-secret
userKey: user
passwordKey: password
## Define command to be executed at startup by grafana container
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
## Default is "run.sh" as defined in grafana's Dockerfile
# command:
# - "sh"
# - "/run.sh"
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Extra environment variables that will be passed onto deployment pods
##
## to provide grafana with access to CloudWatch on AWS EKS:
## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
## same oidc eks provider as noted before (same as the existing line)
## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
##
## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
##
## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
##
## env:
## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
## AWS_REGION: us-east-1
##
## 5. uncomment the EKS section in extraSecretMounts: below
## 6. uncomment the annotation section in the serviceAccount: above
## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
env: {}
## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
## Renders in container spec as:
## env:
## ...
## - name: <key>
## valueFrom:
## <value rendered as YAML>
envValueFrom: {}
# ENV_NAME:
# configMapKeyRef:
# name: configmap-name
# key: value_key
## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
## This can be useful for auth tokens, etc. Value is templated.
envFromSecret: ""
## Sensitive environment variables that will be rendered as a new secret object
## This can be useful for auth tokens, etc
envRenderSecret: {}
## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
## Name is templated.
envFromSecrets: []
## - name: secret-name
## optional: true
## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key.
## Name is templated.
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
envFromConfigMaps: []
## - name: configmap-name
## optional: true
# Inject Kubernetes services as environment variables.
# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
enableServiceLinks: true
## Additional grafana server secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
# - name: secret-files
# mountPath: /etc/secrets
# secretName: grafana-secret-files
# readOnly: true
# subPath: ""
#
# for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
# - name: aws-iam-token
# mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
# readOnly: true
# projected:
# defaultMode: 420
# sources:
# - serviceAccountToken:
# audience: sts.amazonaws.com
# expirationSeconds: 86400
# path: token
#
# for CSI e.g. Azure Key Vault use the following
# - name: secrets-store-inline
# mountPath: /run/secrets
# readOnly: true
# csi:
# driver: secrets-store.csi.k8s.io
# readOnly: true
# volumeAttributes:
# secretProviderClass: "akv-grafana-spc"
# nodePublishSecretRef: # Only required when using service principal mode
# name: grafana-akv-creds # Only required when using service principal mode
## Additional grafana server volume mounts
# Defines additional volume mounts.
extraVolumeMounts: []
# - name: extra-volume-0
# mountPath: /mnt/volume0
# readOnly: true
# existingClaim: volume-claim
# - name: extra-volume-1
# mountPath: /mnt/volume1
# readOnly: true
# hostPath: /usr/shared/
## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
lifecycleHooks: {}
# postStart:
# exec:
# command: []
## Pass the plugins you want installed as a list.
##
plugins: []
# - digrich-bubblechart-panel
# - grafana-clock-panel
## Configure grafana datasources
## ref: http://docs.grafana.org/administration/provisioning/#datasources
##
datasources: {}
# datasources.yaml:
# apiVersion: 1
# datasources:
# - name: Prometheus
# type: prometheus
# url: http://prometheus-prometheus-server
# access: proxy
# isDefault: true
# - name: CloudWatch
# type: cloudwatch
# access: proxy
# uid: cloudwatch
# editable: false
# jsonData:
# authType: default
# defaultRegion: us-east-1
## Configure notifiers
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
##
notifiers: {}
# notifiers.yaml:
# notifiers:
# - name: email-notifier
# type: email
# uid: email1
# # either:
# org_id: 1
# # or
# org_name: Main Org.
# is_default: true
# settings:
# addresses: an_email_address@example.com
# delete_notifiers:
## Configure grafana dashboard providers
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
##
## `path` must be /var/lib/grafana/dashboards/<provider_name>
##
dashboardProviders: {}
# dashboardproviders.yaml:
# apiVersion: 1
# providers:
# - name: 'default'
# orgId: 1
# folder: ''
# type: file
# disableDeletion: false
# editable: true
# options:
# path: /var/lib/grafana/dashboards/default
## Configure grafana dashboard to import
## NOTE: To use dashboards you must also enable/configure dashboardProviders
## ref: https://grafana.com/dashboards
##
## dashboards per provider, use provider name as key.
##
dashboards: {}
# default:
# some-dashboard:
# json: |
# $RAW_JSON
# custom-dashboard:
# file: dashboards/custom-dashboard.json
# prometheus-stats:
# gnetId: 2
# revision: 2
# datasource: Prometheus
# local-dashboard:
# url: https://example.com/repository/test.json
# token: ''
# local-dashboard-base64:
# url: https://example.com/repository/test-b64.json
# token: ''
# b64content: true
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Thanos
type: prometheus
url: http://thanos-querier.prometheus.svc:9090
isDefault: true
- name: Prometheus
type: prometheus
url: http://prometheus.prometheus.svc:9090
isDefault: false
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider's dashboards must be defined either via external ConfigMaps or in values.yaml, not both.
## ConfigMap data example:
@@ -573,301 +58,12 @@ dashboards: {}
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps: {}
# default: ""
dashboardsConfigMaps:
default: grafana-dashboards
## Grafana's primary configuration
## NOTE: values in map will be converted to ini format
## ref: http://docs.grafana.org/installation/configuration/
##
grafana.ini:
paths:
data: /var/lib/grafana/
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
## grafana Authentication can be enabled with the following values on grafana.ini
# server:
# The full public facing url you use in browser, used for redirects and emails
# root_url:
# https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
# auth.github:
# enabled: false
# allow_sign_up: false
# scopes: user:email,read:org
# auth_url: https://github.com/login/oauth/authorize
# token_url: https://github.com/login/oauth/access_token
# api_url: https://api.github.com/user
# team_ids:
# allowed_organizations:
# client_id:
# client_secret:
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
# auth.ldap:
# enabled: true
# allow_sign_up: true
# config_file: /etc/grafana/ldap.toml
## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable, grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
enabled: false
# `existingSecret` is a reference to an existing secret containing the ldap configuration
# for Grafana in a key `ldap-toml`.
existingSecret: ""
# `config` is the content of `ldap.toml` that will be stored in the created secret
config: ""
# config: |-
# verbose_logging = true
# [[servers]]
# host = "my-ldap-server"
# port = 636
# use_ssl = true
# start_tls = false
# ssl_skip_verify = false
# bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana.
existingSecret: ""
userKey: "user"
passwordKey: "password"
## Sidecars that collect the configmaps with the specified label and store the included files into the respective folders
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
image:
repository: quay.io/kiwigrid/k8s-sidecar
tag: 1.15.6
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
securityContext: {}
# skipTlsVerify: set to true to skip TLS verification for kube api calls
# skipTlsVerify: true
enableUniqueFilenames: false
readinessProbe: {}
livenessProbe: {}
dashboards:
wal: true
default_theme: dark
unified_alerting:
enabled: false
SCProvider: true
# label that the configmaps with dashboards are marked with
label: grafana_dashboard
# value of label that the configmaps with dashboards are set to
labelValue: null
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
folder: /tmp/dashboards
# The default folder name; it will create a subfolder under `folder` and put dashboards there instead
defaultFolderName: null
# Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces.
searchNamespace: null
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# If specified, the sidecar will look for annotation with this name to create folder and put graph here.
# You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
folderAnnotation: null
# Absolute path to shell script to execute after a configmap got reloaded
script: null
# watchServerTimeout: timeout requested from the server, asking it to cleanly close the connection after that many seconds.
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
# watchServerTimeout: 3600
#
# watchClientTimeout: is a client-side timeout, configuring your local socket.
# If you have a network outage dropping all packets with no RST/FIN,
# this is how long your client waits before realizing & dropping the connection.
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
# provider configuration that lets grafana manage the dashboards
provider:
# name of the provider, should be unique
name: sidecarProvider
# orgid as configured in grafana
orgid: 1
# folder in which the dashboards should be imported in grafana
folder: ''
# type of the provider
type: file
# disableDelete to activate an import-only behaviour
disableDelete: false
# allow updating provisioned dashboards from the UI
allowUiUpdates: false
# allow Grafana to replicate dashboard structure from filesystem
foldersFromFilesStructure: false
# Additional dashboard sidecar volume mounts
extraMounts: []
# Sets the size limit of the dashboard sidecar emptyDir volume
sizeLimit: {}
datasources:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource
# value of label that the configmaps with datasources are set to
labelValue: null
# If specified, the sidecar will search for datasource config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# Endpoint to send request to reload datasources
reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
skipReload: false
# Deploy the datasource sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any datasources defined at startup time.
initDatasources: false
# Sets the size limit of the datasource sidecar emptyDir volume
sizeLimit: {}
plugins:
enabled: false
# label that the configmaps with plugins are marked with
label: grafana_plugin
# value of label that the configmaps with plugins are set to
labelValue: null
# If specified, the sidecar will search for plugin config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# Endpoint to send request to reload plugins
reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
skipReload: false
# Deploy the datasource sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any plugins defined at startup time.
initPlugins: false
# Sets the size limit of the plugin sidecar emptyDir volume
sizeLimit: {}
notifiers:
enabled: false
# label that the configmaps with notifiers are marked with
label: grafana_notifier
# If specified, the sidecar will search for notifier config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# search in configmap, secret or both
resource: both
# Sets the size limit of the notifier sidecar emptyDir volume
sizeLimit: {}
## Override the deployment namespace
##
namespaceOverride: ""
## Number of old ReplicaSets to retain
##
revisionHistoryLimit: 10
## Add a separate remote image renderer deployment/service
imageRenderer:
# Enable the image-renderer deployment & service
enabled: false
replicas: 1
image:
# image-renderer Image repository
repository: grafana/grafana-image-renderer
# image-renderer Image tag
tag: latest
# image-renderer Image sha (optional)
sha: ""
# image-renderer ImagePullPolicy
pullPolicy: Always
# extra environment variables
env:
HTTP_HOST: "0.0.0.0"
# RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
# RENDERING_MODE: clustered
# IGNORE_HTTPS_ERRORS: true
# image-renderer deployment serviceAccount
serviceAccountName: ""
# image-renderer deployment securityContext
securityContext: {}
# image-renderer deployment Host Aliases
hostAliases: []
# image-renderer deployment priority class
priorityClassName: ''
service:
# Enable the image-renderer service
enabled: true
# image-renderer service port name
portName: 'http'
# image-renderer service port used by both service and deployment
port: 8081
targetPort: 8081
# If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
grafanaProtocol: http
# In case a sub_path is used this needs to be added to the image renderer callback
grafanaSubPath: ""
# name of the image-renderer port on the pod
podPortName: http
# number of image-renderer replica sets to keep
revisionHistoryLimit: 10
networkPolicy:
# Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
limitIngress: true
# Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
limitEgress: false
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# Create dynamic manifests via values:
extraObjects: []
# - apiVersion: "kubernetes-client.io/v1"
# kind: ExternalSecret
# metadata:
# name: grafana-secrets
# spec:
# backendType: gcpSecretsManager
# data:
# - key: grafana-admin-password
# name: adminPassword
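
Two things are worth verifying after this values rewrite: that the referenced admin secret actually carries the user/password keys, and that the provisioned datasource URLs resolve in-cluster. A hedged smoke test, with service and namespace names taken from the values above:

```bash
# Check the admin secret exposes the keys the chart now expects.
kubectl get secret grafana-admin-secret -n monitoring \
  -o jsonpath='{.data.user}' | base64 -d; echo

# Thanos and Prometheus both expose /-/healthy; probe from a throwaway pod.
kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl --command -- \
  curl -s http://thanos-querier.prometheus.svc:9090/-/healthy
```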

View File

@@ -2,13 +2,9 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: influxdb-nfs
labels:
directory: influxdb
spec:
# storageClassName: slow
capacity:
storage: "10Gi"
# volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
@@ -26,6 +22,4 @@ spec:
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: influxdb
volumeName: influxdb-nfs

View File

@@ -6,24 +6,15 @@ namespace: monitoring
resources:
- namespace.yaml
- grafana.pvc.yaml
- influxdb.pvc.yaml
# - influxdb.pvc.yaml
- grafana.ingress.yaml
- grafana-admin.sealedsecret.yaml
- dashboards/
helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 6.56.2
version: 7.3.9
valuesFile: grafana.values.yaml
- releaseName: influxdb
name: influxdb2
repo: https://helm.influxdata.com/
version: 2.1.2
valuesFile: influxdb.values.yaml
- releaseName: telegraf-speedtest
name: telegraf
repo: https://helm.influxdata.com/
version: 1.8.27
valuesFile: telegraf-speedtest.values.yaml
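
Since the Grafana chart jumps a major version (6.56.2 to 7.3.9), it is worth rendering the kustomization locally before ArgoCD picks it up. A sketch, assuming the overlay lives at apps/monitoring:

```bash
# --enable-helm is required because this kustomization uses the helmCharts field.
kustomize build --enable-helm apps/monitoring \
  | kubectl apply --dry-run=client -f -
```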

View File

@@ -1,5 +0,0 @@
### Running `occ` commands:
```
su -s /bin/bash www-data -c "php occ user:list"
```

View File

@@ -1,16 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nextcloud-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`nextcloud.kluster.moll.re`)
kind: Rule
services:
- name: nextcloud
port: 8080
tls:
certResolver: default-tls

View File

@@ -1,16 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.sealedsecret.yaml
namespace: nextcloud
helmCharts:
- name: nextcloud
releaseName: nextcloud
version: 4.5.5
valuesFile: values.yaml
repo: https://nextcloud.github.io/helm/

View File

@@ -1,22 +0,0 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "postgres-password",
"namespace": "nextcloud",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "postgres-password",
"namespace": "nextcloud",
"creationTimestamp": null
}
},
"encryptedData": {
"password": "AgCTmvBe9YFnyWOdz02rxr0hTXnWuVLeUt5dpieWMzl4cVMBj7WcyyODWtNd+eQOLARRssGNZAP4C9gH90iVRFAW1aU+NeA76oceXE5Kiiqoc8T30wE5FC6/UbTjQYRH520NF4wcCQKm//iH8o5uI2+NxZW4goeuShibXK9sijFVNXxUuTeXTmaSJjEPyB+pnmPwjzw+qjhkJJADefh9oryy5+t9ecCwXDiI/2ce2n1Vawm/Nq6/0rZMUSsF8XSiTFczKMunuGMhxGEyyx/I8NZd4XMXGSnBo0YZF7jR9+eRHIjuenPHq1kfEid2Ps4fhFSE8mEecnK7w5xE3r0XeTNHQcTId1yYneK/LQfcRkzInuRddytTwTAmsoSjROcjKjAvtyZSM81pFWJsMQ7bSVXOC0K2wvEz9khDT0RIoR/8tMh2G737F15raTe9Ggbgy3DHst4mYIpoWV/slHrOF0vR9j7X+MRN9R1cVtI1coof/tVSWQsLvv0AJfB4/6dUl+i/yNO/j+4c3WolGwqyXd+oxsZK1VrSwSCBZwBO17BmePJL2QsPVRdutq06TrlvGqP4wXySH9LRuHr3sWgr2VuDV00w+UvuU7ExI+16dWh7jrn/rvIBQSJlHDhl5+VpyM0WTMy5kSfO6nits73ZzT7BAoSU7AeQOMj3t+cUiEq9f9dk7em7QxWMuWg6QIJ+ZZ2+CCBms4rSE4x2glOxanNX/HktQg==",
"username": "AgCxJKzhsF7yNJesK5oLJP62kjFnX4UUNQ2NrHl02Hv6MAzi/AUEV3uJSXXIi3H/uMJSMxRpJQjIDsrznYVI0YHOoz1M8/y1dx8xotFv/i0XByI9sMuGtesop7ncmQbEPMaJ3pqTJyaGkEwcsEMGmwwYiRfJHmEhhCYtzEc5IAnx+nmk//HYsrSWKpJGSWl0LvdMJsnsTxrWoJjaYTW3J0Of3VOOmgkuwIFKyXW9S2cUbAco8xVYchbyiHc8LXbS3izyAidRzg1OWyqvTGMIKJDQZ3ibIiXheon5ZeYjj0fkEkv3TrB7WoKdo0090OY1eHabqAPHT8aP+WG1g6TAzbJEtg+zFfYDKIw5Tp1WkRlsD2me4HycGuZbsaXgP5vWlxF5+rULUzUgxfmTRmYTl0H8kIlmUrusZwxR5ZXnSuBJ3n3AMEjmpmTTALakxEFEPDJJoVbgcViLtANwk72yu15FlOxczT22uyW8FMkj9kYzcq/+2a/EjaTo62SnUYJ3UTQXvgMKML1yJD+zym2+xscPNmwZFBPN5BQ/64ru/Z51nWB20fWFgW3Rw67jEQMajmVclmUcASWOjHzO87feEprHeilTH+224IHzpmC4aLz/JtIP9EEvqfDUr3fRrxcgtT1DgxV37vPj6Pqn47MHr39AA850CxjFmb1VcwfH6ygXABFlxnVByZDn7xCyBNswtKJqtw=="
}
}
}

View File

@@ -1,25 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-nfs
spec:
capacity:
storage: "150Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/nextcloud
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nextcloud-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "150Gi"
volumeName: nextcloud-nfs

View File

@@ -1,171 +0,0 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
image:
tag: "28"
ingress:
enabled: false
nextcloud:
host: nextcloud.kluster.moll.re
username: admin
password: changeme
## Use an existing secret
existingSecret:
enabled: false
update: 0
# If web server is not binding default port, you can define it
# containerPort: 8080
datadir: /var/www/html/data
persistence:
subPath:
mail:
enabled: false
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
# Default config files
# IMPORTANT: Will be used only if you put extra configs; otherwise defaults will come from nextcloud itself
# Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
# configs:
# s3.config.php: |-
# <?php
# $CONFIG = array (
# 'objectstore' => array(
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
# 'arguments' => array(
# 'bucket' => 'my-bucket',
# 'autocreate' => true,
# 'key' => 'xxx',
# 'secret' => 'xxx',
# 'region' => 'us-east-1',
# 'use_ssl' => true
# )
# )
# );
nginx:
## You need to use an FPM version of the nextcloud image if you want to use nginx!
enabled: false
internalDatabase:
enabled: true
name: nextcloud
##
## External database configuration
##
externalDatabase:
enabled: true
## Supported database engines: mysql or postgresql
type: postgresql
## Database host
host: postgres-postgresql.postgres
## Database user
# user: nextcloud
# ## Database password
# password: test
## Database name
database: nextcloud
## Use an existing secret
existingSecret:
enabled: true
secretName: postgres-password
usernameKey: username
passwordKey: password
##
## MariaDB chart configuration
##
mariadb:
enabled: false
postgresql:
enabled: false
redis:
enabled: false
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron
##
cronjob:
enabled: false
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
## If defined, PVC must be created manually before volume will be bound
existingClaim: nextcloud-nfs
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
livenessProbe:
enabled: true
# disable when upgrading from a previous chart version
## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
enabled: false
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
rbac:
enabled: false

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: websocket
@@ -9,7 +9,7 @@ spec:
# enable websockets
Upgrade: "websocket"
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ntfy-ingressroute

View File

@@ -13,4 +13,4 @@ resources:
images:
- name: binwiederhier/ntfy
newName: binwiederhier/ntfy
newTag: v2.8.0
newTag: v2.10.0

View File

@@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mealie
spec:
selector:
matchLabels:
app: mealie
template:
metadata:
labels:
app: mealie
spec:
containers:
- name: mealie
image: mealie
resources:
limits:
memory: "500Mi"
cpu: "500m"
ports:
- containerPort: 9000
env:
- name: ALLOW_SIGNUP
value: "true"
- name: TZ
value: Europe/Paris
- name: BASE_URL
value: https://recipes.kluster.moll.re
volumeMounts:
- name: mealie-data
mountPath: /app/data
volumes:
- name: mealie-data
persistentVolumeClaim:
claimName: mealie
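
A hedged smoke test for the new Deployment once the rest of the app (Service, PVC, kustomization) shown below is applied; the Service name mealie-web is defined further down:

```bash
kubectl -n recipes rollout status deploy/mealie
kubectl -n recipes port-forward svc/mealie-web 9000:9000 &
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:9000/  # expect 200
```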

apps/recipes/ingress.yaml Normal file
View File

@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: mealie-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`recipes.kluster.moll.re`)
kind: Rule
services:
- name: mealie-web
port: 9000
tls:
certResolver: default-tls

View File

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: recipes
resources:
- namespace.yaml
- deployment.yaml
- pvc.yaml
- service.yaml
- ingress.yaml
images:
- name: mealie
newTag: v1.6.0
newName: ghcr.io/mealie-recipes/mealie
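
The images transformer rewrites the placeholder image name `mealie` in the Deployment to the pinned ghcr.io tag, keeping Renovate bumps confined to this file. To confirm the rewrite (path assumed to be apps/recipes):

```bash
kustomize build apps/recipes | grep 'image:'
# expected: image: ghcr.io/mealie-recipes/mealie:v1.6.0
```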

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/recipes/pvc.yaml Normal file
View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mealie
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce

apps/recipes/service.yaml Normal file
View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: mealie-web
spec:
selector:
app: mealie
ports:
- port: 9000
targetPort: 9000

View File

@@ -18,9 +18,9 @@ spec:
ports:
- containerPort: 7070
volumeMounts:
- name: rss-data
- name: data
mountPath: /data
volumes:
- name: rss-data
- name: data
persistentVolumeClaim:
claimName: rss-claim
claimName: data

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: rss-ingressroute
@@ -14,4 +14,3 @@ spec:
port: 80
tls:
certResolver: default-tls

View File

@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
name: placeholder

View File

@@ -1,9 +1,9 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: rss-claim
name: data
spec:
storageClassName: nfs-client
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:

apps/todos/README.md Normal file
View File

@@ -0,0 +1,6 @@
### Adding a user
```bash
kubectl exec -it -n todos deployments/todos-vikunja -- /app/vikunja/vikunja user create -u <username> -e "<user-email>"
```
You will be prompted for the password.
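
A hedged companion command, assuming the Vikunja CLI's `user list` subcommand, to confirm the account exists afterwards:

```bash
kubectl exec -it -n todos deployments/todos-vikunja -- \
  /app/vikunja/vikunja user list
```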

apps/todos/ingress.yaml Normal file
View File

@@ -0,0 +1,21 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: todos-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`todos.kluster.moll.re`) && PathPrefix(`/api/v1`)
kind: Rule
services:
- name: todos-api
port: 3456
- match: Host(`todos.kluster.moll.re`) && PathPrefix(`/`)
kind: Rule
services:
- name: todos-frontend
port: 80
tls:
certResolver: default-tls
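
Traefik gives longer rules higher priority by default, so requests under /api/v1 reach todos-api while everything else falls through to todos-frontend. A hedged smoke test from outside the cluster:

```bash
# Vikunja's public info endpoint should answer via the API route.
curl -sk https://todos.kluster.moll.re/api/v1/info
# The root path should serve the frontend.
curl -sk -o /dev/null -w '%{http_code}\n' https://todos.kluster.moll.re/
```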

View File

@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: todos
resources:
- namespace.yaml
- pvc.yaml
- ingress.yaml
# helmCharts:
# - name: vikunja
# version: 0.1.5
# repo: https://charts.oecis.io
# valuesFile: values.yaml
# releaseName: todos
# managed by argocd directly

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/todos/pvc.yaml Normal file
View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce

apps/todos/values.yaml Normal file
View File

@@ -0,0 +1,51 @@
######################
# VIKUNJA COMPONENTS #
######################
# You can find the default values that this `values.yaml` overrides in the comment at the top of this file.
api:
enabled: true
image:
tag: 0.22.1
persistence:
# This is where your Vikunja data will live; you can either let
# the chart create a new PVC for you or provide an existing one.
data:
enabled: true
existingClaim: data
accessMode: ReadWriteOnce
size: 10Gi
mountPath: /app/vikunja/files
ingress:
main:
enabled: false
configMaps:
# The configuration for Vikunja's api.
# https://vikunja.io/docs/config-options/
config:
enabled: true
data:
config.yml: |
service:
frontendUrl: https://todos.kluster.moll.re
database:
type: sqlite
path: /app/vikunja/files/vikunja.db
registration: false
env:
frontend:
enabled: true
image:
tag: 0.22.1
ingress:
main:
enabled: false
postgresql:
enabled: false
redis:
enabled: false
typesense:
enabled: false

View File

@@ -1,13 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
namespace: whoami
name: whoami-ingressroute
annotations:
spec:
entryPoints:
- websecure
routes:
- match: Host(`whoami.kluster.moll.re`)
- match: Host(`whoami.kluster.moll.re`) || Host(`homepage.kluster.moll.re`)
kind: Rule
services:
- name: whoami

View File

@@ -12,7 +12,9 @@ spec:
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
# selfHeal: true
# syncPolicy:
# automated:
# prune: true
# selfHeal: false
# DO NOT AUTO SYNC THE APP OF APPS.
# all other apps are auto-synced, but adding new apps should be done manually.
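
With automation disabled here, registering a new Application becomes an explicit action. A sketch, assuming the app-of-apps is simply named `apps` and the argocd CLI is logged in:

```bash
argocd app sync apps
```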

View File

@@ -1,5 +1,5 @@
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: argocd-ingressroute

View File

@@ -0,0 +1,15 @@
# How to restore
1. Port-forward the REST API for gcloud
```bash
kubectl port-forward -n backup service/rclone-gcloud 8000
```
2. Mount the snapshots locally
```bash
restic -r rest:http://127.0.0.1:8000/kluster mount /mnt/restic
```
(The password is in a secret)
3. Copy relevant files to the correct location on the NAS
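
A sketch of step 3; both paths are illustrative, not taken from the repo:

```bash
# Copy an app's data out of the restic FUSE mount onto the NAS export.
cp -a /mnt/restic/snapshots/latest/kluster/<app>/ /srv/nfs/kluster/<app>/
```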

View File

@@ -4,7 +4,7 @@ kind: Kustomization
namespace: backup
# nameSuffix: -backup
resources:
- ../../base
- ../../cronjobs-base
# - ./restic-commands.yaml

Some files were not shown because too many files have changed in this diff Show More