278 Commits

Author SHA1 Message Date
ac4d2c3fa3 using an alternative stack 2024-12-02 13:46:04 +01:00
c237e060fd add incomplete deployment 2024-05-13 14:28:37 +02:00
0eaa9fe774 empty line removed 2024-05-13 14:26:53 +02:00
192e2e869f minecraft 2024-05-13 14:25:49 +02:00
0fd9936db5 gitea runner improvements 2024-05-13 14:25:49 +02:00
1a9d0fc00c Merge pull request 'Update jellyfin/jellyfin Docker tag to v10.9.0' (#94) from renovate/jellyfin-jellyfin-10.x into main
Reviewed-on: #94
2024-05-12 11:07:57 +00:00
a8dfca3c43 Update jellyfin/jellyfin Docker tag to v10.9.0 2024-05-11 19:01:08 +00:00
42e2bc35a5 Merge pull request 'Update ghcr.io/gethomepage/homepage Docker tag to v0.8.13' (#90) from renovate/ghcr.io-gethomepage-homepage-0.x into main
Reviewed-on: #90
2024-05-10 08:46:45 +00:00
7e2e5a56db Merge branch 'main' into renovate/ghcr.io-gethomepage-homepage-0.x 2024-05-10 08:45:47 +00:00
01279dd023 Merge pull request 'Update octodns/octodns Docker tag to v2024.05' (#91) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #91
2024-05-08 13:29:51 +00:00
d6ce07a8a0 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.6.0' (#92) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #92
2024-05-08 13:28:59 +00:00
6eb617086a Update ghcr.io/mealie-recipes/mealie Docker tag to v1.6.0 2024-05-07 12:00:58 +00:00
8137bf8f1b Update apps/immich/kustomization.yaml 2024-05-06 17:59:00 +00:00
5f1dcaabba Update octodns/octodns Docker tag to v2024.05 2024-05-06 15:30:45 +00:00
37bdb32f43 Update ghcr.io/gethomepage/homepage Docker tag to v0.8.13 2024-05-06 05:30:44 +00:00
ca15a6497c Add apps/media/ingress.yaml 2024-05-04 12:10:12 +00:00
095d2d6392 remove limits 2024-05-04 12:47:10 +02:00
b2993c9395 Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.5' (#86) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #86
2024-05-04 09:06:57 +00:00
d7b0f658de Merge pull request 'Update actualbudget/actual-server Docker tag to v24.5.0' (#89) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #89
2024-05-04 09:06:26 +00:00
391c71729b Update actualbudget/actual-server Docker tag to v24.5.0 2024-05-03 17:00:53 +00:00
bee5dd0c0b Update owncloud/ocis Docker tag to v5.0.3 2024-05-02 16:30:46 +00:00
25ab46e69a change base image for k8s conformity 2024-05-02 17:26:36 +02:00
123412e073 small naming mistake 2024-05-02 17:15:30 +02:00
39818887fa add gitea actions 2024-05-02 17:12:43 +02:00
0700609568 Update homeassistant/home-assistant Docker tag to v2024.5 2024-05-01 19:30:44 +00:00
198b24132e Update Helm release metallb to v0.14.5 2024-04-27 09:13:11 +00:00
f6e45d089b Update docker.io/bitnami/sealed-secrets-controller Docker tag to v0.26.2 2024-04-27 09:12:35 +00:00
23eab57208 Update Helm release cloudnative-pg to v0.21.0 2024-04-25 12:01:00 +00:00
a94521f197 update ocis 2024-04-21 12:59:19 +02:00
38f58d86c9 new versiob 2024-04-21 12:26:44 +02:00
76d1c51157 improve thanos/prometheus retention 2024-04-20 19:04:44 +02:00
7aaeeded89 update and improve grafana 2024-04-20 18:37:19 +02:00
9b93016f93 bump immich version 2024-04-20 18:34:52 +02:00
aaf624bb42 bump immich version 2024-04-20 18:27:01 +02:00
8536d91288 Update Helm release immich to v0.6.0 2024-04-20 16:26:29 +00:00
3f62bee199 reduce gitea load by ditching redis 2024-04-20 18:22:16 +02:00
f9f39818a1 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.1' (#77) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #77
2024-04-17 16:29:25 +00:00
a73e6dc4db Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.1 2024-04-17 14:01:07 +00:00
1df7abf987 Merge pull request 'Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12' (#76) from renovate/ghcr.io-gethomepage-homepage-0.x into main
Reviewed-on: #76
2024-04-17 12:54:36 +00:00
0e1bb58c24 Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12 2024-04-17 12:54:36 +00:00
fcd2d2eaa2 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.0' (#75) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #75
2024-04-17 12:54:14 +00:00
455790d3c6 Update ghcr.io/mealie-recipes/mealie Docker tag to v1.5.0 2024-04-16 16:31:00 +00:00
cdbcdba25d Update Helm release gitea to v10.1.4 2024-04-16 10:58:27 +00:00
9dcb06678b remove old filesync deployments (nextcloud) 2024-04-16 12:56:54 +02:00
a4fe0a7fe4 add homepage as a deployment 2024-04-16 12:43:33 +02:00
ece9faa60c Merge pull request 'Update octodns/octodns Docker tag to v2024.04' (#72) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #72
2024-04-16 07:54:27 +00:00
d4bea2994c Merge pull request 'Update Helm release traefik to v27' (#66) from renovate/traefik-27.x into main
Reviewed-on: #66
2024-04-16 07:53:47 +00:00
0ec3bf9ea8 Update Helm release traefik to v27 2024-04-12 08:01:16 +00:00
0c5760b22b Update octodns/octodns Docker tag to v2024.04 2024-04-10 16:30:48 +00:00
e144722d59 fix cnpg syncing issues 2024-04-10 14:01:57 +02:00
bf6e7aa10c mabye like that? 2024-04-06 14:33:57 +02:00
ae53c44428 fix servicemonitors 2024-04-06 14:24:06 +02:00
05d5b02347 Update actualbudget/actual-server Docker tag to v24.4.0 2024-04-06 12:22:05 +00:00
337237a0f8 Update ghcr.io/mealie-recipes/mealie Docker tag to v1.4.0 2024-04-06 12:21:39 +00:00
ccc4b13c35 Update adguard/adguardhome Docker tag to v0.107.48 2024-04-06 12:13:15 +00:00
a6a9c7c217 update home assistant and fix prometheus 2024-04-06 14:12:07 +02:00
bc0f29f028 update immich 2024-04-03 14:11:08 +02:00
e2c9d73728 update to dashboards 2024-04-01 13:23:46 +02:00
442c07f031 bad configmap 2024-04-01 13:11:11 +02:00
8fd9fa6f11 better dashboards 2024-04-01 12:21:50 +02:00
516d7e8e09 like that? 2024-04-01 11:57:06 +02:00
acf9d34b10 Merge branch 'main' of ssh://git.kluster.moll.re:2222/remoll/k3s-infra 2024-04-01 11:47:11 +02:00
3ffead0a14 try fixing grafana 2024-04-01 11:47:01 +02:00
b6bdc09efc Update docker.io/bitnami/sealed-secrets-controller Docker tag to v0.26.1 2024-04-01 09:33:23 +00:00
49b21cde52 proper backup config 2024-03-31 19:37:18 +02:00
deed24aa01 try fixing homeassistant again 2024-03-31 19:28:19 +02:00
9cfb98248d update immich 2024-03-31 19:08:14 +02:00
7bc4beefce Update Helm release cloudnative-pg to v0.20.2 2024-03-31 15:19:09 +00:00
ce9ff68c26 Update binwiederhier/ntfy Docker tag to v2.10.0 2024-03-31 15:18:06 +00:00
8249e7ef01 Update adguard/adguardhome Docker tag to v0.107.46 2024-03-31 15:15:00 +00:00
14e65df483 Update Helm release metallb to v0.14.4 2024-03-31 15:14:18 +00:00
f6fef4278b enable wal for grafana? 2024-03-29 00:55:34 +01:00
ef50df8386 slight mistake 2024-03-28 19:45:27 +01:00
b6df7604ed add missing references 2024-03-28 19:22:59 +01:00
a03d869d0c added dashboards 2024-03-28 19:20:28 +01:00
1063349fbe use sealedsecret 2024-03-28 19:17:19 +01:00
b88c212b57 now with correct secret 2024-03-28 19:10:01 +01:00
38a522a8d6 cleaner monitoring 2024-03-28 19:07:42 +01:00
046936f8f6 fix 2024-03-28 14:04:07 +01:00
309cbc08f5 so? 2024-03-28 13:55:57 +01:00
08b4c7eb5e switch ocis to nfs-provisioner 2024-03-28 13:52:44 +01:00
58e632e0b8 migrate mealie pvc 2024-03-28 13:21:50 +01:00
30d02edebc update rss 2024-03-28 13:19:53 +01:00
e30bfe64ae dum dum 2024-03-28 12:59:51 +01:00
764a3eafb7 switch some apps over to nfs-client 2024-03-28 12:40:48 +01:00
eff07665de add nfs-provisioner with sensible path template 2024-03-28 12:29:16 +01:00
571aebe78d now? 2024-03-27 14:15:13 +01:00
91a2ae5fe8 annoying 2024-03-27 14:13:22 +01:00
f12c21ef18 update vikunja 2024-03-27 14:03:55 +01:00
2a96b288bf or like that? 2024-03-27 09:39:58 +01:00
6f3a5aeab2 okey 2024-03-27 09:37:51 +01:00
b001bd3efc maybe like that? 2024-03-27 09:36:22 +01:00
b54794df35 dum-dum 2024-03-27 09:19:00 +01:00
51c8f7c092 fix the db location 2024-03-27 09:15:25 +01:00
cfb1a87a5b now with correct api path 2024-03-27 09:07:01 +01:00
10483431c6 trying todos like that 2024-03-27 09:04:40 +01:00
3a9450da9d now? 2024-03-27 08:34:48 +01:00
374e23ba1e trying to fix immich 2024-03-27 08:32:46 +01:00
66f703f5e1 update to correct location 2024-03-27 08:25:53 +01:00
4b05b53d72 small fixes 2024-03-27 00:38:34 +01:00
cfbc7fcd0d disable typesense 2024-03-27 00:31:41 +01:00
ffed2aea50 add media back 2024-03-27 00:27:57 +01:00
e674bf5b94 slim down the file sync 2024-03-27 00:12:50 +01:00
133af74ae0 missing namespace resource 2024-03-27 00:05:55 +01:00
f648064304 remove nfs-client 2024-03-26 23:50:27 +01:00
c7180f793a trying like that 2024-03-26 22:58:17 +01:00
4fcdaad297 move prometheus to its own config 2024-03-26 22:13:02 +01:00
f4b99ca037 now perhaps? 2024-03-26 11:16:33 +01:00
588bf774f9 or like that? 2024-03-26 10:58:44 +01:00
e18c661dbd typo 2024-03-26 10:57:18 +01:00
7d65ffea6a remove ocis:// 2024-03-26 10:56:34 +01:00
e460b5324a try differently configured todos 2024-03-26 10:55:25 +01:00
6fe166e60c manage todos 2024-03-24 15:31:59 +01:00
6ceb3816fb cleanup with regards to upcoming migration 2024-03-23 11:45:11 +01:00
19b63263e6 whoopsie 2024-03-22 14:57:17 +01:00
20d46d89d2 also manage ocis 2024-03-22 14:54:30 +01:00
7aee6c7cf0 basic auth maybe? 2024-03-22 14:53:29 +01:00
443da20ff9 steps towards a completely managed cluster 2024-03-20 23:45:08 +01:00
84a47b15b6 increase renovate frequency 2024-03-12 21:28:35 +01:00
40259ee57e Update apps/immich/kustomization.yaml 2024-03-12 14:01:08 +00:00
619368a2fd Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.3' (#54) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #54
2024-03-12 09:04:37 +00:00
3288966b95 Merge pull request 'Update octodns/octodns Docker tag to v2024.03' (#55) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #55
2024-03-12 09:04:16 +00:00
d12d50b906 Update apps/immich/kustomization.yaml 2024-03-12 09:03:55 +00:00
c7f0221062 Update octodns/octodns Docker tag to v2024.03 2024-03-12 09:02:04 +00:00
7819867091 Update homeassistant/home-assistant Docker tag to v2024.3 2024-03-12 09:01:41 +00:00
dd4c3d7a36 Update apps/immich/kustomization.yaml 2024-03-12 08:37:11 +00:00
e66905402e Merge pull request 'Update Helm release immich to v0.4.0' (#47) from renovate/immich-0.x into main
Reviewed-on: #47
2024-03-12 08:35:56 +00:00
1bdb4522c3 Merge pull request 'Update ghcr.io/mealie-recipes/mealie Docker tag to v1.3.2' (#53) from renovate/ghcr.io-mealie-recipes-mealie-1.x into main
Reviewed-on: #53
2024-03-12 08:32:10 +00:00
b5845479c2 Update ghcr.io/mealie-recipes/mealie Docker tag to v1.3.2 2024-03-10 19:01:42 +00:00
f2f31c4f4e Merge pull request 'Update binwiederhier/ntfy Docker tag to v2.9.0' (#52) from renovate/binwiederhier-ntfy-2.x into main
Reviewed-on: #52
2024-03-10 09:57:10 +00:00
ded829500c Update binwiederhier/ntfy Docker tag to v2.9.0 2024-03-09 11:04:03 +00:00
f762f5451b Merge pull request 'Update adguard/adguardhome Docker tag to v0.107.45' (#51) from renovate/adguard-adguardhome-0.x into main
Reviewed-on: #51
2024-03-08 07:42:50 +00:00
709f21998e Update adguard/adguardhome Docker tag to v0.107.45 2024-03-07 18:01:21 +00:00
47f091be83 Merge pull request 'Update actualbudget/actual-server Docker tag to v24.3.0' (#48) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #48
2024-03-07 17:31:17 +00:00
da8be916bf fix bad naming 2024-03-07 13:21:05 +01:00
ad67acb9e7 again 2024-03-07 13:17:50 +01:00
5a7b5a82d7 maybe the service was misconfigured 2024-03-07 13:16:14 +01:00
2c32db61ec why? 2024-03-07 13:13:54 +01:00
141b80d15c man... 2024-03-07 13:11:08 +01:00
bf1d4badbe or directly use the dns name 2024-03-07 13:08:29 +01:00
be48049e22 fix bad syntax 2024-03-07 13:01:21 +01:00
3a629284f3 perhaps now 2024-03-07 12:59:04 +01:00
28c92e727f last chance 2024-03-06 14:48:14 +01:00
9a65c531f1 now? 2024-03-06 14:37:23 +01:00
52a086df73 come on 2024-03-06 14:34:19 +01:00
b728e21a15 expose grpc of store 2024-03-06 14:31:04 +01:00
da32c9c2ce neew 2024-03-06 14:25:47 +01:00
846390600e let's try with query as well 2024-03-06 14:24:07 +01:00
18d7a6b4cb or maybe like that? 2024-03-06 11:34:15 +01:00
31c8e91502 actually don't specify data 2024-03-06 11:31:15 +01:00
f0adf6b5db change user of prometheus to make thanos happy 2024-03-06 08:14:55 +01:00
b24ae9c698 with correct image 2024-03-05 16:44:42 +01:00
f3c108e362 fix 2024-03-05 16:41:54 +01:00
d2a8d92864 also use thanos object store 2024-03-05 16:39:15 +01:00
10816c4bd9 Update actualbudget/actual-server Docker tag to v24.3.0 2024-03-03 20:01:34 +00:00
aca0d4ba21 Update Helm release immich to v0.4.0 2024-03-03 20:01:27 +00:00
1ad56fd27e Merge pull request 'Update Helm release traefik to v26.1.0' (#42) from renovate/traefik-26.x into main
Reviewed-on: #42
2024-03-03 19:33:13 +00:00
773a155627 Update Helm release traefik to v26.1.0 2024-03-03 19:33:13 +00:00
61945b3507 Merge pull request 'Update Helm release metallb to v0.14.3' (#34) from renovate/metallb-0.x into main
Reviewed-on: #34
2024-03-03 19:32:16 +00:00
4aa21cb0cd Update Helm release metallb to v0.14.3 2024-03-03 19:32:16 +00:00
d233ab96eb Merge pull request 'Update Helm release gitea to v10.1.3' (#46) from renovate/gitea-10.x into main
Reviewed-on: #46
2024-03-03 19:31:04 +00:00
df581e0110 Update Helm release gitea to v10.1.3 2024-03-03 19:31:04 +00:00
8a114b9384 remove homarr 2024-03-03 20:30:06 +01:00
ab6506f4f2 update immich 2024-02-21 18:35:13 +01:00
87242d293a Merge pull request 'Update Helm release homarr to v1.0.6' (#38) from renovate/homarr-1.x into main
Reviewed-on: #38
2024-02-13 10:34:15 +00:00
11d46ec295 Merge pull request 'Update Helm release gitea to v10.1.1' (#35) from renovate/gitea-10.x into main
Reviewed-on: #35
2024-02-13 10:33:42 +00:00
1b3702c4c8 Update Helm release gitea to v10.1.1 2024-02-13 10:33:42 +00:00
9b68b4a915 lets be more generous with memory 2024-02-11 18:15:11 +01:00
18889d7391 add other recipes 2024-02-11 11:28:30 +01:00
a38ad1d7e6 bye bye 2024-02-10 19:35:22 +01:00
edcb9158f5 what now? 2024-02-10 19:21:04 +01:00
71b1c252f3 turns out it was important 2024-02-10 19:17:28 +01:00
b30f44d2c6 last chance 2024-02-10 19:16:08 +01:00
85abf0fda6 with services? 2024-02-10 19:04:08 +01:00
5e21ceaad3 lets try this 2024-02-10 18:58:20 +01:00
3f5c1a5a5c add configmap 2024-02-10 10:56:59 +01:00
0195833fc3 service account not needed 2024-02-10 10:54:41 +01:00
64835e16de slight fix 2024-02-10 10:53:20 +01:00
4e11a33855 correct backend 2024-02-10 10:46:38 +01:00
bad024861a add recipes 2024-02-10 10:45:53 +01:00
fe5d6a9014 Merge pull request 'Update adguard/adguardhome Docker tag to v0.107.44' (#39) from renovate/adguard-adguardhome-0.x into main
Reviewed-on: #39
2024-02-08 09:24:43 +00:00
f2898d7e0b Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.2' (#40) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #40
2024-02-08 09:24:05 +00:00
f67f0c8889 Update homeassistant/home-assistant Docker tag to v2024.2 2024-02-07 21:02:14 +00:00
0ccb17d8e1 Update adguard/adguardhome Docker tag to v0.107.44 2024-02-07 11:01:45 +00:00
bb6d417937 Merge pull request 'Update actualbudget/actual-server Docker tag to v24.2.0' (#36) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #36
2024-02-07 10:09:46 +00:00
4e2ebe2540 Merge pull request 'Update octodns/octodns Docker tag to v2024' (#37) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #37
2024-02-07 10:09:26 +00:00
c5310b0f00 Update Helm release homarr to v1.0.6 2024-02-04 17:01:35 +00:00
46ef973f70 Update octodns/octodns Docker tag to v2024 2024-02-03 22:02:18 +00:00
c12d2dc7a6 whoopsie 2024-02-03 22:27:29 +01:00
e28c6ffd52 add physics 2024-02-03 22:19:09 +01:00
7ba6860ea0 Update actualbudget/actual-server Docker tag to v24.2.0 2024-02-03 21:01:51 +00:00
33c23ee42b Merge pull request 'Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1' (#31) from renovate/ghcr.io-immich-app-immich-machine-learning-1.x into main
Reviewed-on: #31
2024-02-03 20:58:07 +00:00
b2f8c8bced Merge branch 'main' into renovate/ghcr.io-immich-app-immich-machine-learning-1.x 2024-02-03 20:57:54 +00:00
d5277d3d6a Merge pull request 'Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1' (#32) from renovate/ghcr.io-immich-app-immich-server-1.x into main
Reviewed-on: #32
2024-02-03 20:56:19 +00:00
e3c90f5ede Merge branch 'main' into renovate/ghcr.io-immich-app-immich-server-1.x 2024-02-03 20:55:47 +00:00
eb5bda63db Merge pull request 'Update Helm release grafana to v7.3.0' (#26) from renovate/grafana-7.x into main
Reviewed-on: #26
2024-02-03 20:54:45 +00:00
a10a216f0e Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1 2024-01-31 20:01:05 +00:00
3cf9fd0b87 Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1 2024-01-31 20:01:03 +00:00
ea1fa1637f Update Helm release grafana to v7.3.0 2024-01-30 15:00:50 +00:00
96abe2a0f5 auto admin 2024-01-23 18:16:40 +01:00
9623f33b59 Merge pull request 'Update Helm release gitea to v10' (#16) from renovate/gitea-10.x into main
Reviewed-on: #16
2024-01-22 10:30:17 +00:00
b065fc7e59 idioto 2024-01-22 11:27:58 +01:00
617ed5601c allow renovate to fetch release notes 2024-01-22 11:11:34 +01:00
7e21ce4181 Update Helm release gitea to v10 2024-01-22 10:00:35 +00:00
eeaed091ab Merge pull request 'Update Helm release metallb to v0.13.12' (#30) from renovate/metallb-0.x into main
Reviewed-on: #30
2024-01-16 08:59:45 +00:00
ee52d2b777 Update Helm release metallb to v0.13.12 2024-01-15 19:00:31 +00:00
384e9fbaec no service account needed 2024-01-15 19:12:19 +01:00
606aded35f argo manage metallb 2024-01-15 19:03:49 +01:00
a3aa8888e9 or like that? 2024-01-14 17:31:24 +01:00
aaeb43e9c3 let's check if we get ips like that 2024-01-14 17:27:37 +01:00
a9b1d02a7e keeping some ips here 2024-01-14 17:22:57 +01:00
76b49270eb fix type 2024-01-14 12:58:42 +01:00
9b57715f92 bad yaml 2024-01-14 12:56:23 +01:00
85a96cf87b bump version 2024-01-14 12:54:33 +01:00
78b4be8fbd next try 2024-01-14 12:51:14 +01:00
7bc10b57ce lets try adding thanos 2024-01-14 12:41:03 +01:00
de26a052e8 QOL improvements 2024-01-11 22:05:05 +01:00
28ff769757 Deploy full on octodns 2024-01-11 21:57:02 +01:00
6a58ea337e forgot secret 2024-01-11 21:38:24 +01:00
2af279c161 still crashes, now due to auth 2024-01-11 21:37:29 +01:00
c26997ff83 single run only 2024-01-11 18:39:13 +01:00
a354464f6e try with local directory 2024-01-11 18:26:37 +01:00
268a9f3a7a correct env vars and labels 2024-01-11 18:12:12 +01:00
4ddeaf6c99 try this 2024-01-11 18:08:35 +01:00
b6f9a818af Execute 2nd command as well 2024-01-11 18:04:55 +01:00
f4670aa471 Add ddns 2024-01-11 17:59:56 +01:00
72a2914c24 correct git target 2024-01-11 17:52:29 +01:00
1d5bc8a9c1 why? 2024-01-11 17:51:01 +01:00
892c412fd9 let's tune it down 2024-01-11 17:46:25 +01:00
b6f7ead955 whoopsie 2024-01-11 17:44:58 +01:00
f033ba16eb correct version 2024-01-11 17:43:31 +01:00
f3ae2c424b use octodns 2024-01-11 17:42:35 +01:00
36035ee84d bump immich version 2024-01-11 10:08:12 +01:00
50679b400a Merge pull request 'Update actualbudget/actual-server Docker tag to v24' (#28) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #28
2024-01-10 16:08:35 +00:00
a68fb5f0a7 Update actualbudget/actual-server Docker tag to v24 2024-01-10 13:00:43 +00:00
5792367b8b Add finance to auto deploy 2024-01-10 13:15:42 +01:00
3699b79f1a let's try these monitorings 2024-01-08 15:48:38 +01:00
e473abda12 Merge pull request 'Update Helm release grafana to v7.0.21' (#25) from renovate/grafana-7.x into main
Reviewed-on: #25
2024-01-08 13:01:14 +00:00
f67f586006 Update Helm release grafana to v7.0.21 2024-01-08 10:00:33 +00:00
61e1276f02 maybe like that 2024-01-07 12:30:51 +01:00
111fd35fc3 needed? 2024-01-07 12:18:06 +01:00
cc4148fb8a correct crds 2024-01-07 12:16:47 +01:00
f1e624985f come on 2024-01-07 12:15:10 +01:00
c8d7d3c854 use traefik 2024-01-07 12:12:46 +01:00
4880503609 Is actually a token 2024-01-07 12:06:53 +01:00
f905ce1611 maybe it wes a token actually? 2024-01-07 12:05:42 +01:00
ecfc65ecdd try like this? 2024-01-07 11:59:41 +01:00
7da1d705a4 update authorization 2024-01-07 11:51:20 +01:00
299cbea97e change ingress slightly 2024-01-07 11:41:05 +01:00
b633d61920 update whoami 2024-01-07 11:39:10 +01:00
bfb8244e59 made a dum dum 2024-01-07 11:37:38 +01:00
33c2df9fa3 add external dns 2024-01-07 11:35:52 +01:00
3d84d6bed1 does servicemonitor accept this? 2024-01-04 18:29:18 +01:00
cf6a931097 fix port names 2024-01-04 18:27:03 +01:00
53c3865072 fix label syntax 2024-01-04 18:23:32 +01:00
d09a3509af trying to monitor syncthing 2024-01-04 18:21:26 +01:00
8c0abc16c4 Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024' (#24) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #24
2024-01-04 08:45:45 +00:00
399969677f Merge pull request 'Update Helm release immich to v0.3.1' (#22) from renovate/immich-0.x into main
Reviewed-on: #22
2024-01-04 08:44:55 +00:00
762756310a Update homeassistant/home-assistant Docker tag to v2024 2024-01-03 21:00:38 +00:00
ec964be7c3 whoopsie 2023-12-31 18:49:54 +01:00
0603da76b2 update gitea metric collection 2023-12-31 18:40:57 +01:00
a437c4228e update some scraping config 2023-12-31 18:26:45 +01:00
d5aab95186 try as a string 2023-12-31 17:58:15 +01:00
3acb329730 try again 2023-12-31 17:55:22 +01:00
73ce4e340f try again 2023-12-31 17:44:42 +01:00
0d4b6f4605 remove label requiremetns 2023-12-31 17:37:51 +01:00
deeb35bbb6 test monitoring 2023-12-31 17:34:11 +01:00
d4c658a28c match all servicemonitors? 2023-12-31 17:13:58 +01:00
1fcebe033b fix annotations 2023-12-31 17:06:13 +01:00
8fe51863f4 fix tag 2023-12-30 10:48:46 +01:00
c4eda4e75d fix tag 2023-12-30 10:45:23 +01:00
9490015728 maybe like that? 2023-12-30 10:42:23 +01:00
a641df167f remove port names 2023-12-30 10:39:55 +01:00
21d100fb62 update service config 2023-12-30 10:38:59 +01:00
26b06c553a deploy syncthing 2023-12-30 10:30:05 +01:00
d51bfcf7db Merge pull request 'Update Helm release homarr to v1.0.4' (#23) from renovate/homarr-1.x into main
Reviewed-on: #23
2023-12-27 17:27:57 +00:00
788c2436fc Update Helm release homarr to v1.0.4 2023-12-27 17:00:32 +00:00
c9f883eaa6 Update Helm release immich to v0.3.1 2023-12-23 16:00:31 +00:00
192 changed files with 2110 additions and 2458 deletions

6
.gitignore vendored
View File

@@ -1,2 +1,6 @@
# Kubernetes secrets
*.secret.yaml
charts/
main.key
# Helm Chart files
charts/

6
.gitmodules vendored Normal file
View File

@@ -0,0 +1,6 @@
[submodule "infrastructure/external-dns/octodns"]
path = infrastructure/external-dns/octodns
url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git
[submodule "apps/monitoring/dashboards"]
path = apps/monitoring/dashboards
url = ssh://git@git.kluster.moll.re:2222/remoll/grafana-dashboards.git

View File

@@ -1,11 +1,9 @@
# Kluster setup and IaaC using argoCD
### Initial setup
#### Requirements:
- A running k3s instance run:
- `metalLB` deployed
- A running k3s instance
- `sealedsecrets` deployed
#### Installing argo and the app-of-apps
@@ -29,5 +27,21 @@ The app-of-apps will bootstrap a fully featured cluster with the following compo
- immich
- ...
#### Recap
- install sealedsecrets see [README](./infrastructure/sealedsecrets/README.md)
```bash
kubectl apply -k infrastructure/sealedsecrets
kubectl apply -f infrastructure/sealedsecrets/main.key
kubectl delete pod -n kube-system -l name=sealed-secrets-controller
```
- install argocd
```bash
kubectl apply -k infrastructure/argocd
```
- wait...
### Adding an application
todo

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: adguard-tls-ingress

View File

@@ -10,7 +10,7 @@ resources:
images:
- name: adguard/adguardhome
newName: adguard/adguardhome
newTag: v0.107.43
newTag: v0.107.48
namespace: adguard

View File

@@ -24,6 +24,8 @@ metadata:
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
externalTrafficPolicy: Local
ports:
- name: dns-tcp
nodePort: 31306
@@ -46,6 +48,7 @@ metadata:
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
externalTrafficPolicy: Local
ports:
- name: dns-udp
nodePort: 30547

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ocis-statefulset
spec:
selector:
matchLabels:
app: ocis
serviceName: ocis-web
replicas: 1
template:
metadata:
labels:
app: ocis
spec:
containers:
- name: ocis
image: ocis
resources:
limits:
memory: "1Gi"
cpu: "1000m"
env:
- name: OCIS_INSECURE
value: "true"
- name: OCIS_URL
value: "https://ocis.kluster.moll.re"
- name: OCIS_LOG_LEVEL
value: "debug"
ports:
- containerPort: 9200
volumeMounts:
- name: config
mountPath: /etc/ocis
# - name: ocis-config-file
# mountPath: /etc/ocis/config.yaml
- name: data
mountPath: /var/lib/ocis
volumes:
# - name: ocis-config
# persistentVolumeClaim:
# claimName: ocis-config
- name: config
secret:
secretName: ocis-config
- name: data
persistentVolumeClaim:
claimName: ocis

18
apps/files/ingress.yaml Normal file
View File

@@ -0,0 +1,18 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ocis-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`ocis.kluster.moll.re`)
kind: Rule
services:
- name: ocis-web
port: 9200
scheme: https
tls:
certResolver: default-tls

View File

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- service.yaml
- pvc.yaml
- deployment.yaml
- ocis-config.sealedsecret.yaml
namespace: files
images:
- name: ocis
newName: owncloud/ocis
newTag: "5.0.3"

File diff suppressed because one or more lines are too long

View File

@@ -1,13 +1,11 @@
```
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
name: ocis
spec:
storageClassName: nfs-client
storageClassName: "nfs-client"
accessModes:
- ReadWriteMany
- ReadWriteOnce
resources:
requests:
storage: 1Mi
```
storage: 150Gi

10
apps/files/service.yaml Normal file
View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: ocis-web
spec:
selector:
app: ocis
ports:
- port: 9200
targetPort: 9200

View File

@@ -1,12 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: finance
name: actualbudget
labels:
app: actualbudget
spec:
# deployment running a single container
selector:
matchLabels:
app: actualbudget
@@ -18,83 +16,19 @@ spec:
spec:
containers:
- name: actualbudget
image: actualbudget/actual-server:latest
image: actualbudget
imagePullPolicy: Always
env:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: actualbudget-data-nfs
- name: data
mountPath: /data
ports:
- containerPort: 5006
name: http
protocol: TCP
volumes:
- name: actualbudget-data-nfs
- name: data
persistentVolumeClaim:
claimName: actualbudget-data-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
# storageClassName: fast
capacity:
storage: "5Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/actualbudget
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
# selector:
# matchLabels:
# directory: "journal-data"
---
apiVersion: v1
kind: Service
metadata:
namespace: finance
name: actualbudget
spec:
selector:
app: actualbudget
ports:
- protocol: TCP
port: 5006
targetPort: 5006
type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: finance
name: actualbudget
spec:
entryPoints:
- websecure
routes:
- match: Host(`actualbudget.kluster.moll.re`)
kind: Rule
services:
- name: actualbudget
port: 5006
tls:
certResolver: default-tls
claimName: data

View File

@@ -0,0 +1,15 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: actualbudget
spec:
entryPoints:
- websecure
routes:
- match: Host(`actualbudget.kluster.moll.re`)
kind: Rule
services:
- name: actualbudget
port: 5006
tls:
certResolver: default-tls

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "data"
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: actualbudget
spec:
selector:
app: actualbudget
ports:
- protocol: TCP
port: 5006
targetPort: 5006
type: ClusterIP

View File

@@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly-importer
name: firefly-importer
namespace: finance
spec:
selector:
matchLabels:
app: firefly-importer
template:
metadata:
labels:
app: firefly-importer
spec:
containers:
- image: fireflyiii/data-importer:latest
imagePullPolicy: Always
name: firefly-importer
resources: {}
ports:
- containerPort: 8080
env:
- name: FIREFLY_III_ACCESS_TOKEN
value: redacted
- name: FIREFLY_III_URL
value: firefly-http:8080
# - name: APP_URL
# value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
---
apiVersion: v1
kind: Service
metadata:
name: firefly-importer-http
namespace: finance
labels:
app: firefly-importer-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly-importer
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-importer-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`importer.finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-importer-http
port: 8080
tls:
certResolver: default-tls

View File

@@ -1,79 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly
name: firefly
namespace: finance
spec:
selector:
matchLabels:
app: firefly
template:
metadata:
labels:
app: firefly
spec:
containers:
- image: fireflyiii/core:latest
imagePullPolicy: Always
name: firefly
resources: {}
ports:
- containerPort: 8080
env:
- name: APP_ENV
value: "local"
- name: APP_KEY
value: iKejRAlgwx2Y/fxdosXjABbNxNzEuJdl
- name: DB_CONNECTION
value: sqlite
- name: APP_URL
value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
volumeMounts:
- mountPath: /var/www/html/storage/database
name: firefly-database
volumes:
- name: firefly-database
persistentVolumeClaim:
claimName: firefly-database-nfs
---
apiVersion: v1
kind: Service
metadata:
name: firefly-http
namespace: finance
labels:
app: firefly-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-http
port: 8080
tls:
certResolver: default-tls

View File

@@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: firefly-database-nfs
labels:
directory: firefly
spec:
# storageClassName: fast
# volumeMode: Filesystem
accessModes:
- ReadOnlyMany
capacity:
storage: "1G"
nfs:
path: /firefly # inside nfs part.
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: firefly-database-nfs
spec:
resources:
requests:
storage: "1G"
# storageClassName: fast
accessModes:
- ReadOnlyMany
volumeName: firefly-database-nfs

View File

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: finance
resources:
- namespace.yaml
- actualbudget.pvc.yaml
- actualbudget.deployment.yaml
- actualbudget.service.yaml
- actualbudget.ingress.yaml
images:
- name: actualbudget
newName: actualbudget/actual-server
newTag: 24.5.0

View File

@@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homarr
resources:
- namespace.yaml
- pvc.yaml
- ingress.yaml
helmCharts:
- name: homarr
releaseName: homarr
repo: https://oben01.github.io/charts/
version: 1.0.1
valuesFile: values.yaml

View File

View File

@@ -1,60 +0,0 @@
# -- Default values for homarr
# -- Declare variables to be passed into your templates.
# -- Number of replicas
replicaCount: 1
env:
# -- Your local time zone
TZ: "Europe/Berlin"
# -- Colors and preferences, possible values dark / light
DEFAULT_COLOR_SCHEME: "dark"
# -- Service configuration
service:
# -- Service type
type: ClusterIP
# -- Service port
port: 7575
# -- Service target port
targetPort: 7575
# -- Ingress configuration
ingress:
enabled: false
persistence:
- name: homarr-config
# -- Enable homarr-config persistent storage
enabled: true
# -- homarr-config storage class name
storageClassName: "nfs-client"
# -- homarr-config access mode
accessMode: "ReadWriteOnce"
persistentVolumeReclaimPolicy: Retain
# -- homarr-config storage size
size: "50Mi"
# -- homarr-config mount path inside the pod
mountPath: "/app/data/configs"
- name: homarr-database
# -- Enable homarr-database persistent storage
enabled: true
# -- homarr-database storage class name
storageClassName: "nfs-client"
# -- homarr-database access mode
accessMode: "ReadWriteOnce"
# -- homarr-database storage size
size: "50Mi"
# -- homarr-database mount path inside the pod
mountPath: "/app/database"
- name: homarr-icons
# -- Enable homarr-icons persistent storage
enabled: true
# -- homarr-icons storage class name
storageClassName: "nfs-client"
# -- homarr-icons access mode
accessMode: "ReadWriteOnce"
# -- homarr-icons storage size
size: "50Mi"
# -- homarr-icons mount path inside the pod
mountPath: "/app/public/icons"

View File

@@ -1,4 +1,3 @@
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -22,7 +21,7 @@ spec:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: config
- name: config-dir
mountPath: /config
resources:
requests:
@@ -32,6 +31,7 @@ spec:
cpu: "2"
memory: "1Gi"
volumes:
- name: config
- name: config-dir
persistentVolumeClaim:
claimName: homeassistant-nfs
claimName: config

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homeassistant-ingress
@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`home.kluster.moll.re`)
- match: Host(`home.kluster.moll.re`) && !Path(`/api/prometheus`)
middlewares:
- name: homeassistant-websocket
kind: Rule
@@ -15,9 +15,8 @@ spec:
port: 8123
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: homeassistant-websocket
@@ -27,6 +26,3 @@ spec:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"

View File

@@ -9,8 +9,10 @@ resources:
- pvc.yaml
- service.yaml
- deployment.yaml
- servicemonitor.yaml
images:
- name: homeassistant/home-assistant
newName: homeassistant/home-assistant
newTag: "2023.12"
newTag: "2024.5"

View File

@@ -1,28 +1,11 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: homeassistant-nfs
spec:
# storageClassName: slow
capacity:
storage: "1Gi"
# volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/homeassistant
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homeassistant-nfs
name: config
spec:
storageClassName: ""
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: homeassistant-nfs

View File

@@ -7,4 +7,5 @@ spec:
app: homeassistant
ports:
- port: 8123
targetPort: 8123
targetPort: 8123
name: http

View File

@@ -0,0 +1,13 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: homeassistant-servicemonitor
labels:
app: homeassistant
spec:
selector:
matchLabels:
app: homeassistant
endpoints:
- port: http
path: /api/prometheus

View File

@@ -0,0 +1,98 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config
labels:
app.kubernetes.io/name: homepage
data:
kubernetes.yaml: "" #|
# mode: cluster
settings.yaml: |
title: "Homepage"
background: https://images.unsplash.com/photo-1547327132-5d20850c62b5?q=80&w=3870&auto=format&fit=crop
cardBlur: sm
#settings.yaml: |
# providers:
# longhorn:
# url: https://longhorn.my.network
custom.css: ""
custom.js: ""
bookmarks.yaml: |
- Developer:
- Github:
- abbr: GH
href: https://github.com/moll-re
services.yaml: |
- Media:
- Jellyfin backend:
href: https://media-backend.kluster.moll.re
ping: media-backend.kluster.moll.re
- Jellyfin vue:
href: https://media.kluster.moll.re
ping: media.kluster.moll.re
- Immich:
href: https://immich.kluster.moll.re
ping: immich.kluster.moll.re
- Productivity:
- OwnCloud:
href: https://ocis.kluster.moll.re
ping: ocis.kluster.moll.re
- ToDo:
href: https://todos.kluster.moll.re
ping: todos.kluster.moll.re
- Finance:
href: https://finance.kluster.moll.re
ping: finance.kluster.moll.re
- Home:
- Home Assistant:
href: https://home.kluster.moll.re
ping: home.kluster.moll.re
- Grafana:
href: https://grafana.kluster.moll.re
ping: grafana.kluster.moll.re
- Recipes:
href: https://recipes.kluster.moll.re
ping: recipes.kluster.moll.re
- Infra:
- Gitea:
href: https://git.kluster.moll.re
ping: git.kluster.moll.re
- ArgoCD:
href: https://argocd.kluster.moll.re
ping: argocd.kluster.moll.re
widgets.yaml: |
# - kubernetes:
# cluster:
# show: true
# cpu: true
# memory: true
# showLabel: true
# label: "cluster"
# nodes:
# show: true
# cpu: true
# memory: true
# showLabel: true
- search:
provider: duckduckgo
- openmeteo:
label: Zürich # optional
latitude: 47.24236
longitude: 8.30439
units: metric # or imperial
cache: 30 # Time in minutes to cache API responses, to stay within limits
format: # optional, Intl.NumberFormat options
maximumFractionDigits: 1
- datetime:
locale: de
format:
dateStyle: long
timeStyle: short
- adguard:
url: http://adguard-home-web.adguard-home:3000
docker.yaml: ""

View File

@@ -0,0 +1,64 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
labels:
app.kubernetes.io/name: homepage
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: homepage
template:
metadata:
labels:
app.kubernetes.io/name: homepage
spec:
# serviceAccountName: homepage
# automountServiceAccountToken: true
dnsPolicy: ClusterFirst
# enableServiceLinks: true
containers:
- name: homepage
image: homepage
imagePullPolicy: Always
ports:
- name: http
containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /app/config/custom.js
name: config
subPath: custom.js
- mountPath: /app/config/custom.css
name: config
subPath: custom.css
- mountPath: /app/config/bookmarks.yaml
name: config
subPath: bookmarks.yaml
- mountPath: /app/config/docker.yaml
name: config
subPath: docker.yaml
- mountPath: /app/config/kubernetes.yaml
name: config
subPath: kubernetes.yaml
- mountPath: /app/config/services.yaml
name: config
subPath: services.yaml
- mountPath: /app/config/settings.yaml
name: config
subPath: settings.yaml
- mountPath: /app/config/widgets.yaml
name: config
subPath: widgets.yaml
- mountPath: /app/config/logs
name: logs
volumes:
- name: config
configMap:
name: config
- name: logs
emptyDir: {}

View File

@@ -1,7 +1,8 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homarr-ingress
name: homepage-ingressroute
spec:
entryPoints:
- websecure
@@ -9,7 +10,7 @@ spec:
- match: Host(`start.kluster.moll.re`)
kind: Rule
services:
- name: homarr
port: 7575
- name: homepage-web
port: 3000
tls:
certResolver: default-tls

View File

@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homepage
resources:
- namespace.yaml
- deployment.yaml
- service.yaml
- configmap.yaml
- ingress.yaml
images:
- name: homepage
newName: ghcr.io/gethomepage/homepage
newTag: v0.8.13

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: homepage-web
labels:
app.kubernetes.io/name: homepage
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: homepage

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: stripprefix
@@ -7,7 +7,7 @@ spec:
prefixes:
- /api
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: websocket
@@ -18,7 +18,7 @@ spec:
# enable websockets
Upgrade: "websocket"
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: immich-ingressroute

View File

@@ -12,6 +12,13 @@ namespace: immich
helmCharts:
- name: immich
releaseName: immich
version: 0.3.0
version: 0.6.0
valuesFile: values.yaml
repo: https://immich-app.github.io/immich-charts
images:
- name: ghcr.io/immich-app/immich-machine-learning
newTag: v1.103.1
- name: ghcr.io/immich-app/immich-server
newTag: v1.103.1

View File

@@ -1,16 +1,15 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: immich-postgres
spec:
instances: 1
imageName: ghcr.io/bo0tzz/cnpgvecto.rs:16-v0.1.11
imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.2
bootstrap:
initdb:
owner: immich
database: immich
secret:
secret:
name: postgres-password
postgresql:
@@ -19,16 +18,12 @@ spec:
storage:
size: 1Gi
storageClass: nfs-client
pvcTemplate:
storageClassName: ""
resources:
requests:
storage: "1Gi"
volumeName: immich-postgres
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: postgres-exporter
spec:
selector:
matchLabels:
"cnpg.io/cluster": immich-postgres
podMetricsEndpoints:
- port: metrics
monitoring:
enablePodMonitor: true

View File

@@ -24,3 +24,17 @@ spec:
requests:
storage: "50Gi"
volumeName: immich-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-postgres
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich-postgres
server: 192.168.1.157
# later used by cnpg

View File

@@ -2,10 +2,6 @@
## You can find it at https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
## Refer there for more detail about the supported values
image:
tag: v1.91.4
# These entries are shared between all the Immich components
env:

View File

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jackett
spec:
selector:
matchLabels:
app: jackett
template:
metadata:
labels:
app: jackett
spec:
containers:
- name: jackett
image: jackett
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 9117
volumeMounts:
- name: media
mountPath: /media
- name: config
mountPath: /config
volumes:
- name: media
persistentVolumeClaim:
claimName: media-downloads
- name: config
persistentVolumeClaim:
claimName: transmission-config
---
apiVersion: v1
kind: Service
metadata:
name: jackett
spec:
selector:
app: jackett
ports:
- protocol: TCP
port: 9117
targetPort: 9117
type: ClusterIP

View File

@@ -0,0 +1,50 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: media-downloads
resources:
- namespace.yaml
- pvc.yaml
- transmission.deployment.yaml
- radarr.deployment.yaml
- jackett.deployment.yaml
images:
- name: transmission
newName: haugene/transmission-openvpn
newTag: 5.3.1
- name: jackett
newName: lscr.io/linuxserver/jackett
newTag: latest
- name: radarr
newName: lscr.io/linuxserver/radarr
newTag: 5.4.6
---
# 2nd version
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: media-downloads
resources:
- namespace.yaml
- pvc.yaml
- qbittorrent.deployment.yaml
- qbittorrent.service.yaml
- qbittorrent.configmap.yaml
- radarr.deployment.yaml
- radarr.service.yaml
- radarr.configmap.yaml
- openvpn.secret.yaml
images:
- name: qbittorrent
newName: binhex/arch-qbittorrentvpn
newTag: 5.0.1-1-02
- name: radarr
newName: hotio/radarr
newTag: release-5.14.0.9383

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged

View File

@@ -0,0 +1,35 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-config
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: qbittorrent-config
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: qbittorrent
labels:
app: qbittorrent
data:
  VPN_ENABLED: "yes"
VPN_USER: vpnbook
VPN_PASS: e83zu76
VPN_PROV: custom
VPN_CLIENT: openvpn
LAN_NETWORK: 10.244.0.0/24,10.9.0.0/24
WEBUI_PORT: "8080"
  ENABLE_STARTUP_SCRIPTS: "no"

View File

@@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: qbittorrent
spec:
selector:
matchLabels:
app: qbittorrent
replicas: 1
template:
metadata:
labels:
app: qbittorrent
spec:
containers:
- name: qbittorrent
image: qbittorrent
ports:
- containerPort: 8080
envFrom:
- configMapRef:
name: qbittorrent
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /config
securityContext:
capabilities:
add:
- NET_ADMIN
volumes:
- name: data
persistentVolumeClaim:
claimName: data
- name: config
persistentVolumeClaim:
claimName: qbittorrent-config

View File

@@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: qbittorrent
spec:
selector:
app: qbittorrent
type: ClusterIP
ports:
- name: qbittorrent
port: 8080
targetPort: 8080

View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: radarr
labels:
app: radarr
data:
# VPN_ENABLED: "true"
# VPN_CONF: "wg0"
# VPN_PROVIDER: "generic"
# VPN_LAN_NETWORK: "192.168.1.0/24"
# VPN_LAN_LEAK_ENABLED: "false"
# VPN_EXPOSE_PORTS_ON_LAN: ""
# VPN_AUTO_PORT_FORWARD: "false"
# VPN_AUTO_PORT_FORWARD_TO_PORTS: ""
# VPN_KEEP_LOCAL_DNS: "false"
# VPN_FIREWALL_TYPE: "auto"
# VPN_HEALTHCHECK_ENABLED: "false"
# PRIVOXY_ENABLED: "false"
# UNBOUND_ENABLED: "false"

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
spec:
selector:
matchLabels:
app: radarr
replicas: 1
template:
metadata:
labels:
app: radarr
spec:
containers:
- name: radarr
image: radarr
ports:
- containerPort: 7878
envFrom:
- configMapRef:
name: radarr
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /config
volumes:
- name: data
persistentVolumeClaim:
claimName: data
- name: config
persistentVolumeClaim:
claimName: radarr-config

View File

@@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: radarr
spec:
selector:
app: radarr
type: ClusterIP
ports:
- name: radarr
port: 7878
targetPort: 7878

View File

@@ -0,0 +1,81 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: transmission
spec:
selector:
matchLabels:
app: transmission
template:
metadata:
labels:
app: transmission
spec:
containers:
- name: transmission
image: transmission
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 9091
env:
- name: OPENVPN_PROVIDER
value: PROTONVPN
- name: LOCAL_NETWORK
value: 10.42.0.0/16
- name: OPENVPN_CONFIG
valueFrom:
secretKeyRef:
name: protonvpn
key: country
- name: OPENVPN_USERNAME
valueFrom:
secretKeyRef:
name: protonvpn
key: username
- name: OPENVPN_PASSWORD
valueFrom:
secretKeyRef:
name: protonvpn
key: password
volumeMounts:
- name: media
mountPath: /data
- name: config
mountPath: /config
securityContext:
capabilities:
add: ["NET_ADMIN"]
volumes:
- name: media
persistentVolumeClaim:
claimName: media-downloads
- name: config
persistentVolumeClaim:
claimName: transmission-config
---
apiVersion: v1
kind: Service
metadata:
name: transmission
spec:
selector:
app: transmission
ports:
- protocol: TCP
port: 9091
targetPort: 9091
type: ClusterIP
---
apiVersion: v1
kind: Secret
metadata:
name: protonvpn
type: Opaque
stringData:
country: at.protonvpn.udp,fr.protonvpn.udp,pl.protonvpn.udp,ch.protonvpn.udp
username: VOYkNuZs5PHjeB8w
password: WvKCOPijcXKOqcL5d7zjXzOPToS4zPid

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-vue-ingress
@@ -17,7 +17,7 @@ spec:
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-backend-ingress
@@ -37,7 +37,7 @@ spec:
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: jellyfin-websocket
@@ -48,7 +48,7 @@ spec:
Connection: keep-alive, Upgrade
Upgrade: WebSocket
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: jellyfin-server-headers
@@ -60,4 +60,4 @@ spec:
accessControlAllowMethods: [ "GET","HEAD","OPTIONS" ] # "POST","PUT"
accessControlAllowOriginList:
- "*"
accessControlMaxAge: 100
accessControlMaxAge: 100

View File

@@ -2,13 +2,15 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: jellyfin
labels:
metrics: prometheus
spec:
selector:
matchLabels:
app: jellyfin-server
app: jellyfin-server-service
endpoints:
- path: /metrics
targetPort: 8096
targetPort: jellyfin
# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/

View File

@@ -10,12 +10,11 @@ resources:
- web.deployment.yaml
- web.service.yaml
- ingress.yaml
- jellyfin.servicemonitor.yaml
images:
- name: jellyfin/jellyfin
newName: jellyfin/jellyfin
newTag: 10.8.13
newTag: 10.9.0
- name: ghcr.io/jellyfin/jellyfin-vue
newName: ghcr.io/jellyfin/jellyfin-vue
newTag: stable-rc.0.3.1

View File

@@ -1,39 +1,21 @@
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: media
name: jellyfin-config-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/jellyfin-config
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: media
name: jellyfin-config-nfs
name: config
spec:
storageClassName: ""
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: jellyfin-config-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: media
name: jellyfin-data-nfs
name: media
spec:
capacity:
storage: "1Ti"
@@ -46,8 +28,7 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: media
name: jellyfin-data-nfs
name: media
spec:
storageClassName: ""
accessModes:
@@ -55,4 +36,4 @@ spec:
resources:
requests:
storage: "1Ti"
volumeName: jellyfin-data-nfs
volumeName: media

View File

@@ -20,13 +20,14 @@ spec:
cpu: "2"
ports:
- containerPort: 8096
name: jellyfin
env:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: jellyfin-config
- name: config
mountPath: /config
- name: jellyfin-data
- name: media
mountPath: /media
livenessProbe:
httpGet:
@@ -35,10 +36,10 @@ spec:
initialDelaySeconds: 100
periodSeconds: 15
volumes:
- name: jellyfin-config
- name: config
persistentVolumeClaim:
claimName: jellyfin-config-nfs
- name: jellyfin-data
claimName: config
- name: media
persistentVolumeClaim:
claimName: jellyfin-data-nfs
claimName: media

View File

@@ -3,6 +3,8 @@ apiVersion: v1
kind: Service
metadata:
name: jellyfin-server
labels:
app: jellyfin-server-service
spec:
selector:
app: jellyfin-server

7
apps/minecraft/README.md Normal file
View File

@@ -0,0 +1,7 @@
## Sending a command
```
kubectl exec -it -n minecraft deploy/minecraft-server -- /bin/bash
mc-send-to-console /help
# or directly
kubectl exec -it -n minecraft deploy/minecraft-server -- mc-send-to-console /help
```

View File

@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: minecraft-server
spec:
selector:
matchLabels:
app: minecraft-server
template:
metadata:
labels:
app: minecraft-server
spec:
containers:
- name: minecraft-server
image: minecraft
resources:
limits:
memory: "4000Mi"
cpu: "2500m"
requests:
memory: "1000Mi"
cpu: "500m"
ports:
- containerPort: 25565
env:
- name: EULA
value: "TRUE"
- name: MODPACK
value: "https://www.curseforge.com/api/v1/mods/711537/files/5076228/download"
- name: VERSION
value: "1.18.2"
# - name: VERSION
# value: "1.16.5"
# - name: MODPACK
# value: "https://mediafilez.forgecdn.net/files/3602/5/VaultHunters-OfficialModpack-1.12.1-Server.zip"
- name: INIT_MEMORY
value: "1G"
- name: MAX_MEMORY
value: "3G"
- name: MOTD
value: "VaultHunters baby!"
- name: ENABLE_RCON
value: "false"
- name: CREATE_CONSOLE_IN_PIPE
value: "true"
- name: ONLINE_MODE
value: "true"
volumeMounts:
- name: minecraft-data
mountPath: /data
volumes:
- name: minecraft-data
persistentVolumeClaim:
claimName: minecraft-data

View File

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minecraft
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
images:
- name: minecraft
newName: itzg/minecraft-server
newTag: java21

11
apps/minecraft/pvc.yaml Normal file
View File

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: minecraft-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: minecraft-server
spec:
selector:
app: minecraft-server
ports:
- port: 25565
targetPort: 25565
type: LoadBalancer
loadBalancerIP: 192.168.3.4

View File

@@ -0,0 +1,17 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: grafana-admin-secret
namespace: monitoring
spec:
encryptedData:
password: AgBe8isrCWd5MuaQq5CpA+P3fDizCCDo23BVauaBJLuMRIYbVwpfahaJW7Ocj3LTXwdeVVPBrOk2D6vESUXu6I0EWc3y/NFN4ZezScxMcjmeaAb+z1zWwdH0FynTPJYOxv1fis1FDTkXDmGy3FXo5NDK9ET899TtulKFkh7UqSxdrRWbD3pegJgqKGPIqDCTAxZN/ssiccfWGS4lHqQBJkXn8DeampcKwjOCvgaBdilF03GoSfpgsqa2Iw2SfTDEobWBWVMMK/RB3/Oi/YJkGwMW3ECUxvTDam8gb0RFA1xjWXoYTLVVP5fK7q7x63ns51HebloxAP1GBrt138N/iDrfbGfjNP8Lx0NFl5y5bTgYN/z8DVTOFf90xxWe+YYERdwllg0Ci1JLNbA+NszXTD4L/HC7a8XuBfjRzxMTeymNjR76jzfPkH6v1EvesOduTfSrahPgS0qS+eGOier1rHxj3EBRhOScY1ut5Bq4oJMNId9nMVbVa6xyq2HyxuJHXV+j6h5FGHmEXn9gIR7wGp8RhtPhKgVGLrHcbHZ5Th2E7eomz1T2NK/ezNP8ZhcwOj/lyGywlW0vhU798zpWhMf57k2OPeuMlfs8Y8y74epBdyBjsrMR4EDctF8RZR3vraxENiMJ6kk1gqKj04ir6HwL7blqwiybIFFnJrp2j7MzgjS4SQ687qMX5Zf5XT03aEE+9W9Epy73tT7zVQKdENCQlcm5
user: AgAdiOivMn0d+nYjYycMZz9QSiS/9QqwHPJQMHkE7/IOou+CJtBknlETNtdv84KZgBQTucufYqu3LR3djOBpdnQsYbIXDxPFgRZQ11pwu/sO2EGifDk218yyzzfZMvx1FL7JL4LI1rKoiHycZowCwsAjEtlICVOOYv1/Plki+6MHXiAGG4r/yUhugGx3VLLX+Poq8oaTeHndgSsFXJege8SfgYR4TsC7pQgsM1UQEFncGIhJYTD2ashmUxFJ+7CJjHqPR0lFRrZXmFvPwTYTCMT+tnSHnCFWtTht8cEi1NxA4kD/eKEX0rOol15EUZnFUws2WqWI634TbyGwZ7km/Yw4XoDxiQR4ar6ulkqb/djcc3cWDYE7PF1m1c+r3iog85S5CSfZ5EvdCHHrbPN9uO2gmoRQWiR5qI70YMxBSnkeLZWN05O1vUuopdXFDTafY7YskxLEdIGHGqFUpUrJZOvBB0zNBdHGgYxFzb5pNmMCC5LPlOuoKjV4yskh9Tgovz06aAvsPxn2WWx6NOJambeziKB5OmSKvPsFofViyGBekVAWSWtt9yJe6lu5OKpBEiA6xhGhQ4ZryTXu9wvVALuPSIwBFITv85sIxjJb80qhJ51wb12QgzLLcPby0HSanyBI1M4jfsXWpK8gIAbDNO+eD7z3PhD9Y/5hPqYKXZ37Geyq23xiyxG8XDj6cL+Ie6k8XipayI4=
template:
metadata:
creationTimestamp: null
name: grafana-admin-secret
namespace: monitoring
type: Opaque

View File

@@ -1,5 +1,5 @@
kind: IngressRoute
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
metadata:
name: grafana-ingress
spec:

View File

@@ -1,4 +1,3 @@
replicas: 1
## Create a headless service for the deployment
@@ -10,13 +9,6 @@ headlessService: false
##
service:
enabled: true
type: ClusterIP
port: 80
targetPort: 3000
# targetPort: 4181 To be used with a proxy extraContainer
annotations: {}
labels: {}
portName: service
serviceMonitor:
## If true, a ServiceMonitor CRD is created for a prometheus operator
@@ -24,42 +16,54 @@ serviceMonitor:
##
enabled: false
ingress:
enabled: false
persistence:
type: pvc
enabled: true
# storageClassName: default
accessModes:
- ReadWriteOnce
size: 10Gi
# annotations: {}
finalizers:
- kubernetes.io/pvc-protection
# selectorLabels: {}
## Sub-directory of the PV to mount. Can be templated.
# subPath: ""
## Name of an existing PVC. Can be templated.
existingClaim: grafana-nfs
## If persistence is not enabled, this allows to mount the
## local storage in-memory to improve performance
##
inMemory:
# credentials
admin:
existingSecret: grafana-admin-secret
userKey: user
passwordKey: password
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Thanos
type: prometheus
url: http://thanos-querier.prometheus.svc:9090
isDefault: true
- name: Prometheus
type: prometheus
url: http://prometheus.prometheus.svc:9090
isDefault: false
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
## ConfigMap data example:
##
## data:
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps:
default: grafana-dashboards
grafana.ini:
wal: true
default_theme: dark
unified_alerting:
enabled: false
## The maximum usage on memory medium EmptyDir would be
## the minimum value between the SizeLimit specified
## here and the sum of memory limits of all containers in a pod
##
# sizeLimit: 300Mi
initChownData:
## If false, data ownership will not be reset at startup
## This allows the prometheus-server to be run with an arbitrary user
##
enabled: true
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
# adminPassword: strongpassword

View File

@@ -6,28 +6,15 @@ namespace: monitoring
resources:
- namespace.yaml
- grafana.pvc.yaml
- influxdb.pvc.yaml
# - influxdb.pvc.yaml
- grafana.ingress.yaml
# prometheus-operator crds
- https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.70.0/bundle.yaml
- prometheus.yaml
- grafana-admin.sealedsecret.yaml
- dashboards/
helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 7.0.19
version: 7.3.9
valuesFile: grafana.values.yaml
- releaseName: influxdb
name: influxdb2
repo: https://helm.influxdata.com/
version: 2.1.2
valuesFile: influxdb.values.yaml
- releaseName: telegraf-speedtest
name: telegraf
repo: https://helm.influxdata.com/
version: 1.8.39
valuesFile: telegraf-speedtest.values.yaml

View File

@@ -1,5 +0,0 @@
### Running `occ` commands:
```
su -s /bin/bash www-data -c "php occ user:list"
```

View File

@@ -1,16 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nextcloud-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`nextcloud.kluster.moll.re`)
kind: Rule
services:
- name: nextcloud
port: 8080
tls:
certResolver: default-tls

View File

@@ -1,16 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.sealedsecret.yaml
namespace: nextcloud
helmCharts:
- name: nextcloud
releaseName: nextcloud
version: 4.5.5
valuesFile: values.yaml
repo: https://nextcloud.github.io/helm/

View File

@@ -1,22 +0,0 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "postgres-password",
"namespace": "nextcloud",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "postgres-password",
"namespace": "nextcloud",
"creationTimestamp": null
}
},
"encryptedData": {
"password": "AgCTmvBe9YFnyWOdz02rxr0hTXnWuVLeUt5dpieWMzl4cVMBj7WcyyODWtNd+eQOLARRssGNZAP4C9gH90iVRFAW1aU+NeA76oceXE5Kiiqoc8T30wE5FC6/UbTjQYRH520NF4wcCQKm//iH8o5uI2+NxZW4goeuShibXK9sijFVNXxUuTeXTmaSJjEPyB+pnmPwjzw+qjhkJJADefh9oryy5+t9ecCwXDiI/2ce2n1Vawm/Nq6/0rZMUSsF8XSiTFczKMunuGMhxGEyyx/I8NZd4XMXGSnBo0YZF7jR9+eRHIjuenPHq1kfEid2Ps4fhFSE8mEecnK7w5xE3r0XeTNHQcTId1yYneK/LQfcRkzInuRddytTwTAmsoSjROcjKjAvtyZSM81pFWJsMQ7bSVXOC0K2wvEz9khDT0RIoR/8tMh2G737F15raTe9Ggbgy3DHst4mYIpoWV/slHrOF0vR9j7X+MRN9R1cVtI1coof/tVSWQsLvv0AJfB4/6dUl+i/yNO/j+4c3WolGwqyXd+oxsZK1VrSwSCBZwBO17BmePJL2QsPVRdutq06TrlvGqP4wXySH9LRuHr3sWgr2VuDV00w+UvuU7ExI+16dWh7jrn/rvIBQSJlHDhl5+VpyM0WTMy5kSfO6nits73ZzT7BAoSU7AeQOMj3t+cUiEq9f9dk7em7QxWMuWg6QIJ+ZZ2+CCBms4rSE4x2glOxanNX/HktQg==",
"username": "AgCxJKzhsF7yNJesK5oLJP62kjFnX4UUNQ2NrHl02Hv6MAzi/AUEV3uJSXXIi3H/uMJSMxRpJQjIDsrznYVI0YHOoz1M8/y1dx8xotFv/i0XByI9sMuGtesop7ncmQbEPMaJ3pqTJyaGkEwcsEMGmwwYiRfJHmEhhCYtzEc5IAnx+nmk//HYsrSWKpJGSWl0LvdMJsnsTxrWoJjaYTW3J0Of3VOOmgkuwIFKyXW9S2cUbAco8xVYchbyiHc8LXbS3izyAidRzg1OWyqvTGMIKJDQZ3ibIiXheon5ZeYjj0fkEkv3TrB7WoKdo0090OY1eHabqAPHT8aP+WG1g6TAzbJEtg+zFfYDKIw5Tp1WkRlsD2me4HycGuZbsaXgP5vWlxF5+rULUzUgxfmTRmYTl0H8kIlmUrusZwxR5ZXnSuBJ3n3AMEjmpmTTALakxEFEPDJJoVbgcViLtANwk72yu15FlOxczT22uyW8FMkj9kYzcq/+2a/EjaTo62SnUYJ3UTQXvgMKML1yJD+zym2+xscPNmwZFBPN5BQ/64ru/Z51nWB20fWFgW3Rw67jEQMajmVclmUcASWOjHzO87feEprHeilTH+224IHzpmC4aLz/JtIP9EEvqfDUr3fRrxcgtT1DgxV37vPj6Pqn47MHr39AA850CxjFmb1VcwfH6ygXABFlxnVByZDn7xCyBNswtKJqtw=="
}
}
}

View File

@@ -1,25 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-nfs
spec:
capacity:
storage: "150Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/nextcloud
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nextcloud-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "150Gi"
volumeName: nextcloud-nfs

View File

@@ -1,171 +0,0 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
image:
tag: "28"
ingress:
enabled: false
nextcloud:
host: nextcloud.kluster.moll.re
username: admin
password: changeme
## Use an existing secret
existingSecret:
enabled: false
update: 0
# If web server is not binding default port, you can define it
# containerPort: 8080
datadir: /var/www/html/data
persistence:
subPath:
mail:
enabled: false
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
# Default config files
# IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
# Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
# configs:
# s3.config.php: |-
# <?php
# $CONFIG = array (
# 'objectstore' => array(
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
# 'arguments' => array(
# 'bucket' => 'my-bucket',
# 'autocreate' => true,
# 'key' => 'xxx',
# 'secret' => 'xxx',
# 'region' => 'us-east-1',
# 'use_ssl' => true
# )
# )
# );
nginx:
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
enabled: false
internalDatabase:
enabled: true
name: nextcloud
##
## External database configuration
##
externalDatabase:
enabled: true
## Supported database engines: mysql or postgresql
type: postgresql
## Database host
host: postgres-postgresql.postgres
## Database user
# user: nextcloud
# ## Database password
# password: test
## Database name
database: nextcloud
## Use a existing secret
existingSecret:
enabled: true
secretName: postgres-password
usernameKey: username
passwordKey: password
##
## MariaDB chart configuration
##
mariadb:
enabled: false
postgresql:
enabled: false
redis:
enabled: false
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron
##
cronjob:
enabled: false
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
## If defined, PVC must be created manually before volume will be bound
existingClaim: nextcloud-nfs
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
livenessProbe:
enabled: true
# disable when upgrading from a previous chart version
## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
enabled: false
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
rbac:
enabled: false

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: websocket
@@ -9,7 +9,7 @@ spec:
# enable websockets
Upgrade: "websocket"
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ntfy-ingressroute

View File

@@ -13,4 +13,4 @@ resources:
images:
- name: binwiederhier/ntfy
newName: binwiederhier/ntfy
newTag: v2.8.0
newTag: v2.10.0

View File

@@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mealie
spec:
selector:
matchLabels:
app: mealie
template:
metadata:
labels:
app: mealie
spec:
containers:
- name: mealie
image: mealie
resources:
limits:
memory: "500Mi"
cpu: "500m"
ports:
- containerPort: 9000
env:
- name: ALLOW_SIGNUP
value: "true"
- name: TZ
value: Europe/Paris
- name: BASE_URL
value: https://recipes.kluster.moll.re
volumeMounts:
- name: mealie-data
mountPath: /app/data
volumes:
- name: mealie-data
persistentVolumeClaim:
claimName: mealie

16
apps/recipes/ingress.yaml Normal file
View File

@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: mealie-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`recipes.kluster.moll.re`)
kind: Rule
services:
- name: mealie-web
port: 9000
tls:
certResolver: default-tls

View File

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: recipes
resources:
- namespace.yaml
- deployment.yaml
- pvc.yaml
- service.yaml
- ingress.yaml
images:
- name: mealie
newTag: v1.6.0
newName: ghcr.io/mealie-recipes/mealie

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

12
apps/recipes/pvc.yaml Normal file
View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mealie
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce

10
apps/recipes/service.yaml Normal file
View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: mealie-web
spec:
selector:
app: mealie
ports:
- port: 9000
targetPort: 9000

View File

@@ -18,9 +18,9 @@ spec:
ports:
- containerPort: 7070
volumeMounts:
- name: rss-data
- name: data
mountPath: /data
volumes:
- name: rss-data
- name: data
persistentVolumeClaim:
claimName: rss-claim
claimName: data

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: rss-ingressroute
@@ -14,4 +14,3 @@ spec:
port: 80
tls:
certResolver: default-tls

View File

@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
name: placeholder

View File

@@ -1,9 +1,9 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: rss-claim
name: data
spec:
storageClassName: nfs-client
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:

6
apps/todos/README.md Normal file
View File

@@ -0,0 +1,6 @@
### Adding a user
```bash
kubectl exec -it -n todos deployments/todos-vikunja -- /app/vikunja/vikunja user create -u <username> -e "<user-email>"
```
You will be prompted for the password interactively.

21
apps/todos/ingress.yaml Normal file
View File

@@ -0,0 +1,21 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: todos-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`todos.kluster.moll.re`) && PathPrefix(`/api/v1`)
kind: Rule
services:
- name: todos-api
port: 3456
- match: Host(`todos.kluster.moll.re`) && PathPrefix(`/`)
kind: Rule
services:
- name: todos-frontend
port: 80
tls:
certResolver: default-tls

View File

@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: todos
resources:
- namespace.yaml
- pvc.yaml
- ingress.yaml
# helmCharts:
# - name: vikunja
# version: 0.1.5
# repo: https://charts.oecis.io
# valuesFile: values.yaml
# releaseName: todos
# managed by argocd directly

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

12
apps/todos/pvc.yaml Normal file
View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce

51
apps/todos/values.yaml Normal file
View File

@@ -0,0 +1,51 @@
######################
# VIKUNJA COMPONENTS #
######################
# You can find the default values that this `values.yaml` overrides, in the comment at the top of this file.
api:
enabled: true
image:
tag: 0.22.1
persistence:
# This is where your Vikunja data will live; you can either let
# the chart create a new PVC for you or provide an existing one.
data:
enabled: true
existingClaim: data
accessMode: ReadWriteOnce
size: 10Gi
mountPath: /app/vikunja/files
ingress:
main:
enabled: false
configMaps:
# The configuration for Vikunja's api.
# https://vikunja.io/docs/config-options/
config:
enabled: true
data:
config.yml: |
service:
frontendUrl: https://todos.kluster.moll.re
database:
type: sqlite
path: /app/vikunja/files/vikunja.db
registration: false
env:
frontend:
enabled: true
image:
tag: 0.22.1
ingress:
main:
enabled: false
postgresql:
enabled: false
redis:
enabled: false
typesense:
enabled: false

View File

@@ -1,13 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
namespace: whoami
name: whoami-ingressroute
annotations:
spec:
entryPoints:
- websecure
routes:
- match: Host(`whoami.kluster.moll.re`) || Host(`homepage.kluster.moll.re`) || Host(`moll.re`)
- match: Host(`whoami.kluster.moll.re`) || Host(`homepage.kluster.moll.re`)
kind: Rule
services:
- name: whoami

View File

@@ -12,7 +12,9 @@ spec:
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
# selfHeal: true
# syncPolicy:
# automated:
# prune: true
# selfHeal: false
# DO NOT AUTO SYNC THE APP OF APPS.
# all other apps are auto-synced, but adding new apps should be done manually.

View File

@@ -1,5 +1,5 @@
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: argocd-ingressroute

View File

@@ -0,0 +1,15 @@
# How to restore
1. Port forward the rest api for gcloud
```bash
kubectl port-forward -n backup service/rclone-gcloud 8000
```
2. Load the snapshots locally
```bash
restic -r rest:http://127.0.0.1:8000/kluster mount /mnt/restic
```
(The password is in a secret)
3. Copy relevant files to the correct location on the NAS

Some files were not shown because too many files have changed in this diff Show More