154 Commits

SHA1 Message Date
d143a90228 testing a few sample (joint) configurations 2024-03-03 20:35:37 +01:00
1ad56fd27e Merge pull request 'Update Helm release traefik to v26.1.0' (#42) from renovate/traefik-26.x into main
Reviewed-on: #42
2024-03-03 19:33:13 +00:00
773a155627 Update Helm release traefik to v26.1.0 2024-03-03 19:33:13 +00:00
61945b3507 Merge pull request 'Update Helm release metallb to v0.14.3' (#34) from renovate/metallb-0.x into main
Reviewed-on: #34
2024-03-03 19:32:16 +00:00
4aa21cb0cd Update Helm release metallb to v0.14.3 2024-03-03 19:32:16 +00:00
d233ab96eb Merge pull request 'Update Helm release gitea to v10.1.3' (#46) from renovate/gitea-10.x into main
Reviewed-on: #46
2024-03-03 19:31:04 +00:00
df581e0110 Update Helm release gitea to v10.1.3 2024-03-03 19:31:04 +00:00
8a114b9384 remove homarr 2024-03-03 20:30:06 +01:00
ab6506f4f2 update immich 2024-02-21 18:35:13 +01:00
87242d293a Merge pull request 'Update Helm release homarr to v1.0.6' (#38) from renovate/homarr-1.x into main
Reviewed-on: #38
2024-02-13 10:34:15 +00:00
11d46ec295 Merge pull request 'Update Helm release gitea to v10.1.1' (#35) from renovate/gitea-10.x into main
Reviewed-on: #35
2024-02-13 10:33:42 +00:00
1b3702c4c8 Update Helm release gitea to v10.1.1 2024-02-13 10:33:42 +00:00
9b68b4a915 lets be more generous with memory 2024-02-11 18:15:11 +01:00
18889d7391 add other recipes 2024-02-11 11:28:30 +01:00
a38ad1d7e6 bye bye 2024-02-10 19:35:22 +01:00
edcb9158f5 what now? 2024-02-10 19:21:04 +01:00
71b1c252f3 turns out it was important 2024-02-10 19:17:28 +01:00
b30f44d2c6 last chance 2024-02-10 19:16:08 +01:00
85abf0fda6 with services? 2024-02-10 19:04:08 +01:00
5e21ceaad3 lets try this 2024-02-10 18:58:20 +01:00
3f5c1a5a5c add configmap 2024-02-10 10:56:59 +01:00
0195833fc3 service account not needed 2024-02-10 10:54:41 +01:00
64835e16de slight fix 2024-02-10 10:53:20 +01:00
4e11a33855 correct backend 2024-02-10 10:46:38 +01:00
bad024861a add recipes 2024-02-10 10:45:53 +01:00
fe5d6a9014 Merge pull request 'Update adguard/adguardhome Docker tag to v0.107.44' (#39) from renovate/adguard-adguardhome-0.x into main
Reviewed-on: #39
2024-02-08 09:24:43 +00:00
f2898d7e0b Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.2' (#40) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #40
2024-02-08 09:24:05 +00:00
f67f0c8889 Update homeassistant/home-assistant Docker tag to v2024.2 2024-02-07 21:02:14 +00:00
0ccb17d8e1 Update adguard/adguardhome Docker tag to v0.107.44 2024-02-07 11:01:45 +00:00
bb6d417937 Merge pull request 'Update actualbudget/actual-server Docker tag to v24.2.0' (#36) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #36
2024-02-07 10:09:46 +00:00
4e2ebe2540 Merge pull request 'Update octodns/octodns Docker tag to v2024' (#37) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #37
2024-02-07 10:09:26 +00:00
c5310b0f00 Update Helm release homarr to v1.0.6 2024-02-04 17:01:35 +00:00
46ef973f70 Update octodns/octodns Docker tag to v2024 2024-02-03 22:02:18 +00:00
c12d2dc7a6 whoopsie 2024-02-03 22:27:29 +01:00
e28c6ffd52 add physics 2024-02-03 22:19:09 +01:00
7ba6860ea0 Update actualbudget/actual-server Docker tag to v24.2.0 2024-02-03 21:01:51 +00:00
33c23ee42b Merge pull request 'Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1' (#31) from renovate/ghcr.io-immich-app-immich-machine-learning-1.x into main
Reviewed-on: #31
2024-02-03 20:58:07 +00:00
b2f8c8bced Merge branch 'main' into renovate/ghcr.io-immich-app-immich-machine-learning-1.x 2024-02-03 20:57:54 +00:00
d5277d3d6a Merge pull request 'Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1' (#32) from renovate/ghcr.io-immich-app-immich-server-1.x into main
Reviewed-on: #32
2024-02-03 20:56:19 +00:00
e3c90f5ede Merge branch 'main' into renovate/ghcr.io-immich-app-immich-server-1.x 2024-02-03 20:55:47 +00:00
eb5bda63db Merge pull request 'Update Helm release grafana to v7.3.0' (#26) from renovate/grafana-7.x into main
Reviewed-on: #26
2024-02-03 20:54:45 +00:00
a10a216f0e Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1 2024-01-31 20:01:05 +00:00
3cf9fd0b87 Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1 2024-01-31 20:01:03 +00:00
ea1fa1637f Update Helm release grafana to v7.3.0 2024-01-30 15:00:50 +00:00
96abe2a0f5 auto admin 2024-01-23 18:16:40 +01:00
9623f33b59 Merge pull request 'Update Helm release gitea to v10' (#16) from renovate/gitea-10.x into main
Reviewed-on: #16
2024-01-22 10:30:17 +00:00
b065fc7e59 idioto 2024-01-22 11:27:58 +01:00
617ed5601c allow renovate to fetch release notes 2024-01-22 11:11:34 +01:00
7e21ce4181 Update Helm release gitea to v10 2024-01-22 10:00:35 +00:00
eeaed091ab Merge pull request 'Update Helm release metallb to v0.13.12' (#30) from renovate/metallb-0.x into main
Reviewed-on: #30
2024-01-16 08:59:45 +00:00
ee52d2b777 Update Helm release metallb to v0.13.12 2024-01-15 19:00:31 +00:00
384e9fbaec no service account needed 2024-01-15 19:12:19 +01:00
606aded35f argo manage metallb 2024-01-15 19:03:49 +01:00
a3aa8888e9 or like that? 2024-01-14 17:31:24 +01:00
aaeb43e9c3 let's check if we get ips like that 2024-01-14 17:27:37 +01:00
a9b1d02a7e keeping some ips here 2024-01-14 17:22:57 +01:00
76b49270eb fix type 2024-01-14 12:58:42 +01:00
9b57715f92 bad yaml 2024-01-14 12:56:23 +01:00
85a96cf87b bump version 2024-01-14 12:54:33 +01:00
78b4be8fbd next try 2024-01-14 12:51:14 +01:00
7bc10b57ce lets try adding thanos 2024-01-14 12:41:03 +01:00
de26a052e8 QOL improvements 2024-01-11 22:05:05 +01:00
28ff769757 Deploy full on octodns 2024-01-11 21:57:02 +01:00
6a58ea337e forgot secret 2024-01-11 21:38:24 +01:00
2af279c161 still crashes, now due to auth 2024-01-11 21:37:29 +01:00
c26997ff83 single run only 2024-01-11 18:39:13 +01:00
a354464f6e try with local directory 2024-01-11 18:26:37 +01:00
268a9f3a7a correct env vars and labels 2024-01-11 18:12:12 +01:00
4ddeaf6c99 try this 2024-01-11 18:08:35 +01:00
b6f9a818af Execute 2nd command as well 2024-01-11 18:04:55 +01:00
f4670aa471 Add ddns 2024-01-11 17:59:56 +01:00
72a2914c24 correct git target 2024-01-11 17:52:29 +01:00
1d5bc8a9c1 why? 2024-01-11 17:51:01 +01:00
892c412fd9 let's tune it down 2024-01-11 17:46:25 +01:00
b6f7ead955 whoopsie 2024-01-11 17:44:58 +01:00
f033ba16eb correct version 2024-01-11 17:43:31 +01:00
f3ae2c424b use octodns 2024-01-11 17:42:35 +01:00
36035ee84d bump immich version 2024-01-11 10:08:12 +01:00
50679b400a Merge pull request 'Update actualbudget/actual-server Docker tag to v24' (#28) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #28
2024-01-10 16:08:35 +00:00
a68fb5f0a7 Update actualbudget/actual-server Docker tag to v24 2024-01-10 13:00:43 +00:00
5792367b8b Add finance to auto deploy 2024-01-10 13:15:42 +01:00
3699b79f1a let's try these monitorings 2024-01-08 15:48:38 +01:00
e473abda12 Merge pull request 'Update Helm release grafana to v7.0.21' (#25) from renovate/grafana-7.x into main
Reviewed-on: #25
2024-01-08 13:01:14 +00:00
f67f586006 Update Helm release grafana to v7.0.21 2024-01-08 10:00:33 +00:00
61e1276f02 maybe like that 2024-01-07 12:30:51 +01:00
111fd35fc3 needed? 2024-01-07 12:18:06 +01:00
cc4148fb8a correct crds 2024-01-07 12:16:47 +01:00
f1e624985f come on 2024-01-07 12:15:10 +01:00
c8d7d3c854 use traefik 2024-01-07 12:12:46 +01:00
4880503609 Is actually a token 2024-01-07 12:06:53 +01:00
f905ce1611 maybe it wes a token actually? 2024-01-07 12:05:42 +01:00
ecfc65ecdd try like this? 2024-01-07 11:59:41 +01:00
7da1d705a4 update authorization 2024-01-07 11:51:20 +01:00
299cbea97e change ingress slightly 2024-01-07 11:41:05 +01:00
b633d61920 update whoami 2024-01-07 11:39:10 +01:00
bfb8244e59 made a dum dum 2024-01-07 11:37:38 +01:00
33c2df9fa3 add external dns 2024-01-07 11:35:52 +01:00
3d84d6bed1 does servicemonitor accept this? 2024-01-04 18:29:18 +01:00
cf6a931097 fix port names 2024-01-04 18:27:03 +01:00
53c3865072 fix label syntax 2024-01-04 18:23:32 +01:00
d09a3509af trying to monitor syncthing 2024-01-04 18:21:26 +01:00
8c0abc16c4 Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024' (#24) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #24
2024-01-04 08:45:45 +00:00
399969677f Merge pull request 'Update Helm release immich to v0.3.1' (#22) from renovate/immich-0.x into main
Reviewed-on: #22
2024-01-04 08:44:55 +00:00
762756310a Update homeassistant/home-assistant Docker tag to v2024 2024-01-03 21:00:38 +00:00
ec964be7c3 whoopsie 2023-12-31 18:49:54 +01:00
0603da76b2 update gitea metric collection 2023-12-31 18:40:57 +01:00
a437c4228e update some scraping config 2023-12-31 18:26:45 +01:00
d5aab95186 try as a string 2023-12-31 17:58:15 +01:00
3acb329730 try again 2023-12-31 17:55:22 +01:00
73ce4e340f try again 2023-12-31 17:44:42 +01:00
0d4b6f4605 remove label requiremetns 2023-12-31 17:37:51 +01:00
deeb35bbb6 test monitoring 2023-12-31 17:34:11 +01:00
d4c658a28c match all servicemonitors? 2023-12-31 17:13:58 +01:00
1fcebe033b fix annotations 2023-12-31 17:06:13 +01:00
8fe51863f4 fix tag 2023-12-30 10:48:46 +01:00
c4eda4e75d fix tag 2023-12-30 10:45:23 +01:00
9490015728 maybe like that? 2023-12-30 10:42:23 +01:00
a641df167f remove port names 2023-12-30 10:39:55 +01:00
21d100fb62 update service config 2023-12-30 10:38:59 +01:00
26b06c553a deploy syncthing 2023-12-30 10:30:05 +01:00
d51bfcf7db Merge pull request 'Update Helm release homarr to v1.0.4' (#23) from renovate/homarr-1.x into main
Reviewed-on: #23
2023-12-27 17:27:57 +00:00
788c2436fc Update Helm release homarr to v1.0.4 2023-12-27 17:00:32 +00:00
c9e6d08dcd temporary home page 2023-12-26 14:56:57 +01:00
6b2e9f7165 small updates 2023-12-26 14:54:49 +01:00
8618468534 more ddns verbosity 2023-12-26 14:52:09 +01:00
94d6c0f523 update to match bash syntax 2023-12-26 14:37:43 +01:00
9aca8e9e0b add automatic dns updates 2023-12-26 14:34:57 +01:00
72b7734535 postgres metrics 2023-12-24 14:42:04 +01:00
28f33f8ff7 update misconfigs 2023-12-24 14:09:11 +01:00
4cf26679c6 add prometheus monitoring 2023-12-24 13:44:22 +01:00
1cd4df8b8f update prom cfg 2023-12-24 11:33:32 +01:00
adeb333954 add svc 2023-12-23 20:40:34 +01:00
e6bd080c6e switch to prometheus operator 2023-12-23 20:20:27 +01:00
c9f883eaa6 Update Helm release immich to v0.3.1 2023-12-23 16:00:31 +00:00
014309bad6 add prometheus 2023-12-23 15:39:03 +01:00
c61698fad9 correct vector version 2023-12-22 01:21:35 +01:00
8c21d58529 vectors finally 2023-12-22 00:51:38 +01:00
722b7c3fb6 correct pg version 2023-12-22 00:34:44 +01:00
b852da0321 try bumping the version 2023-12-22 00:19:32 +01:00
9c5affeff6 update immich 2023-12-22 00:06:00 +01:00
b6c2f57acf new db 2023-12-22 00:03:18 +01:00
2e4e033c36 local postgres 2023-12-22 00:00:30 +01:00
285a7541ca fix 2023-12-21 18:02:22 +01:00
dbf58027d8 trying cloudnative postgres 2023-12-21 18:00:20 +01:00
2f9019b6ba fixing pvc 2023-12-21 12:37:29 +01:00
1743ffca74 grafana cleanup 2023-12-21 12:28:48 +01:00
ea7527c143 Merge pull request 'Update Helm release grafana to v7' (#20) from renovate/grafana-7.x into main
Reviewed-on: #20
2023-12-21 11:17:01 +00:00
c27b289866 Update Helm release grafana to v7 2023-12-21 10:00:38 +00:00
4cbd95fd78 Merge pull request 'Update Helm release grafana to v6.61.2' (#19) from renovate/grafana-6.x into main
Reviewed-on: #19
2023-12-21 09:12:50 +00:00
5cfb2a02e3 Merge pull request 'Update Helm release telegraf to v1.8.39' (#18) from renovate/telegraf-1.x into main
Reviewed-on: #18
2023-12-21 09:12:21 +00:00
82559e848a Merge pull request 'Update adguard/adguardhome Docker tag to v0.107.43' (#9) from renovate/adguard-adguardhome-0.x into main
Reviewed-on: #9
2023-12-21 09:11:54 +00:00
2f31cd6934 Update Helm release grafana to v6.61.2 2023-12-18 13:00:30 +00:00
4fdd4a39f5 Update Helm release telegraf to v1.8.39 2023-12-18 12:00:33 +00:00
86d32efc64 Update adguard/adguardhome Docker tag to v0.107.43 2023-12-11 15:00:22 +00:00
123 changed files with 2686 additions and 1586 deletions

.gitmodules

@@ -0,0 +1,3 @@
[submodule "infrastructure/external-dns/octodns"]
    path = infrastructure/external-dns/octodns
    url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git


@@ -4,8 +4,7 @@
 ### Initial setup
 #### Requirements:
-- A running k3s instance run:
-- `metalLB` deployed
+- A running k3s instance
 - `sealedsecrets` deployed
 #### Installing argo and the app-of-apps
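(Aside: the app-of-apps bootstrap referenced by this heading usually amounts to applying a single Argo CD Application that points back at this repository and syncs everything under it. A minimal sketch; the repoURL and path below are placeholders, not values taken from this diff:)

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: app-of-apps
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://git.example.com/user/kluster.git  # placeholder, not the real repo URL
    targetRevision: main
    path: apps  # placeholder directory holding the child Application manifests
  destination:
    server: https://kubernetes.default.svc
  syncPolicy:
    automated:
      prune: true
      selfHeal: true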


@@ -10,7 +10,7 @@ resources:
 images:
   - name: adguard/adguardhome
     newName: adguard/adguardhome
-    newTag: v0.107.42
+    newTag: v0.107.44
 namespace: adguard


@@ -24,6 +24,8 @@ metadata:
 spec:
   allocateLoadBalancerNodePorts: true
   loadBalancerIP: 192.168.3.2
+  externalTrafficPolicy: Local
   ports:
     - name: dns-tcp
       nodePort: 31306
@@ -46,6 +48,7 @@ metadata:
 spec:
   allocateLoadBalancerNodePorts: true
   loadBalancerIP: 192.168.3.2
+  externalTrafficPolicy: Local
   ports:
     - name: dns-udp
       nodePort: 30547

apps/files/README.md

@@ -0,0 +1,8 @@
# File sync
My personal cross-platform file sync: Syncthing for my Android and Linux clients, Nextcloud for my iOS clients.
## Overview
Both services share a common persistent volume, which lets each apply its own logic for syncing to other devices. The server acts as a relay.
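(Concretely, the "common persistence" is a single ReadWriteMany claim, files-nfs, defined in apps/files/pvc.yaml below; both workloads simply reference it. A distilled sketch of the two mount declarations:)

# Syncthing side (Deployment volume):
volumes:
  - name: persistence
    persistentVolumeClaim:
      claimName: files-nfs
# Nextcloud side (Helm chart values):
extraVolumes:
  - name: files-nfs
    persistentVolumeClaim:
      claimName: files-nfs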


@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: files
resources:
  - namespace.yaml
  - pvc.yaml
  - syncthing/
  - nextcloud/


@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: nextcloud-ingress
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`nextcloud2.kluster.moll.re`)
      kind: Rule
      services:
        - name: nextcloud
          port: 8080
  tls:
    certResolver: default-tls


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - pvc.yaml
  - ingress.yaml
  - postgres.yaml
  - postgres-credentials.sealedsecret.yaml
helmCharts:
  - name: nextcloud
    releaseName: nextcloud
    version: 4.5.5
    valuesFile: values.yaml
    repo: https://nextcloud.github.io/helm/


@@ -0,0 +1,17 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: postgres-credentials
  namespace: files
spec:
  encryptedData:
    database: AgBOgmqlfgiN2VqxNyYL6O+/jdzPmGg97zOXxZ7KiD07b4/2FdmlWgOZZp7oUpQ9RMV0WybC0jau2YVlgXB32afgJ3uinaAAhzZwvzy8dgapNpe8ClxnFINRhKKC9kxK7YeDwtptbDQn7YtEmVGHI66/71VyGy7NME4Pk0Y4FxxpF6KAZMAHNyez4JMa9V+XFtYV5G5bOkPY/ku4LcYntiMAlEaArF+re1m5nLQmZ4SVkWlOc41N4Hv1HrCv8qq2kj7zVR5/J2qW8NlzmdJJqv1AP1foELuITZZKxwNspxynNxhjXTX0fP6vzfJpxtzb2s/4Yh2uT/UPb2rOdcGaXjjHKxjSX23tG5ZT+z5lt0y9UEmUYytlcsYv9vsRqCmeFsB63S7aABeCRSOJyGLsuUc7xqSZ2ijDG38qLij+JPgoEIbSLfRYVGE5GMo9EbHt4N+ZIMpJYQXq0VhDip/r11SENfUa3XoautQ5uVR1D50FuSrN16t24bQXai9uifkBpDyvqbiqgv7s3qOjF9u8I0eyeJA0ZO1JO174B9SO3IcZYys8c87fSuWvFbGepLNqfneSIx93klDUdx3YEjqcrqib49+3/dn3RO9/puyhJ6O0TEZneToyauV3lxpR+XG/PDx7EQ88lELgD/AmtulsLHkYNgpoblFPbgDUeHhOgoBRAe22Hiy0Co4eh0SPVPyKhj8MyYhPtLEV+UY=
    password: AgB2eY5aKJhEcJIgArGRrsqYf5pJJoXHRkplFpaqCCQW7X7WLREb+35HDijhnJSWRI2/LXDVy/8ArJe1LiiW+05aRY/9nvmjdpUmvsdQ6DK1mvirl8Py4JYueNrk2iUmI1h+ROyubBCvRBKxueQNkuwipKvk7nIlON6cwFnqp6GPcuWihSG/GZ2nSZmxmu+thdsM/S8DPaTW/N+Sut8DyarlCN94ZRiFVZIJialibfsJGQtL/uPX0W61GTkEU4m34IN9e+POdEdg3HuFMd3RvNQpndgPjaTv4A22TJRFs+rcHlcHr+5r8acVy1V+sZy97126Z7moeKDp1rFbG2/yMT1iS2oxQN4GJceTgMzSagqdn+KgD0N38OYvp+mRUQsl7+Fpglcq03vqbvxsc1fC78XpAAPMNA/pQDvtlS1qjuB7WCa5b3mkJxjc8efIuna9GAnDGh+djhlGHLEERnEfjlnpeDb/afRejUX+i6r00GnBxuRJfV+lKh4BJsnJm29nC4t10F6ff91Ngcjf+wCm5yWSFETZ9oFrPn10igGvoZwROJYABdtfMNjidGLkdKQnG1dj3EFu5XDn9vRqt8Iu/dEyoh7a2iGYDQ1lGpz+zxA/OZ9l/SuL6JUUwXq5W1/fSbtaBPdit4cwUTopq7AcpZkMuAQyVy6N9D0Hvjx432rCxmqyGU8PyjKHoAN+nuvTi79HtHR2wo4hJeIDoktdpxswSCe9VJEvqTFGQyCZtX3uEg==
    username: AgATMaQ/BRCO9vx329YxGGUGl3E68Tm3coU6IO6pYm8f+Uf7ImH4l/P84mjGDLho1zBUfILPAvM4G5xG2qkkyW4mEuB8A7NNWAhXMOS5i1msNaV2oqLYNWCOG2lFO7unkYwPSyu9EyGn/Hq/kbGPAKfUf6dtDLEc+Y0S5Ue9YA2gYK4VYUec491+02EOoprGcfM1QdGPLBrunXn4krxtGm+eTsK8nd/lnm3DK+f5uGupO844i8T0mXE1xcliysBTZzxEVpmzPN8q4TMay6qcB2wOvEyngnGCfxJGTSjTrkydPFLcI4p6IONW5QAX9eQwo6ZDo56WVNgvyNW+ZJ6hmPP9nLeHnKb3rM91CIMM0GDRYc3VFsVXwBY/sj12hiompXEVQEp+EJUbgnDLK2lW+J602ZnzyHFgwGKnfdI8PHfKoxRVf06TXPdROu1mfXr5jOXc+++LoRotkVOuf2KXMip/7HlTkRlZXKkenhIqrTtQkENJ+aaxCKdQwgE8iDtmB6ZEBiMJq/dZgvn7qbcMc/SYF3l6YZKSU2L1359CRTeuQ6J6aDml+WHvgtwLH6sIgR9Sjgxid9XlhQ3/8f9UQdR6OpblsBZYn8gYEQ1WRr7H1R3IjENpA7LtburPYyogSk4eSFWR1hkwfiiTJrfwJCPEka28a7MqX0nCKZqzzUOQqXNGPX8W9rU8aA2HcnSPrzLoOV2av9h4icw=
  template:
    metadata:
      creationTimestamp: null
      name: postgres-credentials
      namespace: files


@@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: nextcloud-postgres
spec:
  instances: 1
  imageName: ghcr.io/cloudnative-pg/postgresql:16
  bootstrap:
    initdb:
      owner: nextcloud
      database: nextcloud
      secret:
        name: postgres-credentials
  storage:
    size: 1Gi
    storageClass: nfs-client
  monitoring:
    enablePodMonitor: true


@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nextcloud-config
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi


@@ -0,0 +1,155 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
ingress:
  enabled: false
nextcloud:
  host: nextcloud2.kluster.moll.re
  username: admin
  password: changeme
  ## Use an existing secret
  existingSecret:
    enabled: false
  update: 0
  # If web server is not binding default port, you can define it
  # containerPort: 8080
  datadir: /var/www/html/data
  persistence:
    subPath:
  mail:
    enabled: false
  # PHP Configuration files
  # Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
  phpConfigs: {}
  # Default config files
  # IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
  # Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
  defaultConfigs:
    # To protect /var/www/html/config
    .htaccess: true
    # Redis default configuration
    redis.config.php: true
    # Apache configuration for rewrite urls
    apache-pretty-urls.config.php: true
    # Define APCu as local cache
    apcu.config.php: true
    # Apps directory configs
    apps.config.php: true
    # Used for auto configure database
    autoconfig.php: true
    # SMTP default configuration
    smtp.config.php: true
  extraVolumes:
    - name: files-nfs
      persistentVolumeClaim:
        claimName: files-nfs
  extraVolumeMounts:
    - name: files-nfs
      mountPath: /files
  # Extra config files created in /var/www/html/config/
  # ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
  # configs:
  #   config.php: |-
  # For example, to use S3 as primary storage
  # ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
  #
  # configs:
  #   s3.config.php: |-
  #     <?php
  #     $CONFIG = array (
  #       'objectstore' => array(
  #         'class' => '\\OC\\Files\\ObjectStore\\S3',
  #         'arguments' => array(
  #           'bucket' => 'my-bucket',
  #           'autocreate' => true,
  #           'key' => 'xxx',
  #           'secret' => 'xxx',
  #           'region' => 'us-east-1',
  #           'use_ssl' => true
  #         )
  #       )
  #     );
nginx:
  ## You need to set an fpm version of the image for nextcloud if you want to use nginx!
  enabled: false
internalDatabase:
  enabled: false
##
## External database configuration
##
externalDatabase:
  enabled: true
  type: postgresql
  host: nextcloud-postgres-rw
  database: nextcloud
  existingSecret:
    enabled: true
    secretName: postgres-credentials
    usernameKey: username
    passwordKey: password
mariadb:
  enabled: false
postgresql:
  enabled: false
redis:
  enabled: false
cronjob:
  enabled: false
persistence:
  # Nextcloud Data (/var/www/html)
  enabled: true
  annotations: {}
  ## If defined, PVC must be created manually before volume will be bound
  existingClaim: nextcloud-config
  ## Use an additional pvc for the data directory rather than a subpath of the default PVC
  ## Useful to store data on a different storageClass (e.g. on slower disks)
  nextcloudData:
    enabled: false
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits:
    cpu: 2000m
    memory: 2Gi
  requests:
    cpu: 100m
    memory: 128Mi
livenessProbe:
  enabled: false
  # disable when upgrading from a previous chart version
hpa:
  enabled: false
## Prometheus Exporter / Metrics
##
metrics:
  enabled: false
rbac:
  enabled: false

apps/files/pvc.yaml

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: files-nfs
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi


@@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: syncthing
spec:
  selector:
    matchLabels:
      app: syncthing
  template:
    metadata:
      labels:
        app: syncthing
    spec:
      containers:
        - name: syncthing
          image: syncthing
          resources:
            limits:
              memory: "256Mi"
              cpu: "500m"
          ports:
            - containerPort: 8384
              protocol: TCP
              name: syncthing-web
            - containerPort: 22000
              protocol: TCP
            - containerPort: 22000
              protocol: UDP
          volumeMounts:
            - name: persistence
              mountPath: /files
            - name: config
              mountPath: /var/syncthing/config
      volumes:
        - name: persistence
          persistentVolumeClaim:
            claimName: files-nfs
        - name: config
          persistentVolumeClaim:
            claimName: syncthing-config


@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: rss-ingressroute
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`syncthing2.kluster.moll.re`)
      kind: Rule
      services:
        - name: syncthing-web
          port: 8384
  tls:
    certResolver: default-tls


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - pvc.yaml
  - deployment.yaml
  - service.yaml
  - ingress.yaml
  - servicemonitor.yaml
  # - syncthing-api.sealedsecret.yaml
images:
  - name: syncthing
    newName: syncthing/syncthing
    newTag: "1.27"


@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: syncthing-config
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi


@@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
  name: syncthing-web
  labels:
    app: syncthing
spec:
  selector:
    app: syncthing
  type: ClusterIP
  ports:
    - port: 8384
      targetPort: 8384
      name: syncthing-web
---
apiVersion: v1
kind: Service
metadata:
  name: syncthing-listen
  annotations:
    metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
  selector:
    app: syncthing
  type: LoadBalancer
  loadBalancerIP: 192.168.3.5
  ports:
    - port: 22000
      targetPort: 22000
      protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: syncthing-discover
  annotations:
    metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
  selector:
    app: syncthing
  type: LoadBalancer
  loadBalancerIP: 192.168.3.5
  ports:
    - port: 22000
      targetPort: 22000
      protocol: UDP


@@ -0,0 +1,16 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: syncthing-servicemonitor
  labels:
    app: syncthing
spec:
  selector:
    matchLabels:
      app: syncthing
  endpoints:
    - port: syncthing-web
      path: /metrics
      bearerTokenSecret:
        name: syncthing-api
        key: token


@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: spacedrive
spec:
  selector:
    matchLabels:
      app: spacedrive
  template:
    metadata:
      labels:
        app: spacedrive
    spec:
      containers:
        - name: spacedrive
          image: spacedrive
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          ports:
            - containerPort: 80
          volumeMounts:
            - name: storage
              mountPath: /data
      volumes:
        - name: storage
          persistentVolumeClaim:
            claimName: spacedrive-nfs


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: files1
resources:
  - namespace.yaml
  - pvc.yaml
  - deployment.yaml
images:
  - name: spacedrive
    newName: ghcr.io/spacedriveapp/spacedrive/server
    newTag: 0.2.4


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder

apps/files1/pvc.yaml

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: spacedrive-nfs
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi


@@ -1,12 +1,10 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  namespace: finance
   name: actualbudget
   labels:
     app: actualbudget
 spec:
-  # deployment running a single container
   selector:
     matchLabels:
       app: actualbudget
@@ -18,7 +16,7 @@ spec:
     spec:
       containers:
         - name: actualbudget
-          image: actualbudget/actual-server:latest
+          image: actualbudget
           imagePullPolicy: Always
           env:
             - name: TZ
@@ -34,67 +32,3 @@ spec:
         - name: actualbudget-data-nfs
           persistentVolumeClaim:
             claimName: actualbudget-data-nfs
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: finance
-  name: "actualbudget-data-nfs"
-spec:
-  # storageClassName: fast
-  capacity:
-    storage: "5Gi"
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /export/kluster/actualbudget
-    server: 192.168.1.157
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: finance
-  name: "actualbudget-data-nfs"
-spec:
-  storageClassName: "fast"
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: "5Gi"
-  # selector:
-  #   matchLabels:
-  #     directory: "journal-data"
----
-apiVersion: v1
-kind: Service
-metadata:
-  namespace: finance
-  name: actualbudget
-spec:
-  selector:
-    app: actualbudget
-  ports:
-    - protocol: TCP
-      port: 5006
-      targetPort: 5006
-  type: ClusterIP
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  namespace: finance
-  name: actualbudget
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`actualbudget.kluster.moll.re`)
-      kind: Rule
-      services:
-        - name: actualbudget
-          port: 5006
-  tls:
-    certResolver: default-tls


@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: actualbudget
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`actualbudget.kluster.moll.re`)
      kind: Rule
      services:
        - name: actualbudget
          port: 5006
  tls:
    certResolver: default-tls


@@ -0,0 +1,27 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: "actualbudget-data-nfs"
spec:
  capacity:
    storage: "5Gi"
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /export/kluster/actualbudget
    server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: "actualbudget-data-nfs"
spec:
  storageClassName: ""
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: "5Gi"
  volumeName: actualbudget-data-nfs


@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: actualbudget
spec:
  selector:
    app: actualbudget
  ports:
    - protocol: TCP
      port: 5006
      targetPort: 5006
  type: ClusterIP


@@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: firefly-importer
  name: firefly-importer
  namespace: finance
spec:
  selector:
    matchLabels:
      app: firefly-importer
  template:
    metadata:
      labels:
        app: firefly-importer
    spec:
      containers:
        - image: fireflyiii/data-importer:latest
          imagePullPolicy: Always
          name: firefly-importer
          resources: {}
          ports:
            - containerPort: 8080
          env:
            - name: FIREFLY_III_ACCESS_TOKEN
              value: redacted
            - name: FIREFLY_III_URL
              value: firefly-http:8080
            # - name: APP_URL
            #   value: https://finance.kluster.moll.re
            - name: TRUSTED_PROXIES
              value: "**"
---
apiVersion: v1
kind: Service
metadata:
  name: firefly-importer-http
  namespace: finance
  labels:
    app: firefly-importer-http
spec:
  type: ClusterIP
  ports:
    - port: 8080
      # name: http
  selector:
    app: firefly-importer
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: firefly-importer-ingress
  namespace: finance
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`importer.finance.kluster.moll.re`)
      kind: Rule
      services:
        - name: firefly-importer-http
          port: 8080
  tls:
    certResolver: default-tls


@@ -1,79 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: firefly
  name: firefly
  namespace: finance
spec:
  selector:
    matchLabels:
      app: firefly
  template:
    metadata:
      labels:
        app: firefly
    spec:
      containers:
        - image: fireflyiii/core:latest
          imagePullPolicy: Always
          name: firefly
          resources: {}
          ports:
            - containerPort: 8080
          env:
            - name: APP_ENV
              value: "local"
            - name: APP_KEY
              value: iKejRAlgwx2Y/fxdosXjABbNxNzEuJdl
            - name: DB_CONNECTION
              value: sqlite
            - name: APP_URL
              value: https://finance.kluster.moll.re
            - name: TRUSTED_PROXIES
              value: "**"
          volumeMounts:
            - mountPath: /var/www/html/storage/database
              name: firefly-database
      volumes:
        - name: firefly-database
          persistentVolumeClaim:
            claimName: firefly-database-nfs
---
apiVersion: v1
kind: Service
metadata:
  name: firefly-http
  namespace: finance
  labels:
    app: firefly-http
spec:
  type: ClusterIP
  ports:
    - port: 8080
      # name: http
  selector:
    app: firefly
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: firefly-ingress
  namespace: finance
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`finance.kluster.moll.re`)
      kind: Rule
      services:
        - name: firefly-http
          port: 8080
  tls:
    certResolver: default-tls


@@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: finance
  name: firefly-database-nfs
  labels:
    directory: firefly
spec:
  # storageClassName: fast
  # volumeMode: Filesystem
  accessModes:
    - ReadOnlyMany
  capacity:
    storage: "1G"
  nfs:
    path: /firefly # inside nfs part.
    server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: finance
  name: firefly-database-nfs
spec:
  resources:
    requests:
      storage: "1G"
  # storageClassName: fast
  accessModes:
    - ReadOnlyMany
  volumeName: firefly-database-nfs


@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: finance
resources:
  - namespace.yaml
  - actualbudget.pvc.yaml
  - actualbudget.deployment.yaml
  - actualbudget.service.yaml
  - actualbudget.ingress.yaml
images:
  - name: actualbudget
    newName: actualbudget/actual-server
    newTag: 24.2.0


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder


@@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homarr
resources:
  - namespace.yaml
  - pvc.yaml
  - ingress.yaml
helmCharts:
  - name: homarr
    releaseName: homarr
    repo: https://oben01.github.io/charts/
    version: 1.0.1
    valuesFile: values.yaml


@@ -1,60 +0,0 @@
# -- Default values for homarr
# -- Declare variables to be passed into your templates.
# -- Number of replicas
replicaCount: 1
env:
  # -- Your local time zone
  TZ: "Europe/Berlin"
  # -- Colors and preferences, possible values dark / light
  DEFAULT_COLOR_SCHEME: "dark"
# -- Service configuration
service:
  # -- Service type
  type: ClusterIP
  # -- Service port
  port: 7575
  # -- Service target port
  targetPort: 7575
# -- Ingress configuration
ingress:
  enabled: false
persistence:
  - name: homarr-config
    # -- Enable homarr-config persistent storage
    enabled: true
    # -- homarr-config storage class name
    storageClassName: "nfs-client"
    # -- homarr-config access mode
    accessMode: "ReadWriteOnce"
    persistentVolumeReclaimPolicy: Retain
    # -- homarr-config storage size
    size: "50Mi"
    # -- homarr-config mount path inside the pod
    mountPath: "/app/data/configs"
  - name: homarr-database
    # -- Enable homarr-database persistent storage
    enabled: true
    # -- homarr-database storage class name
    storageClassName: "nfs-client"
    # -- homarr-database access mode
    accessMode: "ReadWriteOnce"
    # -- homarr-database storage size
    size: "50Mi"
    # -- homarr-database mount path inside the pod
    mountPath: "/app/database"
  - name: homarr-icons
    # -- Enable homarr-icons persistent storage
    enabled: true
    # -- homarr-icons storage class name
    storageClassName: "nfs-client"
    # -- homarr-icons access mode
    accessMode: "ReadWriteOnce"
    # -- homarr-icons storage size
    size: "50Mi"
    # -- homarr-icons mount path inside the pod
    mountPath: "/app/public/icons"


@@ -13,4 +13,4 @@ resources:
 images:
   - name: homeassistant/home-assistant
     newName: homeassistant/home-assistant
-    newTag: "2023.12"
+    newTag: "2024.2"


@@ -1,16 +1,24 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  - namespace.yaml
-  - ingress.yaml
-  - pvc.yaml
-  - postgres.sealedsecret.yaml
+  - namespace.yaml
+  - ingress.yaml
+  - pvc.yaml
+  - postgres.yaml
+  - postgres.sealedsecret.yaml
 namespace: immich
 helmCharts:
   - name: immich
     releaseName: immich
-    version: 0.2.0
+    version: 0.3.1
     valuesFile: values.yaml
     repo: https://immich-app.github.io/immich-charts
+images:
+  - name: ghcr.io/immich-app/immich-machine-learning
+    newTag: v1.95.1
+  - name: ghcr.io/immich-app/immich-server
+    newTag: v1.95.1

apps/immich/postgres.yaml

@@ -0,0 +1,25 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: immich-postgres
spec:
  instances: 1
  imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.2
  bootstrap:
    initdb:
      owner: immich
      database: immich
      secret:
        name: postgres-password
  postgresql:
    shared_preload_libraries:
      - "vectors.so"
  storage:
    size: 1Gi
    storageClass: nfs-client
  monitoring:
    enablePodMonitor: true


@@ -2,15 +2,11 @@
 ## You can find it at https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
 ## Refer there for more detail about the supported values
 image:
-  tag: v1.90.2
 # These entries are shared between all the Immich components
 env:
   REDIS_HOSTNAME: '{{ printf "%s-redis-master" .Release.Name }}'
-  DB_HOSTNAME: "postgres-postgresql.postgres"
+  DB_HOSTNAME: "immich-postgres-rw"
   DB_USERNAME:
     valueFrom:
       secretKeyRef:
@@ -26,11 +22,7 @@ env:
       secretKeyRef:
         name: postgres-password
         key: password
-  TYPESENSE_ENABLED: "{{ .Values.typesense.enabled }}"
-  TYPESENSE_API_KEY: "{{ .Values.typesense.env.TYPESENSE_API_KEY }}"
-  TYPESENSE_HOST: '{{ printf "%s-typesense" .Release.Name }}'
   IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}'
   IMMICH_SERVER_URL: '{{ printf "http://%s-server:3001" .Release.Name }}'
   IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
 immich:
@@ -52,18 +44,6 @@ redis:
   auth:
     enabled: false
-typesense:
-  enabled: true
-  env:
-    TYPESENSE_DATA_DIR: /tsdata
-    TYPESENSE_API_KEY: typesense
-  persistence:
-    tsdata:
-      # Enabling typesense persistence is recommended to avoid slow reindexing
-      enabled: true
-      accessMode: ReadWriteOnce
-      size: 1Gi
 # Immich components
 server:


@@ -0,0 +1,30 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - postgres.yaml
  - synapse.deployment.yaml
  - synapse.service.yaml
  - synapse.configmap.yaml
  - synapse.ingress.yaml
  - postgres-credentials.secret.yaml
  - mautrix.pvc.yaml
  - mautrix-telegram.statefulset.yaml
  - mautrix-telegram.configmap.yaml
  - mautrix-whatsapp.statefulset.yaml
namespace: matrix
images:
  - name: mautrix-telegram
    newName: dock.mau.dev/mautrix/telegram
    newTag: "v0.15.1"
  - name: mautrix-whatsapp
    newName: dock.mau.dev/mautrix/whatsapp
    newTag: "v0.10.5"
  - name: synapse
    newName: ghcr.io/element-hq/synapse
    newTag: "v1.100.0"


@@ -0,0 +1,511 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mautrix-telegram
data:
config.yaml: |
# Homeserver details
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: http://synapse:8448
# The domain of the homeserver (for MXIDs, etc).
domain: matrix.kluster.moll.re
# Whether or not to verify the SSL certificate of the homeserver.
# Only applies if address starts with https://
verify_ssl: false
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# Number of retries for all HTTP requests if the homeserver isn't reachable.
http_retry_count: 4
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's Telegram connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Whether asynchronous uploads via MSC2246 should be enabled for media.
# Requires a media repo that supports MSC2246.
async_media: false
# Application service host/registration related details
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://mautrix-telegram:29318
# When using https:// the TLS certificate and key files for the address.
tls_cert: false
tls_key: false
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29317
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
max_body_size: 1
# The full URI to the database. SQLite and Postgres are supported.
# Format examples:
# SQLite: sqlite:filename.db
# Postgres: postgres://username:password@hostname/dbname
database: sqlite:mautrix-telegram.db
# The unique ID of this appservice.
id: telegram
# Username of the appservice bot.
bot_username: telegrambot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
bot_displayname: Telegram bridge bot
bot_avatar: mxc://maunium.net/tJCRmUyJDsgRNgqhOgoiHWbX
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
ephemeral_events: true
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Bridge config
bridge:
# Localpart template of MXIDs for Telegram users.
# {userid} is replaced with the user ID of the Telegram user.
username_template: "telegram_{userid}"
# Localpart template of room aliases for Telegram portal rooms.
# {groupname} is replaced with the name part of the public channel/group invite link ( https://t.me/{} )
alias_template: "telegram_{groupname}"
# Displayname template for Telegram users.
# {displayname} is replaced with the display name of the Telegram user.
displayname_template: "{displayname} (Telegram)"
# Set the preferred order of user identifiers which to use in the Matrix puppet display name.
# In the (hopefully unlikely) scenario that none of the given keys are found, the numeric user
# ID is used.
#
# If the bridge is working properly, a phone number or an username should always be known, but
# the other one can very well be empty.
#
# Valid keys:
# "full name" (First and/or last name)
# "full name reversed" (Last and/or first name)
# "first name"
# "last name"
# "username"
# "phone number"
displayname_preference:
- full name
- username
- phone number
# Maximum length of displayname
displayname_max_length: 100
# Remove avatars from Telegram ghost users when removed on Telegram. This is disabled by default
# as there's no way to determine whether an avatar is removed or just hidden from some users. If
# you're on a single-user instance, this should be safe to enable.
allow_avatar_remove: false
# Should contact names and profile pictures be allowed?
# This is only safe to enable on single-user instances.
allow_contact_info: false
# Maximum number of members to sync per portal when starting up. Other members will be
# synced when they send messages. The maximum is 10000, after which the Telegram server
# will not send any more members.
# -1 means no limit (which means it's limited to 10000 by the server)
max_initial_member_sync: 100
# Maximum number of participants in chats to bridge. Only applies when the portal is being created.
# If there are more members when trying to create a room, the room creation will be cancelled.
# -1 means no limit (which means all chats can be bridged)
max_member_count: -1
# Whether or not to sync the member list in channels.
# If no channel admins have logged into the bridge, the bridge won't be able to sync the member
# list regardless of this setting.
sync_channel_members: false
# Whether or not to skip deleted members when syncing members.
skip_deleted_members: true
# Whether or not to automatically synchronize contacts and chats of Matrix users logged into
# their Telegram account at startup.
startup_sync: false
# Number of most recently active dialogs to check when syncing chats.
# Set to 0 to remove limit.
sync_update_limit: 0
# Number of most recently active dialogs to create portals for when syncing chats.
# Set to 0 to remove limit.
sync_create_limit: 15
# Should all chats be scheduled to be created later?
# This is best used in combination with MSC2716 infinite backfill.
sync_deferred_create_all: false
# Whether or not to sync and create portals for direct chats at startup.
sync_direct_chats: false
# The maximum number of simultaneous Telegram deletions to handle.
# A large number of simultaneous redactions could put strain on your homeserver.
max_telegram_delete: 10
# Whether or not to automatically sync the Matrix room state (mostly unpuppeted displaynames)
# at startup and when creating a bridge.
sync_matrix_state: true
# Allow logging in within Matrix. If false, users can only log in using login-qr or the
# out-of-Matrix login website (see appservice.public config section)
allow_matrix_login: true
# Whether or not to make portals of publicly joinable channels/supergroups publicly joinable on Matrix.
public_portals: false
# Whether or not to use /sync to get presence, read receipts and typing notifications
# when double puppeting is enabled
sync_with_custom_puppets: false
# Whether or not to update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, custom puppets will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
# If using this for other servers than the bridge's server,
# you must also set the URL in the double_puppet_server_map.
login_shared_secret_map:
example.com: foobar
# Set to false to disable link previews in messages sent to Telegram.
telegram_link_preview: true
# Whether or not the !tg join command should do a HTTP request
# to resolve redirects in invite links.
invite_link_resolve: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Maximum size of image in megabytes before sending to Telegram as a document.
image_as_file_size: 10
# Maximum number of pixels in an image before sending to Telegram as a document. Defaults to 4096x4096 = 16777216.
image_as_file_pixels: 16777216
# Enable experimental parallel file transfer, which makes uploads/downloads much faster by
# streaming from/to Matrix and using many connections for Telegram.
# Note that generating HQ thumbnails for videos is not possible with streamed transfers.
# This option uses internal Telethon implementation details and may break with minor updates.
parallel_file_transfer: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge send all unicode reactions as custom emoji reactions to Telegram?
# By default, the bridge only uses custom emojis for unicode emojis that aren't allowed in reactions.
always_custom_emoji_reaction: false
# Settings for converting animated stickers.
animated_sticker:
# Format to which animated stickers should be converted.
# disable - No conversion, send as-is (gzipped lottie)
# png - converts to non-animated png (fastest),
# gif - converts to animated gif
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
target: gif
# Should video stickers be converted to the specified format as well?
convert_from_webm: false
# Arguments for converter. All converters take width and height.
args:
width: 256
height: 256
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
# Settings for converting animated emoji.
# Same as animated_sticker, but webm is not supported as the target
# (because inline images can only contain images, not videos).
animated_emoji:
target: webp
args:
width: 64
height: 64
fps: 25
# # End-to-bridge encryption support options.
# #
# # See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
# encryption:
# # Allow encryption, work in group chat rooms with e2ee enabled
# allow: false
# # Default to encryption, force-enable encryption in all portals the bridge creates
# # This will cause the bridge bot to be in private chats for the encryption to work properly.
# default: false
# # Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
# appservice: false
# # Require encryption, drop any unencrypted messages.
# require: false
# # Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# # You must use a client that supports requesting keys from other users to use this feature.
# allow_key_sharing: false
# # Options for deleting megolm sessions from the bridge.
# delete_keys:
# # Beeper-specific: delete outbound sessions when hungryserv confirms
# # that the user has uploaded the key to key backup.
# delete_outbound_on_ack: false
# # Don't store outbound sessions in the inbound table.
# dont_store_outbound: false
# # Ratchet megolm sessions forward after decrypting messages.
# ratchet_on_decrypt: false
# # Delete fully used keys (index >= max_messages) after decrypting messages.
# delete_fully_used_on_decrypt: false
# # Delete previous megolm sessions from same device when receiving a new one.
# delete_prev_on_new_session: false
# # Delete megolm sessions received from a device when the device is deleted.
# delete_on_device_delete: false
# # Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
# periodically_delete_expired: false
# # Delete inbound megolm sessions that don't have the received_at field used for
# # automatic ratcheting and expired session deletion. This is meant as a migration
# # to delete old keys prior to the bridge update.
# delete_outdated_inbound: false
# # What level of device verification should be required from users?
# #
# # Valid levels:
# # unverified - Send keys to all device in the room.
# # cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# # cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# # cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# # Note that creating user signatures from the bridge bot is not currently possible.
# # verified - Require manual per-device verification
# # (currently only possible by modifying the `trust` column in the `crypto_device` database table).
# verification_levels:
# # Minimum level for which the bridge should send keys to when bridging messages from Telegram to Matrix.
# receive: unverified
# # Minimum level that the bridge should accept for incoming Matrix messages.
# send: unverified
# # Minimum level that the bridge should require for accepting key requests.
# share: cross-signed-tofu
# # Options for Megolm room key rotation. These options allow you to
# # configure the m.room.encryption event content. See:
# # https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# # more information about that event.
# rotation:
# # Enable custom Megolm room key rotation settings. Note that these
# # settings will only apply to rooms created after this option is
# # set.
# enable_custom: false
# # The maximum number of milliseconds a session should be used
# # before changing it. The Matrix spec recommends 604800000 (a week)
# # as the default.
# milliseconds: 604800000
# # The maximum number of messages that should be sent with a given a
# # session before changing it. The Matrix spec recommends 100 as the
# # default.
# messages: 100
# # Disable rotating keys when a user's devices change?
# # You should not enable this option unless you understand all the implications.
# disable_device_change_key_rotation: false
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Should cross-chat replies from Telegram be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Whether or not the bridge should send a read receipt from the bridge bot when a message has
# been sent to Telegram.
delivery_receipts: false
# Whether or not delivery errors should be reported as messages in the Matrix room.
delivery_error_reports: false
# Should errors in incoming message handling send a message to the Matrix room?
incoming_bridge_error_reports: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it,
# except if the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should pinned chats be moved to a specific tag in Matrix?
# The favorites tag is `m.favourite`.
pinned_tag: null
# Same as above for archived chats, the low priority tag is `m.lowpriority`.
archive_tag: null
# Whether or not mute status and tags should only be bridged when the portal room is created.
tag_only_on_create: true
# Should leaving the room on Matrix make the user leave on Telegram?
bridge_matrix_leave: true
# Should the user be kicked out of all portals when logging out of the bridge?
kick_on_logout: true
# Should the "* user joined Telegram" notice always be marked as read automatically?
always_read_joined_telegram_notice: true
# Should the bridge auto-create a group chat on Telegram when a ghost is invited to a room?
# Requires the user to have sufficient power level and double puppeting enabled.
create_group_on_invite: true
# Settings for backfilling messages from Telegram.
backfill:
# Allow backfilling at all?
enable: true
# Whether or not to enable backfilling in normal groups.
# Normal groups have numerous technical problems in Telegram, and backfilling normal groups
# will likely cause problems if there are multiple Matrix users in the group.
normal_groups: false
# If a backfilled chat is older than this number of hours, mark it as read even if it's unread on Telegram.
# Set to -1 to let any chat be unread.
unread_hours_threshold: 720
# Forward backfilling limits.
#
# Using a negative initial limit is not recommended, as it would try to backfill everything in a single batch.
forward_limits:
# Number of messages to backfill immediately after creating a portal.
initial:
user: 50
normal_group: 100
supergroup: 10
channel: 10
# Number of messages to backfill when syncing chats.
sync:
user: 100
normal_group: 100
supergroup: 100
channel: 100
# Timeout for forward backfills in seconds. If you have a high limit, you'll have to increase this too.
forward_timeout: 900
# Settings for incremental backfill of history. These only apply to Beeper, as upstream abandoned MSC2716.
incremental:
# Maximum number of messages to backfill per batch.
messages_per_batch: 100
# The number of seconds to wait after backfilling the batch of messages.
post_batch_delay: 20
# The maximum number of batches to backfill per portal, split by the chat type.
# If set to -1, all messages in the chat will eventually be backfilled.
max_batches:
# Direct chats
user: -1
# Normal groups. Note that the normal_groups option above must be enabled
# for these to be backfilled.
normal_group: -1
# Supergroups
supergroup: 10
# Broadcast channels
channel: -1
# Overrides for base power levels.
initial_power_level_overrides:
user: {}
group: {}
# Whether to bridge Telegram bot messages as m.notices or m.texts.
bot_messages_as_notices: true
bridge_notices:
# Whether or not Matrix bot messages (type m.notice) should be bridged.
default: false
# List of user IDs for whom the previous flag is flipped.
# e.g. if bridge_notices.default is false, notices from other users will not be bridged, but
# notices from users listed here will be bridged.
exceptions: []
# An array of possible values for the $distinguisher variable in message formats.
# Each user gets one of the values here, based on a hash of their user ID.
# If the array is empty, the $distinguisher variable will also be empty.
relay_user_distinguishers: ["\U0001F7E6", "\U0001F7E3", "\U0001F7E9", "⭕️", "\U0001F536", "⬛️", "\U0001F535", "\U0001F7E2"]
# The formats to use when sending messages to Telegram via the relay bot.
# Text msgtypes (m.text, m.notice and m.emote) support HTML, media msgtypes don't.
#
# Available variables:
# $sender_displayname - The display name of the sender (e.g. Example User)
# $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser)
# $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com)
# $distinguisher - A random string from the options in the relay_user_distinguishers array.
# $message - The message content
message_formats:
m.text: "$distinguisher <b>$sender_displayname</b>: $message"
m.notice: "$distinguisher <b>$sender_displayname</b>: $message"
m.emote: "* $distinguisher <b>$sender_displayname</b> $message"
m.file: "$distinguisher <b>$sender_displayname</b> sent a file: $message"
m.image: "$distinguisher <b>$sender_displayname</b> sent an image: $message"
m.audio: "$distinguisher <b>$sender_displayname</b> sent an audio file: $message"
m.video: "$distinguisher <b>$sender_displayname</b> sent a video: $message"
m.location: "$distinguisher <b>$sender_displayname</b> sent a location: $message"
# Telegram doesn't have built-in emotes, this field specifies how m.emote's from authenticated
# users are sent to telegram. All fields in message_formats are supported. Additionally, the
# Telegram user info is available in the following variables:
# $displayname - Telegram displayname
# $username - Telegram username (may not exist)
# $mention - Telegram @username or displayname mention (depending on which exists)
emote_format: "* $mention $formatted_body"
# The formats to use when sending state events to Telegram via the relay bot.
#
# Variables from `message_formats` that have the `sender_` prefix are available without the prefix.
# In name_change events, `$prev_displayname` is the previous displayname.
#
# Set format to an empty string to disable the messages for that event.
state_event_formats:
join: "$distinguisher <b>$displayname</b> joined the room."
leave: "$distinguisher <b>$displayname</b> left the room."
name_change: "$distinguisher <b>$prev_displayname</b> changed their name to $distinguisher <b>$displayname</b>"
# Filter rooms that can/can't be bridged. Can also be managed using the `filter` and
# `filter-mode` management commands.
#
# An empty blacklist will essentially disable the filter.
filter:
# Filter mode to use. Either "blacklist" or "whitelist".
# If the mode is "blacklist", the listed chats will never be bridged.
# If the mode is "whitelist", only the listed chats can be bridged.
mode: blacklist
# The list of group/channel IDs to filter.
list: []
# How to handle direct chats:
# If users is "null", direct chats will follow the previous settings.
# If users is "true", direct chats will always be bridged.
# If users is "false", direct chats will never be bridged.
users: true
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!tg"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a Telegram bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# Send each message separately (for readability in some clients)
management_room_multiple_messages: false
# Permissions for using the bridge.
# Permitted values:
# relaybot - Only use the bridge via the relaybot, no access to commands.
# user - Relaybot level + access to commands to create bridges.
# puppeting - User level + logging in with a Telegram account.
# full - Full access to use the bridge, i.e. previous levels + Matrix login.
# admin - Full access to use the bridge and some extra administration commands.
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"matrix.kluster.moll.re": "full"
"@remy:matrix.kluster.moll.re": "admin"
# Options related to the message relay Telegram bot.
relaybot:
private_chat:
# List of users to invite to the portal when someone starts a private chat with the bot.
# If empty, private chats with the bot won't create a portal.
invite: []
# Whether or not to bridge state change messages in relaybot private chats.
state_changes: true
# When private_chat_invite is empty, this message is sent to users who /start the
# relaybot. Telegram's "markdown" is supported.
message: This is a Matrix bridge relaybot and does not support direct chats
# List of users to invite to all group chat portals created by the bridge.
group_chat_invite: []
# Whether or not the relaybot should ignore events in unbridged group chats.
# If false, portals will be created when the relaybot receives messages, just like normal
# users. This behavior is usually not desirable, as it interferes with manually bridging
# the chat to another room.
ignore_unbridged_group_chat: true
# Whether or not to allow creating portals from Telegram.
authless_portals: true
# Whether or not to allow Telegram group admins to use the bot commands.
whitelist_group_admins: true
# Whether or not to ignore incoming events sent by the relay bot.
ignore_own_incoming_events: true
# List of usernames/user IDs who are also allowed to use the bot commands.
whitelist:
- myusername
- 12345678
# Telegram config
telegram:
# Get your own API keys at https://my.telegram.org/apps
api_id: 862555
api_hash: 7387a7b6ba71793d6f3fa98261117e4e
# (Optional) Create your own bot at https://t.me/BotFather
bot_token: disabled
# Should the bridge request missed updates from Telegram when restarting?
catch_up: true
# Should incoming updates be handled sequentially to make sure order is preserved on Matrix?
sequential_updates: true
exit_on_update_error: false
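For reference (not part of the committed ConfigMap), the appservice registration that the homeserver needs alongside this config is normally produced by the bridge itself. A sketch, assuming the /data paths used in this deployment:
```
# hypothetical one-off run inside the mautrix-telegram container
python3 -m mautrix_telegram -g -c /data/config.yaml -r /data/registration.yaml
```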


@@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mautrix-telegram
spec:
selector:
matchLabels:
app: mautrix-telegram
serviceName: mautrix-telegram
replicas: 1
template:
metadata:
labels:
app: mautrix-telegram
spec:
containers:
- name: mautrix-telegram
image: mautrix-telegram
volumeMounts:
- name: config
mountPath: /data/config.yaml
subPath: config.yaml
- name: persistence
mountPath: /data
args:
- --no-update # disable overwriting config.yaml
volumes:
- name: config
configMap:
name: mautrix-telegram
- name: persistence
emptyDir: {}


@@ -0,0 +1,428 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mautrix-whatsapp
data:
config.yaml: |
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: http://synapse:8448
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: matrix.kluster.moll.re
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://mautrix-whatsapp:29318
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29318
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: sqlite3-fk-wal
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: file:/data/mautrix-whatsapp.db?_txlock=immediate
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: whatsapp
# Appservice bot details.
bot:
# Username of the appservice bot.
username: whatsappbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: WhatsApp bridge bot
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Segment-compatible analytics endpoint for tracking some events, like provisioning API login and encryption errors.
analytics:
# Hostname of the tracking server. The path is hardcoded to /v1/track
host: api.segment.io
# API key to send with tracking requests. Tracking is disabled if this is null.
token: null
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
user_id: null
# Prometheus config.
metrics:
# Enable prometheus metrics?
enabled: false
# IP and port where the metrics listener should be. The path is always /metrics
listen: 127.0.0.1:8001
# Config for things that are directly sent to WhatsApp.
whatsapp:
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
os_name: Mautrix-WhatsApp bridge
# Browser name that determines the logo shown in the mobile app.
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
browser_name: unknown
# Bridge config
bridge:
# Localpart template of MXIDs for WhatsApp users.
# {{.}} is replaced with the phone number of the WhatsApp user.
username_template: whatsapp_{{.}}
# Displayname template for WhatsApp users.
# {{.PushName}} - nickname set by the WhatsApp user
# {{.BusinessName}} - validated WhatsApp business name
# {{.Phone}} - phone number (international format)
# The following variables are also available, but will cause problems on multi-user instances:
# {{.FullName}} - full name from contact list
# {{.FirstName}} - first name from contact list
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
personal_filtering_spaces: false
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should incoming calls send a message to the Matrix room?
call_start_notices: true
# Should another user's cryptographic identity changing send a message to Matrix?
identity_change_notices: false
portal_message_buffer: 128
# Settings for handling history sync payloads.
history_sync:
# Enable backfilling history sync payloads from WhatsApp?
backfill: true
# The maximum number of initial conversations that should be synced.
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
max_initial_conversations: -1
# Maximum number of messages to backfill in each conversation.
# Set to -1 to disable limit.
message_count: 50
# Should the bridge request a full sync from the phone when logging in?
# This bumps the size of history syncs from 3 months to 1 year.
request_full_sync: false
# Configuration parameters that are sent to the phone along with the request full sync flag.
# By default (when the values are null or 0), the config isn't sent at all.
full_sync_config:
# Number of days of history to request.
# The limit seems to be around 3 years, but using higher values doesn't break.
days_limit: null
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
size_mb_limit: null
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
storage_quota_mb: null
# If this value is greater than 0, then if the conversation's last message was more than
# this number of hours ago, then the conversation will automatically be marked as read.
# Conversations that have a last message that is less than this number of hours ago will
# have their unread status synced from WhatsApp.
unread_hours_threshold: 0
# Should puppet avatars be fetched from the server even if an avatar is already set?
user_avatar_sync: true
# Should Matrix users leaving groups be bridged to WhatsApp?
bridge_matrix_leave: true
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
# WhatsApp and set the unread status on initial backfill?
# This will only work on clients that support the m.marked_unread or
# com.famedly.marked_unread room account data.
sync_manual_marked_unread: true
# When double puppeting is enabled, users can use `!wa toggle` to change whether
# presence is bridged. This setting sets the default value.
# Existing users won't be affected when these are changed.
default_bridge_presence: true
# Send the presence as "available" to whatsapp when users start typing on a portal.
# This works as a workaround for homeservers that do not support presence, and allows
# users to see when the whatsapp user on the other side is typing during a conversation.
send_presence_on_typing: false
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
#
# By default, the bridge acts like WhatsApp web, which only sends active delivery
# receipts when it's in the foreground.
force_active_delivery_receipts: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Should group members be synced in parallel? This makes member sync faster
parallel_member_sync: false
# Should Matrix m.notice-type messages be bridged?
bridge_notices: true
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it, except if the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
archive_tag: null
# Same as above, but for pinned chats. The favorite tag is called m.favourite
pinned_tag: null
# Should mute status and tags only be bridged when the portal room is created?
tag_only_on_create: true
# Should WhatsApp status messages be bridged into a Matrix room?
# Disabling this won't affect already created status broadcast rooms.
enable_status_broadcast: true
# Should sending WhatsApp status messages be allowed?
# This can cause issues if the user has lots of contacts, so it's disabled by default.
disable_status_broadcast_send: true
# Should the status broadcast room be muted and moved into low priority by default?
# This is only applied when creating the room, the user can unmute it later.
mute_status_broadcast: true
# Tag to apply to the status broadcast room.
status_broadcast_tag: m.lowpriority
# Should the bridge use thumbnails from WhatsApp?
# They're disabled by default due to very low resolution.
whatsapp_thumbnail: false
# Allow users to invite bots into rooms with WhatsApp
# users (private chats and groups).
allow_user_invite: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge never send alerts to the bridge management room?
# These are mostly things like the user being logged out.
disable_bridge_alerts: false
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
# This is only safe on single-user bridges.
crash_on_stream_replaced: false
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
# key in the event content even if this is disabled.
url_previews: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Send galleries as a single event? This is not an MSC (yet).
beeper_galleries: false
# Should polls be sent using MSC3381 event types?
extev_polls: false
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
# Null means there's no enforced timeout.
message_handling_timeout:
# Send an error message after this timeout, but keep waiting for the response until the deadline.
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
error_after: null
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
# This is counted from the time the bridge starts handling the message.
deadline: 120s
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!wa"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a WhatsApp bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms that users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should user mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a WhatsApp account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Settings for relay mode
relay:
# Whether relay mode should be allowed. If allowed, `!wa set-relay` can be used to turn any
# authenticated user into a relaybot for that chat.
enabled: false
# Should only admins be allowed to set themselves as relay users?
admin_only: true
# The formats to use when sending messages to WhatsApp via the relaybot.
message_formats:
m.text: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.notice: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.emote: "* <b>{{ .Sender.Displayname }}</b> {{ .Message }}"
m.file: "<b>{{ .Sender.Displayname }}</b> sent a file"
m.image: "<b>{{ .Sender.Displayname }}</b> sent an image"
m.audio: "<b>{{ .Sender.Displayname }}</b> sent an audio file"
m.video: "<b>{{ .Sender.Displayname }}</b> sent a video"
m.location: "<b>{{ .Sender.Displayname }}</b> sent a location"
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-whatsapp.log
max_size: 100
max_backups: 10
compress: true
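As with the Telegram bridge, the registration file for this config comes from the bridge's -g flag; the binary name and /data paths are assumptions about the container image, so treat this as a sketch:
```
# hypothetical one-off run inside the mautrix-whatsapp container
mautrix-whatsapp -g -c /data/config.yaml -r /data/registration.yaml
```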


@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mautrix-whatsapp
spec:
selector:
matchLabels:
app: mautrix-whatsapp
serviceName: mautrix-whatsapp
replicas: 1
template:
metadata:
labels:
app: mautrix-whatsapp
spec:
containers:
- name: mautrix-whatsapp
image: mautrix-whatsapp
volumeMounts:
- name: persistence
mountPath: /data
# contains config.yaml
securityContext:
fsGroup: 1337
volumes:
- name: persistence
persistentVolumeClaim:
claimName: mautrix-whatsapp


@@ -0,0 +1,23 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-telegram
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-whatsapp
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/matrix/postgres.yaml

@@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: matrix-postgres
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:16
bootstrap:
initdb:
owner: matrix
database: matrix
secret:
name: postgres-credentials
storage:
size: 1Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true
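The initdb bootstrap above references a postgres-credentials secret that is not part of this commit. CloudNativePG expects owner credentials as a basic-auth style secret with username and password keys, so a plausible way to create it (the password here is a placeholder) is:
```
# hypothetical: create the owner credentials referenced by bootstrap.initdb.secret
kubectl create secret generic postgres-credentials \
  --type=kubernetes.io/basic-auth \
  --from-literal=username=matrix \
  --from-literal=password='change-me'
```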


@@ -0,0 +1,62 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: synapse
data:
# matrix.kluster.moll.re.log.config: |
# version: 1
# formatters:
# precise:
# format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
# handlers:
# console:
# class: logging.StreamHandler
# formatter: precise
# loggers:
# # This is just here so we can leave `loggers` in the config regardless of whether
# # we configure other loggers below (avoid empty yaml dict error).
# _placeholder:
# level: "INFO"
# synapse.storage.SQL:
# # beware: increasing this to DEBUG will make synapse log sensitive
# # information such as access tokens.
# level: INFO
# root:
# level: INFO
# handlers: [console]
homeserver.yaml: |
server_name: "matrix.kluster.moll.re"
report_stats: false
# enable_registration: true
# enable_registration_without_verification: true
listeners:
- port: 8448
tls: false
type: http
x_forwarded: true
bind_addresses: ['::1', '127.0.0.1']
resources:
- names: [client, federation]
compress: false
# log_config: "./matrix.kluster.moll.re.log.config"
media_store_path: /media_store
trusted_key_servers:
- server_name: "matrix.org"
database:
name: psycopg2
args:
user: matrix
password: "0ssdsdsdM6vbxhs.kdjsdasd9Z0qK5bdTwM6vbxh9Z"
dbname: matrix
host: matrix-postgres-rw
cp_min: 5
cp_max: 10


@@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: synapse
spec:
selector:
matchLabels:
app: synapse
template:
metadata:
labels:
app: synapse
spec:
containers:
- name: synapse
image: synapse
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 8448
env:
- name: SYNAPSE_CONFIG_PATH
value: /config/homeserver.yaml
volumeMounts:
- name: config
mountPath: /config/homeserver.yaml
subPath: homeserver.yaml
- name: config-persistence
mountPath: /config
- name: media
mountPath: /media_store
securityContext:
fsGroup: 1001
volumes:
- name: config
configMap:
name: synapse
- name: config-persistence
emptyDir: {}
- name: media
emptyDir: {}


@@ -0,0 +1,29 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: synapse-federation
spec:
entryPoints:
- websecure
routes:
- match: Host(`matrix.kluster.moll.re`)
kind: Rule
services:
- name: synapse
port: 8448
# automatically redirect requests to the /_matrix path
middlewares:
- name: matrix-redirect
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: matrix-redirect
spec:
redirectRegex:
regex: "^https://matrix.kluster.moll.re/(.*)"
replacement: "https://matrix.kluster.moll.re/_matrix/$${1}"
permanent: true
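A quick external sanity check of this middleware (assuming DNS and the default-tls resolver already work; /versions is an arbitrary path) is to inspect the Location header without following redirects:
```
curl -sI https://matrix.kluster.moll.re/versions | grep -i '^location'
# expected: location: https://matrix.kluster.moll.re/_matrix/versions
```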


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: synapse
spec:
selector:
app: synapse
ports:
- protocol: TCP
port: 8448
targetPort: 8448


@@ -26,7 +26,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`media-backend.kluster.moll.re`)
- match: Host(`media-backend.kluster.moll.re`) && !Path(`/metrics`)
middlewares:
- name: jellyfin-websocket
- name: jellyfin-server-headers


@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: jellyfin
labels:
metrics: prometheus
spec:
selector:
matchLabels:
app: jellyfin-server-service
endpoints:
- path: /metrics
targetPort: jellyfin
# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/
# the metrics are available at /metrics but blocked by the ingress


@@ -10,6 +10,7 @@ resources:
- web.deployment.yaml
- web.service.yaml
- ingress.yaml
- jellyfin.servicemonitor.yaml
images:
- name: jellyfin/jellyfin


@@ -20,6 +20,7 @@ spec:
cpu: "2"
ports:
- containerPort: 8096
name: jellyfin
env:
- name: TZ
value: Europe/Berlin


@@ -3,6 +3,8 @@ apiVersion: v1
kind: Service
metadata:
name: jellyfin-server
labels:
app: jellyfin-server-service
spec:
selector:
app: jellyfin-server


@@ -2,8 +2,6 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-nfs
labels:
directory: grafana
spec:
capacity:
storage: "1Gi"
@@ -18,11 +16,10 @@ kind: PersistentVolumeClaim
metadata:
name: grafana-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: grafana
volumeName: grafana-nfs


@@ -1,149 +1,9 @@
rbac:
create: true
## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
# useExistingRole: name-of-some-(cluster)role
pspEnabled: true
pspUseAppArmor: true
namespaced: false
extraRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
extraClusterRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
serviceAccount:
create: true
name:
nameTest:
## Service account annotations. Can be templated.
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
autoMount: true
replicas: 1
## Create a headless service for the deployment
headlessService: false
## Create HorizontalPodAutoscaler object for deployment type
#
autoscaling:
enabled: false
# minReplicas: 1
# maxReplicas: 10
# metrics:
# - type: Resource
# resource:
# name: cpu
# targetAverageUtilization: 60
# - type: Resource
# resource:
# name: memory
# targetAverageUtilization: 60
## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1
## See `kubectl explain deployment.spec.strategy` for more
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
deploymentStrategy:
type: RollingUpdate
readinessProbe:
httpGet:
path: /api/health
port: 3000
livenessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
image:
repository: grafana/grafana
tag: 9.0.2
sha: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Can be templated.
##
# pullSecrets:
# - myRegistrKeySecretName
testFramework:
enabled: true
image: "bats/bats"
tag: "v1.4.1"
imagePullPolicy: IfNotPresent
securityContext: {}
securityContext:
runAsUser: 472
runAsGroup: 472
fsGroup: 472
containerSecurityContext:
{}
# Extra configmaps to mount in grafana pods
# Values are templated.
extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/grafana/ssl/
# subPath: certificates.crt # (optional)
# configMap: certs-configmap
# readOnly: true
extraEmptyDirMounts: []
# - name: provisioning-notifiers
# mountPath: /etc/grafana/provisioning/notifiers
# Apply extra labels to common labels.
extraLabels: {}
## Assign a PriorityClassName to pods if set
# priorityClassName:
downloadDashboardsImage:
repository: curlimages/curl
tag: 7.73.0
sha: ""
pullPolicy: IfNotPresent
downloadDashboards:
env: {}
envFromSecret: ""
resources: {}
## Pod Annotations
# podAnnotations: {}
## Pod Labels
# podLabels: {}
podPortName: grafana
## Deployment annotations
# annotations: {}
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
@@ -163,128 +23,10 @@ serviceMonitor:
## https://github.com/coreos/prometheus-operator
##
enabled: false
path: /metrics
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
labels: {}
interval: 1m
scheme: http
tlsConfig: {}
scrapeTimeout: 30s
relabelings: []
extraExposePorts: []
# - name: keycloak
# port: 8080
# targetPort: 8080
# type: ClusterIP
# overrides pod.spec.hostAliases in the grafana deployment's pods
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "my.host.com"
ingress:
enabled: true
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {
kubernetes.io/ingress.class: nginx,
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
# pathType is only for k8s >= 1.18
pathType: Prefix
hosts:
- grafana.kluster.moll.re
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## Or for k8s > 1.19
# - path: /*
# pathType: Prefix
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
tls:
- hosts:
- grafana.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Additional init containers (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
extraInitContainers: []
## Specify additional containers in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
extraContainers: ""
# extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - -provider=github
# - -client-id=
# - -client-secret=
# - -github-org=<ORG_NAME>
# - -email-domain=*
# - -cookie-secret=
# - -http-address=http://0.0.0.0:4181
# - -upstream-url=http://127.0.0.1:3000
# ports:
# - name: proxy-web
# containerPort: 4181
## Volumes that can be used in init containers that will not be mounted to deployment pods
extraContainerVolumes: []
# - name: volume-from-secret
# secret:
# secretName: secret-to-mount
# - name: empty-dir-volume
# emptyDir: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
type: pvc
enabled: true
@@ -318,556 +60,6 @@ initChownData:
##
enabled: true
## initChownData container image
##
image:
repository: busybox
tag: "1.31.1"
sha: ""
pullPolicy: IfNotPresent
## initChownData resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
# adminPassword: strongpassword
# Use an existing secret for the admin user.
admin:
## Name of the secret. Can be templated.
existingSecret: ""
userKey: admin-user
passwordKey: admin-password
## Define command to be executed at startup by grafana container
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
## Default is "run.sh" as defined in grafana's Dockerfile
# command:
# - "sh"
# - "/run.sh"
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Extra environment variables that will be passed onto deployment pods
##
## to provide grafana with access to CloudWatch on AWS EKS:
## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
## same oidc eks provider as noted before (same as the existing line)
## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
##
## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
##
## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
##
## env:
## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
## AWS_REGION: us-east-1
##
## 5. uncomment the EKS section in extraSecretMounts: below
## 6. uncomment the annotation section in the serviceAccount: above
## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
env: {}
## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
## Renders in container spec as:
## env:
## ...
## - name: <key>
## valueFrom:
## <value rendered as YAML>
envValueFrom: {}
# ENV_NAME:
# configMapKeyRef:
# name: configmap-name
# key: value_key
## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
## This can be useful for auth tokens, etc. Value is templated.
envFromSecret: ""
## Sensitive environment variables that will be rendered as a new secret object
## This can be useful for auth tokens, etc
envRenderSecret: {}
## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
## Name is templated.
envFromSecrets: []
## - name: secret-name
## optional: true
## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key.
## Name is templated.
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
envFromConfigMaps: []
## - name: configmap-name
## optional: true
# Inject Kubernetes services as environment variables.
# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
enableServiceLinks: true
## Additional grafana server secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
# - name: secret-files
# mountPath: /etc/secrets
# secretName: grafana-secret-files
# readOnly: true
# subPath: ""
#
# for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
# - name: aws-iam-token
# mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
# readOnly: true
# projected:
# defaultMode: 420
# sources:
# - serviceAccountToken:
# audience: sts.amazonaws.com
# expirationSeconds: 86400
# path: token
#
# for CSI e.g. Azure Key Vault use the following
# - name: secrets-store-inline
# mountPath: /run/secrets
# readOnly: true
# csi:
# driver: secrets-store.csi.k8s.io
# readOnly: true
# volumeAttributes:
# secretProviderClass: "akv-grafana-spc"
# nodePublishSecretRef: # Only required when using service principal mode
# name: grafana-akv-creds # Only required when using service principal mode
## Additional grafana server volume mounts
# Defines additional volume mounts.
extraVolumeMounts: []
# - name: extra-volume-0
# mountPath: /mnt/volume0
# readOnly: true
# existingClaim: volume-claim
# - name: extra-volume-1
# mountPath: /mnt/volume1
# readOnly: true
# hostPath: /usr/shared/
## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
lifecycleHooks: {}
# postStart:
# exec:
# command: []
## Pass the plugins you want installed as a list.
##
plugins: []
# - digrich-bubblechart-panel
# - grafana-clock-panel
## Configure grafana datasources
## ref: http://docs.grafana.org/administration/provisioning/#datasources
##
datasources: {}
# datasources.yaml:
# apiVersion: 1
# datasources:
# - name: Prometheus
# type: prometheus
# url: http://prometheus-prometheus-server
# access: proxy
# isDefault: true
# - name: CloudWatch
# type: cloudwatch
# access: proxy
# uid: cloudwatch
# editable: false
# jsonData:
# authType: default
# defaultRegion: us-east-1
## Configure notifiers
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
##
notifiers: {}
# notifiers.yaml:
# notifiers:
# - name: email-notifier
# type: email
# uid: email1
# # either:
# org_id: 1
# # or
# org_name: Main Org.
# is_default: true
# settings:
# addresses: an_email_address@example.com
# delete_notifiers:
## Configure grafana dashboard providers
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
##
## `path` must be /var/lib/grafana/dashboards/<provider_name>
##
dashboardProviders: {}
# dashboardproviders.yaml:
# apiVersion: 1
# providers:
# - name: 'default'
# orgId: 1
# folder: ''
# type: file
# disableDeletion: false
# editable: true
# options:
# path: /var/lib/grafana/dashboards/default
## Configure grafana dashboard to import
## NOTE: To use dashboards you must also enable/configure dashboardProviders
## ref: https://grafana.com/dashboards
##
## dashboards per provider, use provider name as key.
##
dashboards: {}
# default:
# some-dashboard:
# json: |
# $RAW_JSON
# custom-dashboard:
# file: dashboards/custom-dashboard.json
# prometheus-stats:
# gnetId: 2
# revision: 2
# datasource: Prometheus
# local-dashboard:
# url: https://example.com/repository/test.json
# token: ''
# local-dashboard-base64:
# url: https://example.com/repository/test-b64.json
# token: ''
# b64content: true
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, not both.
## ConfigMap data example:
##
## data:
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps: {}
# default: ""
## Grafana's primary configuration
## NOTE: values in map will be converted to ini format
## ref: http://docs.grafana.org/installation/configuration/
##
grafana.ini:
paths:
data: /var/lib/grafana/
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
## grafana Authentication can be enabled with the following values on grafana.ini
# server:
# The full public facing url you use in browser, used for redirects and emails
# root_url:
# https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
# auth.github:
# enabled: false
# allow_sign_up: false
# scopes: user:email,read:org
# auth_url: https://github.com/login/oauth/authorize
# token_url: https://github.com/login/oauth/access_token
# api_url: https://api.github.com/user
# team_ids:
# allowed_organizations:
# client_id:
# client_secret:
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
# auth.ldap:
# enabled: true
# allow_sign_up: true
# config_file: /etc/grafana/ldap.toml
## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable, grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
enabled: false
# `existingSecret` is a reference to an existing secret containing the ldap configuration
# for Grafana in a key `ldap-toml`.
existingSecret: ""
# `config` is the content of `ldap.toml` that will be stored in the created secret
config: ""
# config: |-
# verbose_logging = true
# [[servers]]
# host = "my-ldap-server"
# port = 636
# use_ssl = true
# start_tls = false
# ssl_skip_verify = false
# bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana.
existingSecret: ""
userKey: "user"
passwordKey: "password"
## Sidecars that collect the configmaps with the specified label and store the included files in the respective folders
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
image:
repository: quay.io/kiwigrid/k8s-sidecar
tag: 1.15.6
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
securityContext: {}
# skipTlsVerify Set to true to skip tls verification for kube api calls
# skipTlsVerify: true
enableUniqueFilenames: false
readinessProbe: {}
livenessProbe: {}
dashboards:
enabled: false
SCProvider: true
# label that the configmaps with dashboards are marked with
label: grafana_dashboard
# value of label that the configmaps with dashboards are set to
labelValue: null
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
folder: /tmp/dashboards
# The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
defaultFolderName: null
# Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces.
searchNamespace: null
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# If specified, the sidecar will look for an annotation with this name to create the folder and put the dashboard there.
# You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
folderAnnotation: null
# Absolute path to shell script to execute after a configmap got reloaded
script: null
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
# watchServerTimeout: 3600
#
# watchClientTimeout: is a client-side timeout, configuring your local socket.
# If you have a network outage dropping all packets with no RST/FIN,
# this is how long your client waits before realizing & dropping the connection.
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
# provider configuration that lets grafana manage the dashboards
provider:
# name of the provider, should be unique
name: sidecarProvider
# orgid as configured in grafana
orgid: 1
# folder in which the dashboards should be imported in grafana
folder: ''
# type of the provider
type: file
# disableDelete to activate an import-only behaviour
disableDelete: false
# allow updating provisioned dashboards from the UI
allowUiUpdates: false
# allow Grafana to replicate dashboard structure from filesystem
foldersFromFilesStructure: false
# Additional dashboard sidecar volume mounts
extraMounts: []
# Sets the size limit of the dashboard sidecar emptyDir volume
sizeLimit: {}
datasources:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource
# value of label that the configmaps with datasources are set to
labelValue: null
# If specified, the sidecar will search for datasource config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# Endpoint to send request to reload datasources
reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
skipReload: false
# Deploy the datasource sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any datasources defined at startup time.
initDatasources: false
# Sets the size limit of the datasource sidecar emptyDir volume
sizeLimit: {}
plugins:
enabled: false
# label that the configmaps with plugins are marked with
label: grafana_plugin
# value of label that the configmaps with plugins are set to
labelValue: null
# If specified, the sidecar will search for plugin config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# Endpoint to send request to reload plugins
reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
skipReload: false
# Deploy the plugin sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any plugins defined at startup time.
initPlugins: false
# Sets the size limit of the plugin sidecar emptyDir volume
sizeLimit: {}
notifiers:
enabled: false
# label that the configmaps with notifiers are marked with
label: grafana_notifier
# If specified, the sidecar will search for notifier config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# search in configmap, secret or both
resource: both
# Sets the size limit of the notifier sidecar emptyDir volume
sizeLimit: {}
## Override the deployment namespace
##
namespaceOverride: ""
## Number of old ReplicaSets to retain
##
revisionHistoryLimit: 10
## Add a separate remote image renderer deployment/service
imageRenderer:
# Enable the image-renderer deployment & service
enabled: false
replicas: 1
image:
# image-renderer Image repository
repository: grafana/grafana-image-renderer
# image-renderer Image tag
tag: latest
# image-renderer Image sha (optional)
sha: ""
# image-renderer ImagePullPolicy
pullPolicy: Always
# extra environment variables
env:
HTTP_HOST: "0.0.0.0"
# RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
# RENDERING_MODE: clustered
# IGNORE_HTTPS_ERRORS: true
# image-renderer deployment serviceAccount
serviceAccountName: ""
# image-renderer deployment securityContext
securityContext: {}
# image-renderer deployment Host Aliases
hostAliases: []
# image-renderer deployment priority class
priorityClassName: ''
service:
# Enable the image-renderer service
enabled: true
# image-renderer service port name
portName: 'http'
# image-renderer service port used by both service and deployment
port: 8081
targetPort: 8081
# If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
grafanaProtocol: http
# In case a sub_path is used this needs to be added to the image renderer callback
grafanaSubPath: ""
# name of the image-renderer port on the pod
podPortName: http
# number of image-renderer replica sets to keep
revisionHistoryLimit: 10
networkPolicy:
# Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
limitIngress: true
# Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
limitEgress: false
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# Create dynamic manifests via values:
extraObjects: []
# - apiVersion: "kubernetes-client.io/v1"
# kind: ExternalSecret
# metadata:
# name: grafana-secrets
# spec:
# backendType: gcpSecretsManager
# data:
# - key: grafana-admin-password
# name: adminPassword


@@ -2,13 +2,9 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: influxdb-nfs
labels:
directory: influxdb
spec:
# storageClassName: slow
capacity:
storage: "10Gi"
# volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
@@ -26,6 +22,4 @@ spec:
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: influxdb
volumeName: influxdb-nfs


@@ -8,12 +8,17 @@ resources:
- grafana.pvc.yaml
- influxdb.pvc.yaml
- grafana.ingress.yaml
# prometheus-operator crds
- https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.70.0/bundle.yaml
- prometheus.yaml
- thanos-objstore-config.sealedsecret.yaml
helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 6.56.2
version: 7.3.0
valuesFile: grafana.values.yaml
- releaseName: influxdb
@@ -25,5 +30,5 @@ helmCharts:
- releaseName: telegraf-speedtest
name: telegraf
repo: https://helm.influxdata.com/
version: 1.8.27
version: 1.8.39
valuesFile: telegraf-speedtest.values.yaml
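Because this kustomization inflates Helm charts, rendering it requires Helm support to be switched on; a minimal sketch (the apps/monitoring path is an assumption based on the repo layout):
```
# render the monitoring stack locally; requires helm on the PATH
kustomize build --enable-helm apps/monitoring
```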


@@ -0,0 +1,75 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: monitoring # needs to be the same as in the kustomization.yaml
---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
spec:
resources:
requests:
memory: 400Mi
serviceAccountName: prometheus
enableAdminAPI: false
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector: {}
thanos:
version: v0.33.0
objectStorageConfig:
# loads the config from a secret named thanos-objstore-config in the same namespace
key: thanos.yaml
name: thanos-objstore-config
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
spec:
type: ClusterIP
ports:
- port: 9090
targetPort: 9090
protocol: TCP
selector:
prometheus: prometheus


@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: thanos-objstore-config
namespace: monitoring
spec:
encryptedData:
thanos.yaml: AgCXlr7NO2DoH1R0ngtDFi8rgJaDnW5WSmOMjvXF4GMcEjnn1kwQMLkF0Xz1BUB5GlQkTAg+ZjCWGMlfycBmUnZb+koZK3X1YLsk1BxBxtuSqhj35iQYxKQ7rAlsz7FxUQjK2oiJkFeQmo/rwcw6l6vZJ73+THYSebR9mLQ/H0pnmJM3ldLX4iWL2H8BZ7ftOYdXO7Xv0lk2k2L4O4LgnB1Uedpyk0HLVxAv3VdVU/RFpHm5Q7kudrCMm9ENcJG7qIWuii8GkysvEefbo2phgKn1Zr5XR6SyekuW2e6FyHe9us5Pv5HnJ6Z2+ZyewygaGgHiRqtxRMaLbahICewfSHwyGzeAD2kdgwVyJYXxVPV9qKQvZmj0ZDCDZ5K548mSUq7nNXSI9M9AJBTKUoqb2FXK3pqn4yh9M1l+7Pmno5Fs22blAyGsRqO32GxrYvEXPpdSeqHRjOMYTnbPuteGRKcvmSEUSuHzkeoTzU1Jh4Sg0ygtQUNIKtbwhJm1XpbJ0oaR5ukWMxPfpDv+B5FmrDsU/I+o62+NtCLQLkK6MoRBFiJ1kymtKkM3vQ1CVg4Vtc5Gc2D6mMu5K8kEuUODweBb8qPnYH7ULfTYORldj3d+Fb2mGF5mAU6xHMzbocsdgZpbAzUP/FfJmMMDWf4aW3LJ1mBjUD06KAwPsQvbTm6VInrdXh2QVb4UIp41kbyK8sanHrvh3bprHloxt8OnTZ2HQl+XN+kxYirkVkL34lIlk7KdYCWqO7QqH0ncd9WF0f9mpPGbxo3J
template:
metadata:
creationTimestamp: null
name: thanos-objstore-config
namespace: monitoring
type: Opaque
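A SealedSecret like this is typically produced by piping a regular Secret through kubeseal; a sketch, assuming the sealed-secrets controller runs in kube-system:
```
kubectl create secret generic thanos-objstore-config \
  --namespace monitoring \
  --from-file=thanos.yaml \
  --dry-run=client -o yaml \
  | kubeseal --controller-namespace kube-system --format yaml \
  > thanos-objstore-config.sealedsecret.yaml
```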


@@ -23,3 +23,29 @@ spec:
requests:
storage: "150Gi"
volumeName: nextcloud-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-syncthing-shared
spec:
capacity:
storage: "150Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/syncthing
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nextcloud-syncthing-shared
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "150Gi"
volumeName: nextcloud-syncthing-shared

apps/nextcloud/readme.md

@@ -0,0 +1,17 @@
## Running occ commands
Sometimes you need to run a command inside the Nextcloud container directly. You can do that by executing it as the www-data user via kubectl exec:
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ <command>"
```
Here are some examples.

### Putting Nextcloud into maintenance mode
Some admin actions (e.g. backups) require you to put your Nextcloud instance into maintenance mode first:
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ maintenance:mode --on"
```
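Once the backup is done, the same pattern turns maintenance mode off again:
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ maintenance:mode --off"
```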


@@ -1,9 +1,6 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
image:
tag: "28"
ingress:
enabled: false
@@ -49,6 +46,15 @@ nextcloud:
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
extraVolumes:
- name: my-volume
persistentVolumeClaim:
claimName: nextcloud-nfs
extraVolumeMounts:
- name: my-volume
mountPath: /var/www/html/my-volume
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
@@ -74,8 +80,7 @@ nginx:
enabled: false
internalDatabase:
enabled: true
name: nextcloud
enabled: false
##
## External database configuration
@@ -89,13 +94,7 @@ externalDatabase:
## Database host
host: postgres-postgresql.postgres
## Database user
# user: nextcloud
# ## Database password
# password: test
## Database name
database: nextcloud
## Use an existing secret


@@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mealie
spec:
selector:
matchLabels:
app: mealie
template:
metadata:
labels:
app: mealie
spec:
containers:
- name: mealie
image: mealie
resources:
limits:
memory: "500Mi"
cpu: "500m"
ports:
- containerPort: 9000
env:
- name: ALLOW_SIGNUP
value: "true"
- name: TZ
value: Europe/Paris
- name: BASE_URL
value: https://recipes.kluster.moll.re
volumeMounts:
- name: mealie-data
mountPath: /app/data
volumes:
- name: mealie-data
persistentVolumeClaim:
claimName: mealie-data
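Before the IngressRoute below is in place, the deployment can be smoke-tested with a plain port-forward (the recipes namespace is set by the kustomization further down):
```
kubectl -n recipes port-forward deploy/mealie 9000:9000
# then browse http://localhost:9000
```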


@@ -1,15 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: homarr-ingress
name: mealie-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`start.kluster.moll.re`)
- match: Host(`recipes.kluster.moll.re`)
kind: Rule
services:
- name: homarr
port: 7575
- name: mealie-web
port: 9000
tls:
certResolver: default-tls


@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: recipes
resources:
- namespace.yaml
- deployment.yaml
- pvc.yaml
- service.yaml
- ingress.yaml
images:
- name: mealie
newTag: v1.2.0
newName: ghcr.io/mealie-recipes/mealie


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
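The placeholder name is deliberate: kustomize's namespace transformer also renames Namespace objects to the namespace declared in the kustomization, so this manifest comes out as the recipes namespace at build time (the same pattern recurs in the other apps below). This can be checked with:
```
kustomize build apps/recipes | grep -B1 -A2 'kind: Namespace'
```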

apps/recipes/pvc.yaml

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mealie-data
spec:
resources:
requests:
storage: 1Gi
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce

apps/recipes/service.yaml

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: mealie-web
spec:
selector:
app: mealie
ports:
- port: 9000
targetPort: 9000


@@ -14,4 +14,3 @@ spec:
port: 80
tls:
certResolver: default-tls


@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: syncthing
spec:
selector:
matchLabels:
app: syncthing
template:
metadata:
labels:
app: syncthing
spec:
containers:
- name: syncthing
image: syncthing
resources:
limits:
memory: "256Mi"
cpu: "500m"
ports:
- containerPort: 8384
protocol: TCP
name: syncthing-web
- containerPort: 22000
protocol: TCP
- containerPort: 22000
protocol: UDP
volumeMounts:
- name: persistence
mountPath: /var/syncthing
volumes:
- name: persistence
persistentVolumeClaim:
claimName: syncthing-claim


@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: syncthing-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`syncthing.kluster.moll.re`)
kind: Rule
services:
- name: syncthing-web
port: 8384
tls:
certResolver: default-tls


@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: syncthing
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- servicemonitor.yaml
- syncthing-api.sealedsecret.yaml
images:
- name: syncthing
newName: syncthing/syncthing
newTag: "1.27"


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/syncthing/pvc.yaml

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: syncthing-claim
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi


@@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
name: syncthing-web
labels:
app: syncthing
spec:
selector:
app: syncthing
type: ClusterIP
ports:
- port: 8384
targetPort: 8384
name: syncthing-web
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-listen
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.4
ports:
- port: 22000
targetPort: 22000
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-discover
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.4
ports:
- port: 22000
targetPort: 22000
protocol: UDP


@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: syncthing-servicemonitor
labels:
app: syncthing
spec:
selector:
matchLabels:
app: syncthing
endpoints:
- port: syncthing-web
path: /metrics
bearerTokenSecret:
name: syncthing-api
key: token
namespace: syncthing


@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: syncthing-api
namespace: syncthing
spec:
encryptedData:
token: AgC1hG1aguLIWBgA1R19MGrXDq7BONAldMEXtCeGXLO9Xar08f7qFqprtRJAMOID4trUEBMAkF96m7rH7QHTpO0WzRLrJctLi7U6NgESUJBDxusqjij3RAANS69Xt27mu2oa+rhm605CfFJT6Gpx/2CxrFtUD3yCijilDnEVvw4WvTLHvVQMCd8cM8ZDlpBsSYbxvtCUN1+B02DCucLpMphspxV2SGPAdc04xQD7d0vUhNLekFi0xSgu0jiRGVDHOG5Egd9d/BGeNOBgiUVxJxqqdXc6EmkslcSUtMQJ5luSxjogf+p3jdOqt4aPpUeR8sSPb6OSEIZD/Cfs9X4akHdpUAqkycu+V24lDxeHWAtIviCMBPttrwNAEytgwqaiT0U4UmL5GqR97jpmy3Tx+jYKuXkt4Igb6VByreuL9aZacRrqRhCCgbg95Y/UrYlLAbZYOI/+KsFzB5akGpZXUDcW9h2IkTUmcT+QxWXqEoNpoTI5qAnKiu/9T5elDKghjMHYX+CnPj+rXlQIJzX7NkZ0Q6HpKQ4B2Vd1Ewkvadf963jBodUe7WiMt8UeYgzCa33F4U23JjExIrL8t3r8MQ/IIdtfUvyz6Da1vp5hjpBUnUCk8rca/6VC3GO1GP3DLdIXiZQY1OOTHJlyLG7+bIL35zVfkmLMzmlIdaFsfeYiL4P+hYRbLABPAJk8lY7MEdiczpvI9HlmFVatJaPrFJwx9jyhzqIOq5eGt0OIkFt+fw==
template:
metadata:
creationTimestamp: null
name: syncthing-api
namespace: syncthing
type: Opaque
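The ServiceMonitor above presents this token as a bearer token; the same request can be reproduced by hand, assuming the secret's plaintext is the Syncthing API key (which Syncthing also accepts via the Authorization header):
```
TOKEN=$(kubectl -n syncthing get secret syncthing-api -o jsonpath='{.data.token}' | base64 -d)
kubectl -n syncthing run curl-test --rm -it --restart=Never --image=curlimages/curl -- \
  -H "Authorization: Bearer $TOKEN" http://syncthing-web:8384/metrics
```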


@@ -3,11 +3,13 @@ kind: IngressRoute
metadata:
namespace: whoami
name: whoami-ingressroute
annotations:
spec:
entryPoints:
- websecure
routes:
- match: Host(`whoami.kluster.moll.re`)
- match: Host(`whoami.kluster.moll.re`) || Host(`homepage.kluster.moll.re`)
kind: Rule
services:
- name: whoami


@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: cloudflare-api
namespace: external-dns
spec:
encryptedData:
CLOUDFLARE_TOKEN: AgA+HAbpfu4MUK152g2XuWnCoLflCgp4C5gpUWy/IRCETyhcaP5SefAPGC/TGdGPTsZWoDO9qIDAdEiFw4Aw5idfBOm1Ql7vLunWeqNysirU9QJIbL21Fb8+UafrLnAGQySjzAT4MyK5yntn3T8l568cR22jPQi5a0CqL91jGXBeANkTQlokMFJCYkYsaqhCirSDlldvVrJGlWg+T1odyqyytIOO9OaYNt0jA1NisKpLWcLPwYcVkf0ntdCSQaloMX/LeoY16kECOVMYrVPIqGbOhCvAehpjyXxydFjyaYIV5p5hlKD0Sjlpc9zTCTFF7KUddNU9m2GhJqKT8bZm0d2g1yth4dNLgbUSp1nU31vpRalRJYXBVwPVei0lSGL7Jkb9LzCRHxL4J8hP/AeYrntpoAqMDxZsMZSpUnbTQklT2WyvIzyhpiNtEFrH8P6CYq61dWENXWkMwDqzKfM7Xlg9ifW6YzTQfsoo/OWhtWRmLDNVrXwhZqRWb7UjYr6xGPAzc/I4H2SJk5HLubylaXY3I2X2dWy+YTttiUuzQl0YfzrADAlu8ZWPiAfLqGmKnOR0STqeCvAT6ya8Ky09aY5GWLdTJfTayivGA0PvRJE2idf/VtpVDBERN5lCDHLBrRvU8o5wBlkTxU4B8zHmF+pu7zO3bA8IOUpyQMnlFzZTOI1s0Tl6XiRmy+WnA4tyxjrEse75BvS3WGCnaezQJW9gpF1/UC18vys46f9Jn5FHfm4lMpOscSIy
template:
metadata:
creationTimestamp: null
name: cloudflare-api
namespace: external-dns
type: Opaque


@@ -0,0 +1,63 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: octodns-cronjob
spec:
schedule: "0 */6 * * *"
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
backoffLimit: 0
template:
spec:
initContainers:
- name: git
image: git
command: ["git"]
args:
- clone
- https://git.kluster.moll.re/remoll/dns.git
- /etc/octodns
volumeMounts:
- name: octodns-config
mountPath: /etc/octodns
containers:
- name: octodns
image: octodns
env:
# - name: CLOUDFLARE_ACCOUNT_ID
# valueFrom:
# secretKeyRef:
# name: cloudflare-api
# key: CLOUDFLARE_ACCOUNT_ID
- name: CLOUDFLARE_TOKEN
valueFrom:
secretKeyRef:
name: cloudflare-api
key: CLOUDFLARE_TOKEN
# - name: CLOUDFLARE_EMAIL
# valueFrom:
# secretKeyRef:
# name: cloudflare-api
# key: CLOUDFLARE_EMAIL
command: ["/bin/sh", "-c"]
args:
- >-
cd /etc/octodns
&&
pip install -r ./requirements.txt
&&
octodns-sync --config-file ./config.yaml --doit
&&
echo "done..."
volumeMounts:
- name: octodns-config
mountPath: /etc/octodns
volumes:
- name: octodns-config
emptyDir: {}
restartPolicy: Never
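The config.yaml lives in the cloned dns repo and is not part of this diff; with octoDNS, the CLOUDFLARE_TOKEN injected above is usually consumed through an env/ reference. A hypothetical sketch:
```
providers:
  config:
    class: octodns.provider.yaml.YamlProvider
    directory: ./zones
  cloudflare:
    class: octodns_cloudflare.CloudflareProvider
    token: env/CLOUDFLARE_TOKEN
zones:
  moll.re.:
    sources:
      - config
    targets:
      - cloudflare
```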


@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: external-dns
resources:
- namespace.yaml
- cloudflare-api.sealedsecret.yaml
- cronjob.yaml
images:
- name: octodns
newName: octodns/octodns # has all plugins
newTag: "2024.02"
- name: git
newName: alpine/git
newTag: "2.43.0"


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -22,6 +22,8 @@ spec:
value: ":80"
- name: DRONE_GITEA_SERVER
value: https://git.kluster.moll.re
- name: DRONE_USER_CREATE
value: username:remoll,admin:true
- name: DRONE_GITEA_CLIENT_ID
valueFrom:
secretKeyRef:


@@ -6,7 +6,8 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`git.kluster.moll.re`)
# block the metrics endpoint
- match: Host(`git.kluster.moll.re`) && !Path(`/metrics`)
kind: Rule
services:
- name: gitea-http
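Blocking /metrics at the ingress only affects external traffic; the endpoint stays reachable in-cluster, which is what the ServiceMonitor below relies on. A quick manual check (assuming the chart's default HTTP service port of 3000):
```
kubectl -n gitea port-forward svc/gitea-http 3000:3000 &
curl -s http://localhost:3000/metrics | head
```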


@@ -0,0 +1,10 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: gitea-servicemonitor
spec:
endpoints:
- port: http
selector:
matchLabels:
app.kubernetes.io/name: gitea


@@ -98,15 +98,9 @@ gitea:
# this user needs to stay admin and active, but we change its password to something unusable
password: changedalready
email: "gitea@delete.me"
## @param gitea.metrics.enabled Enable Gitea metrics
## @param gitea.metrics.serviceMonitor.enabled Enable Gitea metrics service monitor
metrics:
enabled: false
serviceMonitor:
enabled: false
# additionalLabels:
# prometheus-release: prom1
enabled: true
## @param gitea.config Configuration for the Gitea server,ref: [config-cheat-sheet](https://docs.gitea.io/en-us/config-cheat-sheet/)
config:


@@ -4,6 +4,7 @@ resources:
- namespace.yaml
- gitea.pvc.yaml
- gitea.ingress.yaml
- gitea.servicemonitor.yaml
- drone-kube-runner.deployment.yaml
- drone-server.deployment.yaml
- drone-server.sealedsecret.yaml
@@ -15,6 +16,6 @@ helmCharts:
- name: gitea
namespace: gitea # needs to be set explicitly for svc to be referenced correctly
releaseName: gitea
version: 9.6.1
version: 10.1.3
valuesFile: gitea.values.yaml
repo: https://dl.gitea.io/charts/


@@ -1,2 +0,0 @@
name: metallb
chart: metallb/metallb


@@ -1,15 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config
spec:
secretTemplates:
- name: secret-1
labels:
label1: value1
annotations:
key1: value1
stringData:
data-name0: data-value0
data:
data-name1: ZGF0YS12YWx1ZTE=


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ipaddresspool.yaml
namespace: metallb-system
helmCharts:
- name: metallb
repo: https://metallb.github.io/metallb
version: 0.14.3
releaseName: metallb
valuesFile: values.yaml
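The referenced ipaddresspool.yaml is not shown in this diff; with MetalLB ≥ 0.13 it would use the CRD-based configuration. A sketch that would cover the 192.168.3.4 address pinned by the Syncthing services (pool name and range are assumptions):
```
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
    - 192.168.3.0/24
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
    - default
```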


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -1,191 +1,18 @@
# Default values for metallb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
loadBalancerClass: ""
# existingConfigMap: "config"
rbac:
# create specifies whether to install and use RBAC rules.
create: true
prometheus:
# scrape annotations specifies whether to add Prometheus metric
# auto-collection annotations to pods. See
# https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
# for a corresponding Prometheus configuration. Alternatively, you
# may want to use the Prometheus Operator
# (https://github.com/coreos/prometheus-operator) for more powerful
# monitoring configuration. If you use the Prometheus operator, this
# can be left at false.
scrapeAnnotations: false
# port both controller and speaker will listen on for metrics
metricsPort: 7472
# if set, enables rbac proxy on the controller and speaker to expose
# the metrics via tls.
# secureMetricsPort: 9120
# the name of the secret to be mounted in the speaker pod
# to expose the metrics securely. If not present, a self-signed
# certificate will be used.
speakerMetricsTLSSecret: ""
# the name of the secret to be mounted in the controller pod
# to expose the metrics securely. If not present, a self-signed
# certificate will be used.
controllerMetricsTLSSecret: ""
# prometheus doesn't have permission to scrape all namespaces, so we give it permission to scrape metallb's one
rbacPrometheus: true
# the service account used by prometheus
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
serviceAccount: ""
# the namespace where prometheus is deployed
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
namespace: ""
# the image to be used for the kuberbacproxy container
rbacProxy:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.12.0
pullPolicy:
# Prometheus Operator PodMonitors
podMonitor:
# enable support for Prometheus Operator
enabled: false
# optional additional labels for podMonitors
additionalLabels: {}
# optional annotations for podMonitors
annotations: {}
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
rbacPrometheus: false
# Prometheus Operator ServiceMonitors. To be used as an alternative
# to podMonitor, supports secure metrics.
serviceMonitor:
# enable support for Prometheus Operator
enabled: false
enabled: true
speaker:
# optional additional labels for the speaker serviceMonitor
additionalLabels: {}
# optional additional annotations for the speaker serviceMonitor
annotations: {}
# optional tls configuration for the speaker serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
controller:
# optional additional labels for the controller serviceMonitor
additionalLabels: {}
# optional additional annotations for the controller serviceMonitor
annotations: {}
# optional tls configuration for the controller serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator alertmanager alerts
prometheusRule:
# enable alertmanager alerts
enabled: false
# optional additional labels for prometheusRules
additionalLabels: {}
# optional annotations for prometheusRules
annotations: {}
# MetalLBStaleConfig
staleConfig:
enabled: true
labels:
severity: warning
# MetalLBConfigNotLoaded
configNotLoaded:
enabled: true
labels:
severity: warning
# MetalLBAddressPoolExhausted
addressPoolExhausted:
enabled: true
labels:
severity: alert
addressPoolUsage:
enabled: true
thresholds:
- percent: 75
labels:
severity: warning
- percent: 85
labels:
severity: warning
- percent: 95
labels:
severity: alert
# MetalLBBGPSessionDown
bgpSessionDown:
enabled: true
labels:
severity: alert
extraAlerts: []
# controller contains configuration specific to the MetalLB cluster
# controller.
@@ -193,59 +20,7 @@ controller:
enabled: true
# -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
# command: /controller
# webhookMode: enabled
image:
repository: quay.io/metallb/controller
tag:
pullPolicy:
## @param controller.updateStrategy.type Metallb controller deployment strategy type.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## e.g:
## strategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
strategy:
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
securityContext:
runAsNonRoot: true
# nobody
runAsUser: 65534
fsGroup: 65534
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
runtimeClassName: ""
affinity: {}
podAnnotations: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
@@ -254,83 +29,7 @@ speaker:
# command: /speaker
# -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
tolerateMaster: true
memberlist:
enabled: true
mlBindPort: 7946
mlSecretKeyPath: "/etc/ml_secret_key"
image:
repository: quay.io/metallb/speaker
tag:
pullPolicy:
## @param speaker.updateStrategy.type Speaker daemonset strategy type
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy:
## StrategyType
## Can be set to RollingUpdate or OnDelete
##
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
## Defines a secret name for the controller to generate a memberlist encryption secret
## By default secretName: {{ "metallb.fullname" }}-memberlist
##
# secretName:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
affinity: {}
## Selects which runtime class will be used by the pod.
runtimeClassName: ""
podAnnotations: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 5
# frr contains configuration specific to the MetalLB FRR container,
# for speaker running alongside FRR.
frr:
enabled: false
image:
repository: quay.io/frrouting/frr
tag: 7.5.1
pullPolicy:
metricsPort: 7473
resources: {}
# if set, enables a rbac proxy sidecar container on the speaker to
# expose the frr metrics via tls.
# secureMetricsPort: 9121
reloader:
resources: {}
frrMetrics:
resources: {}
crds:
enabled: true


@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
namespace: pg-ha
helmCharts:
- name: cloudnative-pg
releaseName: pg-controller
version: 0.19.1
valuesFile: values.yaml
repo: https://cloudnative-pg.io/charts/


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -0,0 +1,42 @@
replicaCount: 1
# -- Operator configuration.
config:
# -- Specifies whether the secret should be created.
create: true
# -- The name of the configmap/secret to use.
name: cnpg-controller-manager-config
# -- Specifies whether it should be stored in a secret, instead of a configmap.
secret: false
# -- The content of the configmap/secret, see
# https://cloudnative-pg.io/documentation/current/operator_conf/#available-options
# for all the available options.
data: {}
# INHERITED_ANNOTATIONS: categories
# INHERITED_LABELS: environment, workload, app
# WATCH_NAMESPACE: namespace-a,namespace-b
serviceAccount:
# -- Specifies whether the service account should be created.
create: true
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
rbac:
# -- Specifies whether ClusterRole and ClusterRoleBinding should be created.
create: true
# -- Aggregate ClusterRoles to Kubernetes default user-facing roles.
# Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
aggregateClusterRoles: false
# -- Container Security Context.
service:
type: ClusterIP
# -- DO NOT CHANGE THE SERVICE NAME as it is currently used to generate the certificate
# and cannot be configured
name: cnpg-webhook-service
port: 443
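The chart only installs the cloudnative-pg operator; actual databases are declared separately as Cluster resources against its CRD. A minimal sketch (name, instance count and size are illustrative):
```
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-cluster
  namespace: pg-ha
spec:
  instances: 3
  storage:
    size: 10Gi
```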

Some files were not shown because too many files have changed in this diff.