123 Commits

Author SHA1 Message Date
d143a90228 testing a few sample (joint) configurations 2024-03-03 20:35:37 +01:00
1ad56fd27e Merge pull request 'Update Helm release traefik to v26.1.0' (#42) from renovate/traefik-26.x into main
Reviewed-on: #42
2024-03-03 19:33:13 +00:00
773a155627 Update Helm release traefik to v26.1.0 2024-03-03 19:33:13 +00:00
61945b3507 Merge pull request 'Update Helm release metallb to v0.14.3' (#34) from renovate/metallb-0.x into main
Reviewed-on: #34
2024-03-03 19:32:16 +00:00
4aa21cb0cd Update Helm release metallb to v0.14.3 2024-03-03 19:32:16 +00:00
d233ab96eb Merge pull request 'Update Helm release gitea to v10.1.3' (#46) from renovate/gitea-10.x into main
Reviewed-on: #46
2024-03-03 19:31:04 +00:00
df581e0110 Update Helm release gitea to v10.1.3 2024-03-03 19:31:04 +00:00
8a114b9384 remove homarr 2024-03-03 20:30:06 +01:00
ab6506f4f2 update immich 2024-02-21 18:35:13 +01:00
87242d293a Merge pull request 'Update Helm release homarr to v1.0.6' (#38) from renovate/homarr-1.x into main
Reviewed-on: #38
2024-02-13 10:34:15 +00:00
11d46ec295 Merge pull request 'Update Helm release gitea to v10.1.1' (#35) from renovate/gitea-10.x into main
Reviewed-on: #35
2024-02-13 10:33:42 +00:00
1b3702c4c8 Update Helm release gitea to v10.1.1 2024-02-13 10:33:42 +00:00
9b68b4a915 lets be more generous with memory 2024-02-11 18:15:11 +01:00
18889d7391 add other recipes 2024-02-11 11:28:30 +01:00
a38ad1d7e6 bye bye 2024-02-10 19:35:22 +01:00
edcb9158f5 what now? 2024-02-10 19:21:04 +01:00
71b1c252f3 turns out it was important 2024-02-10 19:17:28 +01:00
b30f44d2c6 last chance 2024-02-10 19:16:08 +01:00
85abf0fda6 with services? 2024-02-10 19:04:08 +01:00
5e21ceaad3 lets try this 2024-02-10 18:58:20 +01:00
3f5c1a5a5c add configmap 2024-02-10 10:56:59 +01:00
0195833fc3 service account not needed 2024-02-10 10:54:41 +01:00
64835e16de slight fix 2024-02-10 10:53:20 +01:00
4e11a33855 correct backend 2024-02-10 10:46:38 +01:00
bad024861a add recipes 2024-02-10 10:45:53 +01:00
fe5d6a9014 Merge pull request 'Update adguard/adguardhome Docker tag to v0.107.44' (#39) from renovate/adguard-adguardhome-0.x into main
Reviewed-on: #39
2024-02-08 09:24:43 +00:00
f2898d7e0b Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024.2' (#40) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #40
2024-02-08 09:24:05 +00:00
f67f0c8889 Update homeassistant/home-assistant Docker tag to v2024.2 2024-02-07 21:02:14 +00:00
0ccb17d8e1 Update adguard/adguardhome Docker tag to v0.107.44 2024-02-07 11:01:45 +00:00
bb6d417937 Merge pull request 'Update actualbudget/actual-server Docker tag to v24.2.0' (#36) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #36
2024-02-07 10:09:46 +00:00
4e2ebe2540 Merge pull request 'Update octodns/octodns Docker tag to v2024' (#37) from renovate/octodns-octodns-2024.x into main
Reviewed-on: #37
2024-02-07 10:09:26 +00:00
c5310b0f00 Update Helm release homarr to v1.0.6 2024-02-04 17:01:35 +00:00
46ef973f70 Update octodns/octodns Docker tag to v2024 2024-02-03 22:02:18 +00:00
c12d2dc7a6 whoopsie 2024-02-03 22:27:29 +01:00
e28c6ffd52 add physics 2024-02-03 22:19:09 +01:00
7ba6860ea0 Update actualbudget/actual-server Docker tag to v24.2.0 2024-02-03 21:01:51 +00:00
33c23ee42b Merge pull request 'Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1' (#31) from renovate/ghcr.io-immich-app-immich-machine-learning-1.x into main
Reviewed-on: #31
2024-02-03 20:58:07 +00:00
b2f8c8bced Merge branch 'main' into renovate/ghcr.io-immich-app-immich-machine-learning-1.x 2024-02-03 20:57:54 +00:00
d5277d3d6a Merge pull request 'Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1' (#32) from renovate/ghcr.io-immich-app-immich-server-1.x into main
Reviewed-on: #32
2024-02-03 20:56:19 +00:00
e3c90f5ede Merge branch 'main' into renovate/ghcr.io-immich-app-immich-server-1.x 2024-02-03 20:55:47 +00:00
eb5bda63db Merge pull request 'Update Helm release grafana to v7.3.0' (#26) from renovate/grafana-7.x into main
Reviewed-on: #26
2024-02-03 20:54:45 +00:00
a10a216f0e Update ghcr.io/immich-app/immich-server Docker tag to v1.94.1 2024-01-31 20:01:05 +00:00
3cf9fd0b87 Update ghcr.io/immich-app/immich-machine-learning Docker tag to v1.94.1 2024-01-31 20:01:03 +00:00
ea1fa1637f Update Helm release grafana to v7.3.0 2024-01-30 15:00:50 +00:00
96abe2a0f5 auto admin 2024-01-23 18:16:40 +01:00
9623f33b59 Merge pull request 'Update Helm release gitea to v10' (#16) from renovate/gitea-10.x into main
Reviewed-on: #16
2024-01-22 10:30:17 +00:00
b065fc7e59 idioto 2024-01-22 11:27:58 +01:00
617ed5601c allow renovate to fetch release notes 2024-01-22 11:11:34 +01:00
7e21ce4181 Update Helm release gitea to v10 2024-01-22 10:00:35 +00:00
eeaed091ab Merge pull request 'Update Helm release metallb to v0.13.12' (#30) from renovate/metallb-0.x into main
Reviewed-on: #30
2024-01-16 08:59:45 +00:00
ee52d2b777 Update Helm release metallb to v0.13.12 2024-01-15 19:00:31 +00:00
384e9fbaec no service account needed 2024-01-15 19:12:19 +01:00
606aded35f argo manage metallb 2024-01-15 19:03:49 +01:00
a3aa8888e9 or like that? 2024-01-14 17:31:24 +01:00
aaeb43e9c3 let's check if we get ips like that 2024-01-14 17:27:37 +01:00
a9b1d02a7e keeping some ips here 2024-01-14 17:22:57 +01:00
76b49270eb fix type 2024-01-14 12:58:42 +01:00
9b57715f92 bad yaml 2024-01-14 12:56:23 +01:00
85a96cf87b bump version 2024-01-14 12:54:33 +01:00
78b4be8fbd next try 2024-01-14 12:51:14 +01:00
7bc10b57ce lets try adding thanos 2024-01-14 12:41:03 +01:00
de26a052e8 QOL improvements 2024-01-11 22:05:05 +01:00
28ff769757 Deploy full on octodns 2024-01-11 21:57:02 +01:00
6a58ea337e forgot secret 2024-01-11 21:38:24 +01:00
2af279c161 still crashes, now due to auth 2024-01-11 21:37:29 +01:00
c26997ff83 single run only 2024-01-11 18:39:13 +01:00
a354464f6e try with local directory 2024-01-11 18:26:37 +01:00
268a9f3a7a correct env vars and labels 2024-01-11 18:12:12 +01:00
4ddeaf6c99 try this 2024-01-11 18:08:35 +01:00
b6f9a818af Execute 2nd command as well 2024-01-11 18:04:55 +01:00
f4670aa471 Add ddns 2024-01-11 17:59:56 +01:00
72a2914c24 correct git target 2024-01-11 17:52:29 +01:00
1d5bc8a9c1 why? 2024-01-11 17:51:01 +01:00
892c412fd9 let's tune it down 2024-01-11 17:46:25 +01:00
b6f7ead955 whoopsie 2024-01-11 17:44:58 +01:00
f033ba16eb correct version 2024-01-11 17:43:31 +01:00
f3ae2c424b use octodns 2024-01-11 17:42:35 +01:00
36035ee84d bump immich version 2024-01-11 10:08:12 +01:00
50679b400a Merge pull request 'Update actualbudget/actual-server Docker tag to v24' (#28) from renovate/actualbudget-actual-server-24.x into main
Reviewed-on: #28
2024-01-10 16:08:35 +00:00
a68fb5f0a7 Update actualbudget/actual-server Docker tag to v24 2024-01-10 13:00:43 +00:00
5792367b8b Add finance to auto deploy 2024-01-10 13:15:42 +01:00
3699b79f1a let's try these monitorings 2024-01-08 15:48:38 +01:00
e473abda12 Merge pull request 'Update Helm release grafana to v7.0.21' (#25) from renovate/grafana-7.x into main
Reviewed-on: #25
2024-01-08 13:01:14 +00:00
f67f586006 Update Helm release grafana to v7.0.21 2024-01-08 10:00:33 +00:00
61e1276f02 maybe like that 2024-01-07 12:30:51 +01:00
111fd35fc3 needed? 2024-01-07 12:18:06 +01:00
cc4148fb8a correct crds 2024-01-07 12:16:47 +01:00
f1e624985f come on 2024-01-07 12:15:10 +01:00
c8d7d3c854 use traefik 2024-01-07 12:12:46 +01:00
4880503609 Is actually a token 2024-01-07 12:06:53 +01:00
f905ce1611 maybe it wes a token actually? 2024-01-07 12:05:42 +01:00
ecfc65ecdd try like this? 2024-01-07 11:59:41 +01:00
7da1d705a4 update authorization 2024-01-07 11:51:20 +01:00
299cbea97e change ingress slightly 2024-01-07 11:41:05 +01:00
b633d61920 update whoami 2024-01-07 11:39:10 +01:00
bfb8244e59 made a dum dum 2024-01-07 11:37:38 +01:00
33c2df9fa3 add external dns 2024-01-07 11:35:52 +01:00
3d84d6bed1 does servicemonitor accept this? 2024-01-04 18:29:18 +01:00
cf6a931097 fix port names 2024-01-04 18:27:03 +01:00
53c3865072 fix label syntax 2024-01-04 18:23:32 +01:00
d09a3509af trying to monitor syncthing 2024-01-04 18:21:26 +01:00
8c0abc16c4 Merge pull request 'Update homeassistant/home-assistant Docker tag to v2024' (#24) from renovate/homeassistant-home-assistant-2024.x into main
Reviewed-on: #24
2024-01-04 08:45:45 +00:00
399969677f Merge pull request 'Update Helm release immich to v0.3.1' (#22) from renovate/immich-0.x into main
Reviewed-on: #22
2024-01-04 08:44:55 +00:00
762756310a Update homeassistant/home-assistant Docker tag to v2024 2024-01-03 21:00:38 +00:00
ec964be7c3 whoopsie 2023-12-31 18:49:54 +01:00
0603da76b2 update gitea metric collection 2023-12-31 18:40:57 +01:00
a437c4228e update some scraping config 2023-12-31 18:26:45 +01:00
d5aab95186 try as a string 2023-12-31 17:58:15 +01:00
3acb329730 try again 2023-12-31 17:55:22 +01:00
73ce4e340f try again 2023-12-31 17:44:42 +01:00
0d4b6f4605 remove label requiremetns 2023-12-31 17:37:51 +01:00
deeb35bbb6 test monitoring 2023-12-31 17:34:11 +01:00
d4c658a28c match all servicemonitors? 2023-12-31 17:13:58 +01:00
1fcebe033b fix annotations 2023-12-31 17:06:13 +01:00
8fe51863f4 fix tag 2023-12-30 10:48:46 +01:00
c4eda4e75d fix tag 2023-12-30 10:45:23 +01:00
9490015728 maybe like that? 2023-12-30 10:42:23 +01:00
a641df167f remove port names 2023-12-30 10:39:55 +01:00
21d100fb62 update service config 2023-12-30 10:38:59 +01:00
26b06c553a deploy syncthing 2023-12-30 10:30:05 +01:00
d51bfcf7db Merge pull request 'Update Helm release homarr to v1.0.4' (#23) from renovate/homarr-1.x into main
Reviewed-on: #23
2023-12-27 17:27:57 +00:00
788c2436fc Update Helm release homarr to v1.0.4 2023-12-27 17:00:32 +00:00
c9f883eaa6 Update Helm release immich to v0.3.1 2023-12-23 16:00:31 +00:00
115 changed files with 2464 additions and 858 deletions

.gitmodules

@@ -0,0 +1,3 @@
[submodule "infrastructure/external-dns/octodns"]
path = infrastructure/external-dns/octodns
url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git


@@ -4,8 +4,7 @@
### Initial setup
#### Requirements:
- A running k3s instance run:
- `metalLB` deployed
- A running k3s instance
- `sealedsecrets` deployed
#### Installing argo and the app-of-apps
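The hunk above stops at the app-of-apps heading without showing a manifest. Purely as an illustration of that pattern (the repository's real Application manifest is not part of this diff; the repoURL and path below are assumptions), a minimal Argo CD app-of-apps definition looks roughly like this:

# Illustrative sketch only -- repoURL and path are assumed, not taken from this repository
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: app-of-apps
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://git.kluster.moll.re/remoll/kluster.git   # hypothetical repo URL
    targetRevision: main
    path: apps                                                # hypothetical path holding the child Application manifests
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

Applying this single Application would make Argo CD create and sync every child Application found under the given path.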


@@ -10,7 +10,7 @@ resources:
images:
- name: adguard/adguardhome
newName: adguard/adguardhome
newTag: v0.107.43
newTag: v0.107.44
namespace: adguard


@@ -24,6 +24,8 @@ metadata:
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
externalTrafficPolicy: Local
ports:
- name: dns-tcp
nodePort: 31306
@@ -46,6 +48,7 @@ metadata:
spec:
allocateLoadBalancerNodePorts: true
loadBalancerIP: 192.168.3.2
externalTrafficPolicy: Local
ports:
- name: dns-udp
nodePort: 30547

apps/files/README.md

@@ -0,0 +1,8 @@
# File sync
My personal cross-platform file sync. Syncthing serves my Android and Linux clients, and Nextcloud serves my iOS clients.
## Overview
Both services share a common persistent volume, which lets each apply its own logic for syncing to other devices. The server acts as a relay.
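As a minimal sketch of the shared-persistence pattern described above (illustration only; the actual PVC and workload manifests appear later in this diff), both workloads mount the same ReadWriteMany claim at /files (Syncthing via its Deployment, Nextcloud via the chart's extraVolumes and extraVolumeMounts):

# Sketch: one RWX claim mounted by both workloads (names match the manifests below)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: files-nfs
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany            # RWX so several pods can mount it at once
  resources:
    requests:
      storage: 100Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: syncthing              # Nextcloud mounts the identical claim via extraVolumes in values.yaml
spec:
  selector:
    matchLabels:
      app: syncthing
  template:
    metadata:
      labels:
        app: syncthing
    spec:
      containers:
        - name: syncthing
          image: syncthing/syncthing:1.27
          volumeMounts:
            - name: persistence
              mountPath: /files
      volumes:
        - name: persistence
          persistentVolumeClaim:
            claimName: files-nfs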


@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: files
resources:
- namespace.yaml
- pvc.yaml
- syncthing/
- nextcloud/


@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nextcloud-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`nextcloud2.kluster.moll.re`)
kind: Rule
services:
- name: nextcloud
port: 8080
tls:
certResolver: default-tls


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- pvc.yaml
- ingress.yaml
- postgres.yaml
- postgres-credentials.sealedsecret.yaml
helmCharts:
- name: nextcloud
releaseName: nextcloud
version: 4.5.5
valuesFile: values.yaml
repo: https://nextcloud.github.io/helm/


@@ -0,0 +1,17 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: postgres-credentials
namespace: files
spec:
encryptedData:
database: AgBOgmqlfgiN2VqxNyYL6O+/jdzPmGg97zOXxZ7KiD07b4/2FdmlWgOZZp7oUpQ9RMV0WybC0jau2YVlgXB32afgJ3uinaAAhzZwvzy8dgapNpe8ClxnFINRhKKC9kxK7YeDwtptbDQn7YtEmVGHI66/71VyGy7NME4Pk0Y4FxxpF6KAZMAHNyez4JMa9V+XFtYV5G5bOkPY/ku4LcYntiMAlEaArF+re1m5nLQmZ4SVkWlOc41N4Hv1HrCv8qq2kj7zVR5/J2qW8NlzmdJJqv1AP1foELuITZZKxwNspxynNxhjXTX0fP6vzfJpxtzb2s/4Yh2uT/UPb2rOdcGaXjjHKxjSX23tG5ZT+z5lt0y9UEmUYytlcsYv9vsRqCmeFsB63S7aABeCRSOJyGLsuUc7xqSZ2ijDG38qLij+JPgoEIbSLfRYVGE5GMo9EbHt4N+ZIMpJYQXq0VhDip/r11SENfUa3XoautQ5uVR1D50FuSrN16t24bQXai9uifkBpDyvqbiqgv7s3qOjF9u8I0eyeJA0ZO1JO174B9SO3IcZYys8c87fSuWvFbGepLNqfneSIx93klDUdx3YEjqcrqib49+3/dn3RO9/puyhJ6O0TEZneToyauV3lxpR+XG/PDx7EQ88lELgD/AmtulsLHkYNgpoblFPbgDUeHhOgoBRAe22Hiy0Co4eh0SPVPyKhj8MyYhPtLEV+UY=
password: AgB2eY5aKJhEcJIgArGRrsqYf5pJJoXHRkplFpaqCCQW7X7WLREb+35HDijhnJSWRI2/LXDVy/8ArJe1LiiW+05aRY/9nvmjdpUmvsdQ6DK1mvirl8Py4JYueNrk2iUmI1h+ROyubBCvRBKxueQNkuwipKvk7nIlON6cwFnqp6GPcuWihSG/GZ2nSZmxmu+thdsM/S8DPaTW/N+Sut8DyarlCN94ZRiFVZIJialibfsJGQtL/uPX0W61GTkEU4m34IN9e+POdEdg3HuFMd3RvNQpndgPjaTv4A22TJRFs+rcHlcHr+5r8acVy1V+sZy97126Z7moeKDp1rFbG2/yMT1iS2oxQN4GJceTgMzSagqdn+KgD0N38OYvp+mRUQsl7+Fpglcq03vqbvxsc1fC78XpAAPMNA/pQDvtlS1qjuB7WCa5b3mkJxjc8efIuna9GAnDGh+djhlGHLEERnEfjlnpeDb/afRejUX+i6r00GnBxuRJfV+lKh4BJsnJm29nC4t10F6ff91Ngcjf+wCm5yWSFETZ9oFrPn10igGvoZwROJYABdtfMNjidGLkdKQnG1dj3EFu5XDn9vRqt8Iu/dEyoh7a2iGYDQ1lGpz+zxA/OZ9l/SuL6JUUwXq5W1/fSbtaBPdit4cwUTopq7AcpZkMuAQyVy6N9D0Hvjx432rCxmqyGU8PyjKHoAN+nuvTi79HtHR2wo4hJeIDoktdpxswSCe9VJEvqTFGQyCZtX3uEg==
username: AgATMaQ/BRCO9vx329YxGGUGl3E68Tm3coU6IO6pYm8f+Uf7ImH4l/P84mjGDLho1zBUfILPAvM4G5xG2qkkyW4mEuB8A7NNWAhXMOS5i1msNaV2oqLYNWCOG2lFO7unkYwPSyu9EyGn/Hq/kbGPAKfUf6dtDLEc+Y0S5Ue9YA2gYK4VYUec491+02EOoprGcfM1QdGPLBrunXn4krxtGm+eTsK8nd/lnm3DK+f5uGupO844i8T0mXE1xcliysBTZzxEVpmzPN8q4TMay6qcB2wOvEyngnGCfxJGTSjTrkydPFLcI4p6IONW5QAX9eQwo6ZDo56WVNgvyNW+ZJ6hmPP9nLeHnKb3rM91CIMM0GDRYc3VFsVXwBY/sj12hiompXEVQEp+EJUbgnDLK2lW+J602ZnzyHFgwGKnfdI8PHfKoxRVf06TXPdROu1mfXr5jOXc+++LoRotkVOuf2KXMip/7HlTkRlZXKkenhIqrTtQkENJ+aaxCKdQwgE8iDtmB6ZEBiMJq/dZgvn7qbcMc/SYF3l6YZKSU2L1359CRTeuQ6J6aDml+WHvgtwLH6sIgR9Sjgxid9XlhQ3/8f9UQdR6OpblsBZYn8gYEQ1WRr7H1R3IjENpA7LtburPYyogSk4eSFWR1hkwfiiTJrfwJCPEka28a7MqX0nCKZqzzUOQqXNGPX8W9rU8aA2HcnSPrzLoOV2av9h4icw=
template:
metadata:
creationTimestamp: null
name: postgres-credentials
namespace: files


@@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: nextcloud-postgres
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:16
bootstrap:
initdb:
owner: nextcloud
database: nextcloud
secret:
name: postgres-credentials
storage:
size: 1Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true


@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nextcloud-config
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi


@@ -0,0 +1,155 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
ingress:
enabled: false
nextcloud:
host: nextcloud2.kluster.moll.re
username: admin
password: changeme
## Use an existing secret
existingSecret:
enabled: false
update: 0
# If web server is not binding default port, you can define it
# containerPort: 8080
datadir: /var/www/html/data
persistence:
subPath:
mail:
enabled: false
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
# Default config files
# IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
# Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
extraVolumes:
- name: files-nfs
persistentVolumeClaim:
claimName: files-nfs
extraVolumeMounts:
- name: files-nfs
mountPath: /files
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
# configs:
# config.php: |-
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
# configs:
# s3.config.php: |-
# <?php
# $CONFIG = array (
# 'objectstore' => array(
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
# 'arguments' => array(
# 'bucket' => 'my-bucket',
# 'autocreate' => true,
# 'key' => 'xxx',
# 'secret' => 'xxx',
# 'region' => 'us-east-1',
# 'use_ssl' => true
# )
# )
# );
nginx:
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
enabled: false
internalDatabase:
enabled: false
##
## External database configuration
##
externalDatabase:
enabled: true
type: postgresql
host: nextcloud-postgres-rw
database: nextcloud
existingSecret:
enabled: true
secretName: postgres-credentials
usernameKey: username
passwordKey: password
mariadb:
enabled: false
postgresql:
enabled: false
redis:
enabled: false
cronjob:
enabled: false
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
## If defined, PVC must be created manually before volume will be bound
existingClaim: nextcloud-config
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
livenessProbe:
enabled: false
# disable when upgrading from a previous chart version
hpa:
enabled: false
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
rbac:
enabled: false

apps/files/pvc.yaml

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: files-nfs
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Gi


@@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: syncthing
spec:
selector:
matchLabels:
app: syncthing
template:
metadata:
labels:
app: syncthing
spec:
containers:
- name: syncthing
image: syncthing
resources:
limits:
memory: "256Mi"
cpu: "500m"
ports:
- containerPort: 8384
protocol: TCP
name: syncthing-web
- containerPort: 22000
protocol: TCP
- containerPort: 22000
protocol: UDP
volumeMounts:
- name: persistence
mountPath: /files
- name: config
mountPath: /var/syncthing/config
volumes:
- name: persistence
persistentVolumeClaim:
claimName: files-nfs
- name: config
persistentVolumeClaim:
claimName: syncthing-config


@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: rss-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`syncthing2.kluster.moll.re`)
kind: Rule
services:
- name: syncthing-web
port: 8384
tls:
certResolver: default-tls


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- servicemonitor.yaml
# - syncthing-api.sealedsecret.yaml
images:
- name: syncthing
newName: syncthing/syncthing
newTag: "1.27"


@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: syncthing-config
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi


@@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
name: syncthing-web
labels:
app: syncthing
spec:
selector:
app: syncthing
type: ClusterIP
ports:
- port: 8384
targetPort: 8384
name: syncthing-web
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-listen
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.5
ports:
- port: 22000
targetPort: 22000
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-discover
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.5
ports:
- port: 22000
targetPort: 22000
protocol: UDP


@@ -0,0 +1,16 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: syncthing-servicemonitor
labels:
app: syncthing
spec:
selector:
matchLabels:
app: syncthing
endpoints:
- port: syncthing-web
path: /metrics
bearerTokenSecret:
name: syncthing-api
key: token


@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: spacedrive
spec:
selector:
matchLabels:
app: spacedrive
template:
metadata:
labels:
app: spacedrive
spec:
containers:
- name: spacedrive
image: spacedrive
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 80
volumeMounts:
- name: storage
mountPath: /data
volumes:
- name: storage
persistentVolumeClaim:
claimName: spacedrive-nfs


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: files1
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
images:
- name: spacedrive
newName: ghcr.io/spacedriveapp/spacedrive/server
newTag: 0.2.4

apps/files1/pvc.yaml

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: spacedrive-nfs
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Gi


@@ -1,12 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: finance
name: actualbudget
labels:
app: actualbudget
spec:
# deployment running a single container
selector:
matchLabels:
app: actualbudget
@@ -18,7 +16,7 @@ spec:
spec:
containers:
- name: actualbudget
image: actualbudget/actual-server:latest
image: actualbudget
imagePullPolicy: Always
env:
- name: TZ
@@ -34,67 +32,3 @@ spec:
- name: actualbudget-data-nfs
persistentVolumeClaim:
claimName: actualbudget-data-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
# storageClassName: fast
capacity:
storage: "5Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/actualbudget
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: "actualbudget-data-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
# selector:
# matchLabels:
# directory: "journal-data"
---
apiVersion: v1
kind: Service
metadata:
namespace: finance
name: actualbudget
spec:
selector:
app: actualbudget
ports:
- protocol: TCP
port: 5006
targetPort: 5006
type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: finance
name: actualbudget
spec:
entryPoints:
- websecure
routes:
- match: Host(`actualbudget.kluster.moll.re`)
kind: Rule
services:
- name: actualbudget
port: 5006
tls:
certResolver: default-tls


@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: actualbudget
spec:
entryPoints:
- websecure
routes:
- match: Host(`actualbudget.kluster.moll.re`)
kind: Rule
services:
- name: actualbudget
port: 5006
tls:
certResolver: default-tls


@@ -0,0 +1,27 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: "actualbudget-data-nfs"
spec:
capacity:
storage: "5Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/actualbudget
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "actualbudget-data-nfs"
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
volumeName: actualbudget-data-nfs


@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: actualbudget
spec:
selector:
app: actualbudget
ports:
- protocol: TCP
port: 5006
targetPort: 5006
type: ClusterIP


@@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly-importer
name: firefly-importer
namespace: finance
spec:
selector:
matchLabels:
app: firefly-importer
template:
metadata:
labels:
app: firefly-importer
spec:
containers:
- image: fireflyiii/data-importer:latest
imagePullPolicy: Always
name: firefly-importer
resources: {}
ports:
- containerPort: 8080
env:
- name: FIREFLY_III_ACCESS_TOKEN
value: redacted
- name: FIREFLY_III_URL
value: firefly-http:8080
# - name: APP_URL
# value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
---
apiVersion: v1
kind: Service
metadata:
name: firefly-importer-http
namespace: finance
labels:
app: firefly-importer-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly-importer
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-importer-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`importer.finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-importer-http
port: 8080
tls:
certResolver: default-tls


@@ -1,79 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: firefly
name: firefly
namespace: finance
spec:
selector:
matchLabels:
app: firefly
template:
metadata:
labels:
app: firefly
spec:
containers:
- image: fireflyiii/core:latest
imagePullPolicy: Always
name: firefly
resources: {}
ports:
- containerPort: 8080
env:
- name: APP_ENV
value: "local"
- name: APP_KEY
value: iKejRAlgwx2Y/fxdosXjABbNxNzEuJdl
- name: DB_CONNECTION
value: sqlite
- name: APP_URL
value: https://finance.kluster.moll.re
- name: TRUSTED_PROXIES
value: "**"
volumeMounts:
- mountPath: /var/www/html/storage/database
name: firefly-database
volumes:
- name: firefly-database
persistentVolumeClaim:
claimName: firefly-database-nfs
---
apiVersion: v1
kind: Service
metadata:
name: firefly-http
namespace: finance
labels:
app: firefly-http
spec:
type: ClusterIP
ports:
- port: 8080
# name: http
selector:
app: firefly
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: firefly-ingress
namespace: finance
spec:
entryPoints:
- websecure
routes:
- match: Host(`finance.kluster.moll.re`)
kind: Rule
services:
- name: firefly-http
port: 8080
tls:
certResolver: default-tls


@@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: finance
name: firefly-database-nfs
labels:
directory: firefly
spec:
# storageClassName: fast
# volumeMode: Filesystem
accessModes:
- ReadOnlyMany
capacity:
storage: "1G"
nfs:
path: /firefly # inside nfs part.
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: finance
name: firefly-database-nfs
spec:
resources:
requests:
storage: "1G"
# storageClassName: fast
accessModes:
- ReadOnlyMany
volumeName: firefly-database-nfs


@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: finance
resources:
- namespace.yaml
- actualbudget.pvc.yaml
- actualbudget.deployment.yaml
- actualbudget.service.yaml
- actualbudget.ingress.yaml
images:
- name: actualbudget
newName: actualbudget/actual-server
newTag: 24.2.0


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homarr
resources:
- namespace.yaml
- pvc.yaml
- ingress.yaml
helmCharts:
- name: homarr
releaseName: homarr
repo: https://oben01.github.io/charts/
version: 1.0.1
valuesFile: values.yaml


@@ -1,60 +0,0 @@
# -- Default values for homarr
# -- Declare variables to be passed into your templates.
# -- Number of replicas
replicaCount: 1
env:
# -- Your local time zone
TZ: "Europe/Berlin"
# -- Colors and preferences, possible values dark / light
DEFAULT_COLOR_SCHEME: "dark"
# -- Service configuration
service:
# -- Service type
type: ClusterIP
# -- Service port
port: 7575
# -- Service target port
targetPort: 7575
# -- Ingress configuration
ingress:
enabled: false
persistence:
- name: homarr-config
# -- Enable homarr-config persistent storage
enabled: true
# -- homarr-config storage class name
storageClassName: "nfs-client"
# -- homarr-config access mode
accessMode: "ReadWriteOnce"
persistentVolumeReclaimPolicy: Retain
# -- homarr-config storage size
size: "50Mi"
# -- homarr-config mount path inside the pod
mountPath: "/app/data/configs"
- name: homarr-database
# -- Enable homarr-database persistent storage
enabled: true
# -- homarr-database storage class name
storageClassName: "nfs-client"
# -- homarr-database access mode
accessMode: "ReadWriteOnce"
# -- homarr-database storage size
size: "50Mi"
# -- homarr-database mount path inside the pod
mountPath: "/app/database"
- name: homarr-icons
# -- Enable homarr-icons persistent storage
enabled: true
# -- homarr-icons storage class name
storageClassName: "nfs-client"
# -- homarr-icons access mode
accessMode: "ReadWriteOnce"
# -- homarr-icons storage size
size: "50Mi"
# -- homarr-icons mount path inside the pod
mountPath: "/app/public/icons"


@@ -13,4 +13,4 @@ resources:
images:
- name: homeassistant/home-assistant
newName: homeassistant/home-assistant
newTag: "2023.12"
newTag: "2024.2"


@@ -1,17 +1,24 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
namespace: immich
helmCharts:
- name: immich
releaseName: immich
version: 0.3.0
version: 0.3.1
valuesFile: values.yaml
repo: https://immich-app.github.io/immich-charts
images:
- name: ghcr.io/immich-app/immich-machine-learning
newTag: v1.95.1
- name: ghcr.io/immich-app/immich-server
newTag: v1.95.1


@@ -5,7 +5,7 @@ metadata:
name: immich-postgres
spec:
instances: 1
imageName: ghcr.io/bo0tzz/cnpgvecto.rs:16-v0.1.11
imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.2
bootstrap:
initdb:
owner: immich
@@ -21,14 +21,5 @@ spec:
size: 1Gi
storageClass: nfs-client
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: postgres-exporter
spec:
selector:
matchLabels:
"cnpg.io/cluster": immich-postgres
podMetricsEndpoints:
- port: metrics
monitoring:
enablePodMonitor: true


@@ -2,10 +2,6 @@
## You can find it at https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
## Refer there for more detail about the supported values
image:
tag: v1.91.4
# These entries are shared between all the Immich components
env:


@@ -0,0 +1,30 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres.yaml
- synapse.deployment.yaml
- synapse.service.yaml
- synapse.configmap.yaml
- synapse.ingress.yaml
- postgres-credentials.secret.yaml
- mautrix.pvc.yaml
- mautrix-telegram.statefulset.yaml
- mautrix-telegram.configmap.yaml
- mautrix-whatsapp.statefulset.yaml
namespace: matrix
images:
- name: mautrix-telegram
newName: dock.mau.dev/mautrix/telegram
newTag: "v0.15.1"
- name: mautrix-whatsapp
newName: dock.mau.dev/mautrix/whatsapp
newTag: "v0.10.5"
- name: synapse
newName: ghcr.io/element-hq/synapse
newTag: "v1.100.0"


@@ -0,0 +1,511 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mautrix-telegram
data:
config.yaml: |
# Homeserver details
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: http://synapse:8448
# The domain of the homeserver (for MXIDs, etc).
domain: matrix.kluster.moll.re
# Whether or not to verify the SSL certificate of the homeserver.
# Only applies if address starts with https://
verify_ssl: false
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# Number of retries for all HTTP requests if the homeserver isn't reachable.
http_retry_count: 4
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's Telegram connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Whether asynchronous uploads via MSC2246 should be enabled for media.
# Requires a media repo that supports MSC2246.
async_media: false
# Application service host/registration related details
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://mautrix-telegram:29318
# When using https:// the TLS certificate and key files for the address.
tls_cert: false
tls_key: false
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29317
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
max_body_size: 1
# The full URI to the database. SQLite and Postgres are supported.
# Format examples:
# SQLite: sqlite:filename.db
# Postgres: postgres://username:password@hostname/dbname
database: sqlite:mautrix-telegram.db
# The unique ID of this appservice.
id: telegram
# Username of the appservice bot.
bot_username: telegrambot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
bot_displayname: Telegram bridge bot
bot_avatar: mxc://maunium.net/tJCRmUyJDsgRNgqhOgoiHWbX
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
ephemeral_events: true
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Bridge config
bridge:
# Localpart template of MXIDs for Telegram users.
# {userid} is replaced with the user ID of the Telegram user.
username_template: "telegram_{userid}"
# Localpart template of room aliases for Telegram portal rooms.
# {groupname} is replaced with the name part of the public channel/group invite link ( https://t.me/{} )
alias_template: "telegram_{groupname}"
# Displayname template for Telegram users.
# {displayname} is replaced with the display name of the Telegram user.
displayname_template: "{displayname} (Telegram)"
# Set the preferred order of user identifiers which to use in the Matrix puppet display name.
# In the (hopefully unlikely) scenario that none of the given keys are found, the numeric user
# ID is used.
#
# If the bridge is working properly, a phone number or an username should always be known, but
# the other one can very well be empty.
#
# Valid keys:
# "full name" (First and/or last name)
# "full name reversed" (Last and/or first name)
# "first name"
# "last name"
# "username"
# "phone number"
displayname_preference:
- full name
- username
- phone number
# Maximum length of displayname
displayname_max_length: 100
# Remove avatars from Telegram ghost users when removed on Telegram. This is disabled by default
# as there's no way to determine whether an avatar is removed or just hidden from some users. If
# you're on a single-user instance, this should be safe to enable.
allow_avatar_remove: false
# Should contact names and profile pictures be allowed?
# This is only safe to enable on single-user instances.
allow_contact_info: false
# Maximum number of members to sync per portal when starting up. Other members will be
# synced when they send messages. The maximum is 10000, after which the Telegram server
# will not send any more members.
# -1 means no limit (which means it's limited to 10000 by the server)
max_initial_member_sync: 100
# Maximum number of participants in chats to bridge. Only applies when the portal is being created.
# If there are more members when trying to create a room, the room creation will be cancelled.
# -1 means no limit (which means all chats can be bridged)
max_member_count: -1
# Whether or not to sync the member list in channels.
# If no channel admins have logged into the bridge, the bridge won't be able to sync the member
# list regardless of this setting.
sync_channel_members: false
# Whether or not to skip deleted members when syncing members.
skip_deleted_members: true
# Whether or not to automatically synchronize contacts and chats of Matrix users logged into
# their Telegram account at startup.
startup_sync: false
# Number of most recently active dialogs to check when syncing chats.
# Set to 0 to remove limit.
sync_update_limit: 0
# Number of most recently active dialogs to create portals for when syncing chats.
# Set to 0 to remove limit.
sync_create_limit: 15
# Should all chats be scheduled to be created later?
# This is best used in combination with MSC2716 infinite backfill.
sync_deferred_create_all: false
# Whether or not to sync and create portals for direct chats at startup.
sync_direct_chats: false
# The maximum number of simultaneous Telegram deletions to handle.
# A large number of simultaneous redactions could put strain on your homeserver.
max_telegram_delete: 10
# Whether or not to automatically sync the Matrix room state (mostly unpuppeted displaynames)
# at startup and when creating a bridge.
sync_matrix_state: true
# Allow logging in within Matrix. If false, users can only log in using login-qr or the
# out-of-Matrix login website (see appservice.public config section)
allow_matrix_login: true
# Whether or not to make portals of publicly joinable channels/supergroups publicly joinable on Matrix.
public_portals: false
# Whether or not to use /sync to get presence, read receipts and typing notifications
# when double puppeting is enabled
sync_with_custom_puppets: false
# Whether or not to update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, custom puppets will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
# If using this for other servers than the bridge's server,
# you must also set the URL in the double_puppet_server_map.
login_shared_secret_map:
example.com: foobar
# Set to false to disable link previews in messages sent to Telegram.
telegram_link_preview: true
# Whether or not the !tg join command should do a HTTP request
# to resolve redirects in invite links.
invite_link_resolve: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Maximum size of image in megabytes before sending to Telegram as a document.
image_as_file_size: 10
# Maximum number of pixels in an image before sending to Telegram as a document. Defaults to 4096x4096 = 16777216.
image_as_file_pixels: 16777216
# Enable experimental parallel file transfer, which makes uploads/downloads much faster by
# streaming from/to Matrix and using many connections for Telegram.
# Note that generating HQ thumbnails for videos is not possible with streamed transfers.
# This option uses internal Telethon implementation details and may break with minor updates.
parallel_file_transfer: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge send all unicode reactions as custom emoji reactions to Telegram?
# By default, the bridge only uses custom emojis for unicode emojis that aren't allowed in reactions.
always_custom_emoji_reaction: false
# Settings for converting animated stickers.
animated_sticker:
# Format to which animated stickers should be converted.
# disable - No conversion, send as-is (gzipped lottie)
# png - converts to non-animated png (fastest),
# gif - converts to animated gif
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
target: gif
# Should video stickers be converted to the specified format as well?
convert_from_webm: false
# Arguments for converter. All converters take width and height.
args:
width: 256
height: 256
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
# Settings for converting animated emoji.
# Same as animated_sticker, but webm is not supported as the target
# (because inline images can only contain images, not videos).
animated_emoji:
target: webp
args:
width: 64
height: 64
fps: 25
# # End-to-bridge encryption support options.
# #
# # See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
# encryption:
# # Allow encryption, work in group chat rooms with e2ee enabled
# allow: false
# # Default to encryption, force-enable encryption in all portals the bridge creates
# # This will cause the bridge bot to be in private chats for the encryption to work properly.
# default: false
# # Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
# appservice: false
# # Require encryption, drop any unencrypted messages.
# require: false
# # Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# # You must use a client that supports requesting keys from other users to use this feature.
# allow_key_sharing: false
# # Options for deleting megolm sessions from the bridge.
# delete_keys:
# # Beeper-specific: delete outbound sessions when hungryserv confirms
# # that the user has uploaded the key to key backup.
# delete_outbound_on_ack: false
# # Don't store outbound sessions in the inbound table.
# dont_store_outbound: false
# # Ratchet megolm sessions forward after decrypting messages.
# ratchet_on_decrypt: false
# # Delete fully used keys (index >= max_messages) after decrypting messages.
# delete_fully_used_on_decrypt: false
# # Delete previous megolm sessions from same device when receiving a new one.
# delete_prev_on_new_session: false
# # Delete megolm sessions received from a device when the device is deleted.
# delete_on_device_delete: false
# # Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
# periodically_delete_expired: false
# # Delete inbound megolm sessions that don't have the received_at field used for
# # automatic ratcheting and expired session deletion. This is meant as a migration
# # to delete old keys prior to the bridge update.
# delete_outdated_inbound: false
# # What level of device verification should be required from users?
# #
# # Valid levels:
# # unverified - Send keys to all device in the room.
# # cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# # cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# # cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# # Note that creating user signatures from the bridge bot is not currently possible.
# # verified - Require manual per-device verification
# # (currently only possible by modifying the `trust` column in the `crypto_device` database table).
# verification_levels:
# # Minimum level for which the bridge should send keys to when bridging messages from Telegram to Matrix.
# receive: unverified
# # Minimum level that the bridge should accept for incoming Matrix messages.
# send: unverified
# # Minimum level that the bridge should require for accepting key requests.
# share: cross-signed-tofu
# # Options for Megolm room key rotation. These options allow you to
# # configure the m.room.encryption event content. See:
# # https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# # more information about that event.
# rotation:
# # Enable custom Megolm room key rotation settings. Note that these
# # settings will only apply to rooms created after this option is
# # set.
# enable_custom: false
# # The maximum number of milliseconds a session should be used
# # before changing it. The Matrix spec recommends 604800000 (a week)
# # as the default.
# milliseconds: 604800000
# # The maximum number of messages that should be sent with a given a
# # session before changing it. The Matrix spec recommends 100 as the
# # default.
# messages: 100
# # Disable rotating keys when a user's devices change?
# # You should not enable this option unless you understand all the implications.
# disable_device_change_key_rotation: false
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Should cross-chat replies from Telegram be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Whether or not the bridge should send a read receipt from the bridge bot when a message has
# been sent to Telegram.
delivery_receipts: false
# Whether or not delivery errors should be reported as messages in the Matrix room.
delivery_error_reports: false
# Should errors in incoming message handling send a message to the Matrix room?
incoming_bridge_error_reports: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it,
# except if the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should pinned chats be moved to a specific tag in Matrix?
# The favorites tag is `m.favourite`.
pinned_tag: null
# Same as above for archived chats, the low priority tag is `m.lowpriority`.
archive_tag: null
# Whether or not mute status and tags should only be bridged when the portal room is created.
tag_only_on_create: true
# Should leaving the room on Matrix make the user leave on Telegram?
bridge_matrix_leave: true
# Should the user be kicked out of all portals when logging out of the bridge?
kick_on_logout: true
# Should the "* user joined Telegram" notice always be marked as read automatically?
always_read_joined_telegram_notice: true
# Should the bridge auto-create a group chat on Telegram when a ghost is invited to a room?
# Requires the user to have sufficient power level and double puppeting enabled.
create_group_on_invite: true
# Settings for backfilling messages from Telegram.
backfill:
# Allow backfilling at all?
enable: true
# Whether or not to enable backfilling in normal groups.
# Normal groups have numerous technical problems in Telegram, and backfilling normal groups
# will likely cause problems if there are multiple Matrix users in the group.
normal_groups: false
# If a backfilled chat is older than this number of hours, mark it as read even if it's unread on Telegram.
# Set to -1 to let any chat be unread.
unread_hours_threshold: 720
# Forward backfilling limits.
#
# Using a negative initial limit is not recommended, as it would try to backfill everything in a single batch.
forward_limits:
# Number of messages to backfill immediately after creating a portal.
initial:
user: 50
normal_group: 100
supergroup: 10
channel: 10
# Number of messages to backfill when syncing chats.
sync:
user: 100
normal_group: 100
supergroup: 100
channel: 100
# Timeout for forward backfills in seconds. If you have a high limit, you'll have to increase this too.
forward_timeout: 900
# Settings for incremental backfill of history. These only apply to Beeper, as upstream abandoned MSC2716.
incremental:
# Maximum number of messages to backfill per batch.
messages_per_batch: 100
# The number of seconds to wait after backfilling the batch of messages.
post_batch_delay: 20
# The maximum number of batches to backfill per portal, split by the chat type.
# If set to -1, all messages in the chat will eventually be backfilled.
max_batches:
# Direct chats
user: -1
# Normal groups. Note that the normal_groups option above must be enabled
# for these to be backfilled.
normal_group: -1
# Supergroups
supergroup: 10
# Broadcast channels
channel: -1
# Overrides for base power levels.
initial_power_level_overrides:
user: {}
group: {}
# Whether to bridge Telegram bot messages as m.notices or m.texts.
bot_messages_as_notices: true
bridge_notices:
# Whether or not Matrix bot messages (type m.notice) should be bridged.
default: false
# List of user IDs for whom the previous flag is flipped.
# e.g. if bridge_notices.default is false, notices from other users will not be bridged, but
# notices from users listed here will be bridged.
exceptions: []
# An array of possible values for the $distinguisher variable in message formats.
# Each user gets one of the values here, based on a hash of their user ID.
# If the array is empty, the $distinguisher variable will also be empty.
relay_user_distinguishers: ["\U0001F7E6", "\U0001F7E3", "\U0001F7E9", "⭕️", "\U0001F536", "⬛️", "\U0001F535", "\U0001F7E2"]
# The formats to use when sending messages to Telegram via the relay bot.
# Text msgtypes (m.text, m.notice and m.emote) support HTML, media msgtypes don't.
#
# Available variables:
# $sender_displayname - The display name of the sender (e.g. Example User)
# $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser)
# $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com)
# $distinguisher - A random string from the options in the relay_user_distinguishers array.
# $message - The message content
message_formats:
m.text: "$distinguisher <b>$sender_displayname</b>: $message"
m.notice: "$distinguisher <b>$sender_displayname</b>: $message"
m.emote: "* $distinguisher <b>$sender_displayname</b> $message"
m.file: "$distinguisher <b>$sender_displayname</b> sent a file: $message"
m.image: "$distinguisher <b>$sender_displayname</b> sent an image: $message"
m.audio: "$distinguisher <b>$sender_displayname</b> sent an audio file: $message"
m.video: "$distinguisher <b>$sender_displayname</b> sent a video: $message"
m.location: "$distinguisher <b>$sender_displayname</b> sent a location: $message"
# Telegram doesn't have built-in emotes, this field specifies how m.emote's from authenticated
# users are sent to telegram. All fields in message_formats are supported. Additionally, the
# Telegram user info is available in the following variables:
# $displayname - Telegram displayname
# $username - Telegram username (may not exist)
# $mention - Telegram @username or displayname mention (depending on which exists)
emote_format: "* $mention $formatted_body"
# The formats to use when sending state events to Telegram via the relay bot.
#
# Variables from `message_formats` that have the `sender_` prefix are available without the prefix.
# In name_change events, `$prev_displayname` is the previous displayname.
#
# Set format to an empty string to disable the messages for that event.
state_event_formats:
join: "$distinguisher <b>$displayname</b> joined the room."
leave: "$distinguisher <b>$displayname</b> left the room."
name_change: "$distinguisher <b>$prev_displayname</b> changed their name to $distinguisher <b>$displayname</b>"
# Filter rooms that can/can't be bridged. Can also be managed using the `filter` and
# `filter-mode` management commands.
#
# An empty blacklist will essentially disable the filter.
filter:
# Filter mode to use. Either "blacklist" or "whitelist".
# If the mode is "blacklist", the listed chats will never be bridged.
# If the mode is "whitelist", only the listed chats can be bridged.
mode: blacklist
# The list of group/channel IDs to filter.
list: []
# How to handle direct chats:
# If users is "null", direct chats will follow the previous settings.
# If users is "true", direct chats will always be bridged.
# If users is "false", direct chats will never be bridged.
users: true
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!tg"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a Telegram bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# Send each message separately (for readability in some clients)
management_room_multiple_messages: false
# Permissions for using the bridge.
# Permitted values:
# relaybot - Only use the bridge via the relaybot, no access to commands.
# user - Relaybot level + access to commands to create bridges.
# puppeting - User level + logging in with a Telegram account.
# full - Full access to use the bridge, i.e. previous levels + Matrix login.
# admin - Full access to use the bridge and some extra administration commands.
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"matrix.kluster.moll.re": "full"
"@remy:matrix.kluster.moll.re": "admin"
# Options related to the message relay Telegram bot.
relaybot:
private_chat:
# List of users to invite to the portal when someone starts a private chat with the bot.
# If empty, private chats with the bot won't create a portal.
invite: []
# Whether or not to bridge state change messages in relaybot private chats.
state_changes: true
# When private_chat_invite is empty, this message is sent to users /starting the
# relaybot. Telegram's "markdown" is supported.
message: This is a Matrix bridge relaybot and does not support direct chats
# List of users to invite to all group chat portals created by the bridge.
group_chat_invite: []
# Whether or not the relaybot should not bridge events in unbridged group chats.
# If false, portals will be created when the relaybot receives messages, just like normal
# users. This behavior is usually not desirable, as it interferes with manually bridging
# the chat to another room.
ignore_unbridged_group_chat: true
# Whether or not to allow creating portals from Telegram.
authless_portals: true
# Whether or not to allow Telegram group admins to use the bot commands.
whitelist_group_admins: true
# Whether or not to ignore incoming events sent by the relay bot.
ignore_own_incoming_events: true
# List of usernames/user IDs who are also allowed to use the bot commands.
whitelist:
- myusername
- 12345678
# Telegram config
telegram:
# Get your own API keys at https://my.telegram.org/apps
api_id: 862555
api_hash: 7387a7b6ba71793d6f3fa98261117e4e
# (Optional) Create your own bot at https://t.me/BotFather
bot_token: disabled
# Should the bridge request missed updates from Telegram when restarting?
catch_up: true
# Should incoming updates be handled sequentially to make sure order is preserved on Matrix?
sequential_updates: true
exit_on_update_error: false


@@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mautrix-telegram
spec:
selector:
matchLabels:
app: mautrix-telegram
serviceName: mautrix-telegram
replicas: 1
template:
metadata:
labels:
app: mautrix-telegram
spec:
containers:
- name: mautrix-telegram
image: mautrix-telegram
volumeMounts:
- name: config
mountPath: /data/config.yaml
subPath: config.yaml
- name: persistence
mountPath: /data
args:
- --no-update # disable overwriting config.yaml
volumes:
- name: config
configMap:
name: mautrix-telegram
- name: persistence
emptyDir: {}


@@ -0,0 +1,428 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mautrix-whatsapp
data:
config.yaml: |
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: http://synapse:8448
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: matrix.kluster.moll.re
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://mautrix-whatsapp:29318
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29318
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: sqlite3-fk-wal
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: file:/data/mautrix-whatsapp.db?_txlock=immediate
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: whatsapp
# Appservice bot details.
bot:
# Username of the appservice bot.
username: whatsappbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: WhatsApp bridge bot
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Segment-compatible analytics endpoint for tracking some events, like provisioning API login and encryption errors.
analytics:
# Hostname of the tracking server. The path is hardcoded to /v1/track
host: api.segment.io
# API key to send with tracking requests. Tracking is disabled if this is null.
token: null
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
user_id: null
# Prometheus config.
metrics:
# Enable prometheus metrics?
enabled: false
# IP and port where the metrics listener should be. The path is always /metrics
listen: 127.0.0.1:8001
# Config for things that are directly sent to WhatsApp.
whatsapp:
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
os_name: Mautrix-WhatsApp bridge
# Browser name that determines the logo shown in the mobile app.
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
browser_name: unknown
# Bridge config
bridge:
# Localpart template of MXIDs for WhatsApp users.
# {{.}} is replaced with the phone number of the WhatsApp user.
username_template: whatsapp_{{.}}
# Displayname template for WhatsApp users.
# {{.PushName}} - nickname set by the WhatsApp user
# {{.BusinessName}} - validated WhatsApp business name
# {{.Phone}} - phone number (international format)
# The following variables are also available, but will cause problems on multi-user instances:
# {{.FullName}} - full name from contact list
# {{.FirstName}} - first name from contact list
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
personal_filtering_spaces: false
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should incoming calls send a message to the Matrix room?
call_start_notices: true
# Should another user's cryptographic identity changing send a message to Matrix?
identity_change_notices: false
portal_message_buffer: 128
# Settings for handling history sync payloads.
history_sync:
# Enable backfilling history sync payloads from WhatsApp?
backfill: true
# The maximum number of initial conversations that should be synced.
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
max_initial_conversations: -1
# Maximum number of messages to backfill in each conversation.
# Set to -1 to disable limit.
message_count: 50
# Should the bridge request a full sync from the phone when logging in?
# This bumps the size of history syncs from 3 months to 1 year.
request_full_sync: false
# Configuration parameters that are sent to the phone along with the request full sync flag.
# By default (when the values are null or 0), the config isn't sent at all.
full_sync_config:
# Number of days of history to request.
# The limit seems to be around 3 years, but using higher values doesn't break.
days_limit: null
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
size_mb_limit: null
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
storage_quota_mb: null
# If this value is greater than 0 and the conversation's last message is more than
# this number of hours old, the conversation will automatically be marked as read.
# Conversations whose last message is newer than this threshold will
# have their unread status synced from WhatsApp.
unread_hours_threshold: 0
# Should puppet avatars be fetched from the server even if an avatar is already set?
user_avatar_sync: true
# Should Matrix users leaving groups be bridged to WhatsApp?
bridge_matrix_leave: true
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
# WhatsApp and set the unread status on initial backfill?
# This will only work on clients that support the m.marked_unread or
# com.famedly.marked_unread room account data.
sync_manual_marked_unread: true
# When double puppeting is enabled, users can use `!wa toggle` to change whether
# presence is bridged. This setting sets the default value.
# Existing users won't be affected when these are changed.
default_bridge_presence: true
# Send the presence as "available" to whatsapp when users start typing on a portal.
# This works as a workaround for homeservers that do not support presence, and allows
# users to see when the whatsapp user on the other side is typing during a conversation.
send_presence_on_typing: false
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
#
# By default, the bridge acts like WhatsApp web, which only sends active delivery
# receipts when it's in the foreground.
force_active_delivery_receipts: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Should group members be synced in parallel? This makes member sync faster
parallel_member_sync: false
# Should Matrix m.notice-type messages be bridged?
bridge_notices: true
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false afterwards, unless the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
archive_tag: null
# Same as above, but for pinned chats. The favorite tag is called m.favourite
pinned_tag: null
# Should mute status and tags only be bridged when the portal room is created?
tag_only_on_create: true
# Should WhatsApp status messages be bridged into a Matrix room?
# Disabling this won't affect already created status broadcast rooms.
enable_status_broadcast: true
# Should sending WhatsApp status messages be allowed?
# This can cause issues if the user has lots of contacts, so it's disabled by default.
disable_status_broadcast_send: true
# Should the status broadcast room be muted and moved into low priority by default?
# This is only applied when creating the room, the user can unmute it later.
mute_status_broadcast: true
# Tag to apply to the status broadcast room.
status_broadcast_tag: m.lowpriority
# Should the bridge use thumbnails from WhatsApp?
# They're disabled by default due to very low resolution.
whatsapp_thumbnail: false
# Whether users are allowed to invite other bots into rooms with WhatsApp
# users (private chats and groups).
allow_user_invite: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge never send alerts to the bridge management room?
# These are mostly things like the user being logged out.
disable_bridge_alerts: false
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
# This is only safe on single-user bridges.
crash_on_stream_replaced: false
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
# key in the event content even if this is disabled.
url_previews: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Send galleries as a single event? This is not an MSC (yet).
beeper_galleries: false
# Should polls be sent using MSC3381 event types?
extev_polls: false
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
# Null means there's no enforced timeout.
message_handling_timeout:
# Send an error message after this timeout, but keep waiting for the response until the deadline.
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
error_after: null
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
# This is counted from the time the bridge starts handling the message.
deadline: 120s
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!wa"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a WhatsApp bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should user mentions be included in the event wire content so the server can send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a WhatsApp account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Settings for relay mode
relay:
# Whether relay mode should be allowed. If allowed, `!wa set-relay` can be used to turn any
# authenticated user into a relaybot for that chat.
enabled: false
# Should only admins be allowed to set themselves as relay users?
admin_only: true
# The formats to use when sending messages to WhatsApp via the relaybot.
message_formats:
m.text: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.notice: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.emote: "* <b>{{ .Sender.Displayname }}</b> {{ .Message }}"
m.file: "<b>{{ .Sender.Displayname }}</b> sent a file"
m.image: "<b>{{ .Sender.Displayname }}</b> sent an image"
m.audio: "<b>{{ .Sender.Displayname }}</b> sent an audio file"
m.video: "<b>{{ .Sender.Displayname }}</b> sent a video"
m.location: "<b>{{ .Sender.Displayname }}</b> sent a location"
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-whatsapp.log
max_size: 100
max_backups: 10
compress: true


@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mautrix-whatsapp
spec:
selector:
matchLabels:
app: mautrix-whatsapp
serviceName: mautrix-whatsapp
replicas: 1
template:
metadata:
labels:
app: mautrix-whatsapp
spec:
containers:
- name: mautrix-whatsapp
image: mautrix-whatsapp
volumeMounts:
- name: persistence
mountPath: /data
# contains config.yaml
securityContext:
fsGroup: 1337
volumes:
- name: persistence
persistentVolumeClaim:
claimName: mautrix-whatsapp


@@ -0,0 +1,23 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-telegram
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-whatsapp
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/matrix/postgres.yaml

@@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: matrix-postgres
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:16
bootstrap:
initdb:
owner: matrix
database: matrix
secret:
name: postgres-credentials
storage:
size: 1Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true
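
The postgres-credentials secret referenced by bootstrap.initdb is not part of this diff. CloudNativePG expects a basic-auth secret holding the owner's credentials; a minimal sketch (the password is a placeholder) would look like:

```
apiVersion: v1
kind: Secret
metadata:
  name: postgres-credentials
type: kubernetes.io/basic-auth
stringData:
  username: matrix               # must match bootstrap.initdb.owner
  password: <generated-password> # placeholder, substitute a strong generated value
```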


@@ -0,0 +1,62 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: synapse
data:
# matrix.kluster.moll.re.log.config: |
# version: 1
# formatters:
# precise:
# format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
# handlers:
# console:
# class: logging.StreamHandler
# formatter: precise
# loggers:
# # This is just here so we can leave `loggers` in the config regardless of whether
# # we configure other loggers below (avoid empty yaml dict error).
# _placeholder:
# level: "INFO"
# synapse.storage.SQL:
# # beware: increasing this to DEBUG will make synapse log sensitive
# # information such as access tokens.
# level: INFO
# root:
# level: INFO
# handlers: [console]
homeserver.yaml: |
server_name: "matrix.kluster.moll.re"
report_stats: false
# enable_registration: true
# enable_registration_without_verification: true
listeners:
- port: 8448
tls: false
type: http
x_forwarded: true
bind_addresses: ['::1', '127.0.0.1']
resources:
- names: [client, federation]
compress: false
# log_config: "./matrix.kluster.moll.re.log.config"
media_store_path: /media_store
trusted_key_servers:
- server_name: "matrix.org"
database:
name: psycopg2
args:
user: matrix
password: "0ssdsdsdM6vbxhs.kdjsdasd9Z0qK5bdTwM6vbxh9Z"
dbname: matrix
host: matrix-postgres-rw
cp_min: 5
cp_max: 10
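
Note that the homeserver.yaml above does not list any app_service_config_files; for the mautrix bridges in this diff to attach to Synapse, their generated registration files need to be referenced somewhere. A hedged sketch of the relevant snippet (the paths are assumptions and depend on how the registrations are mounted):

```
app_service_config_files:
  - /data/appservices/mautrix-telegram-registration.yaml   # assumed mount path
  - /data/appservices/mautrix-whatsapp-registration.yaml   # assumed mount path
```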


@@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: synapse
spec:
selector:
matchLabels:
app: synapse
template:
metadata:
labels:
app: synapse
spec:
containers:
- name: synapse
image: synapse
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 8448
env:
- name: SYNAPSE_CONFIG_PATH
value: /config/homeserver.yaml
volumeMounts:
- name: config
mountPath: /config/homeserver.yaml
subPath: homeserver.yaml
- name: config-persistence
mountPath: /config
- name: media
mountPath: /media_store
securityContext:
fsGroup: 1001
volumes:
- name: config
configMap:
name: synapse
- name: config-persistence
emptyDir: {}
- name: media
emptyDir: {}


@@ -0,0 +1,29 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: synapse-federation
spec:
entryPoints:
- websecure
routes:
- match: Host(`matrix.kluster.moll.re`)
kind: Rule
services:
- name: synapse
port: 8448
# auto route to the _matrix path
middlewares:
- name: matrix-redirect
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: matrix-redirect
spec:
redirectRegex:
regex: "^https://matrix.kluster.moll.re/(.*)"
replacement: "https://matrix.kluster.moll.re/_matrix/$${1}"
permanent: true


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: synapse
spec:
selector:
app: synapse
ports:
- protocol: TCP
port: 8448
targetPort: 8448


@@ -2,13 +2,15 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: jellyfin
labels:
metrics: prometheus
spec:
selector:
matchLabels:
app: jellyfin-server
app: jellyfin-server-service
endpoints:
- path: /metrics
targetPort: 8096
targetPort: jellyfin
# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/


@@ -20,6 +20,7 @@ spec:
cpu: "2"
ports:
- containerPort: 8096
name: jellyfin
env:
- name: TZ
value: Europe/Berlin


@@ -3,6 +3,8 @@ apiVersion: v1
kind: Service
metadata:
name: jellyfin-server
labels:
app: jellyfin-server-service
spec:
selector:
app: jellyfin-server


@@ -11,13 +11,14 @@ resources:
# prometheus-operator crds
- https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.70.0/bundle.yaml
- prometheus.yaml
- thanos-objstore-config.sealedsecret.yaml
helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 7.0.19
version: 7.3.0
valuesFile: grafana.values.yaml
- releaseName: influxdb


@@ -39,22 +39,27 @@ roleRef:
subjects:
- kind: ServiceAccount
name: prometheus
# namespace: default
namespace: monitoring # needs to be the same as in the kustomization.yaml
---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
spec:
serviceAccountName: prometheus
serviceMonitorSelector: # match all service monitors by default
matchLabels: {}
serviceMonitorNamespaceSelector: # match all namespaces by default
matchLabels: {}
resources:
requests:
memory: 400Mi
serviceAccountName: prometheus
enableAdminAPI: false
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector: {}
thanos:
version: v0.33.0
objectStorageConfig:
# loads the config from a secret named thanos-objstore-config in the same namespace
key: thanos.yaml
name: thanos-objstore-config
---
apiVersion: v1
kind: Service


@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: thanos-objstore-config
namespace: monitoring
spec:
encryptedData:
thanos.yaml: AgCXlr7NO2DoH1R0ngtDFi8rgJaDnW5WSmOMjvXF4GMcEjnn1kwQMLkF0Xz1BUB5GlQkTAg+ZjCWGMlfycBmUnZb+koZK3X1YLsk1BxBxtuSqhj35iQYxKQ7rAlsz7FxUQjK2oiJkFeQmo/rwcw6l6vZJ73+THYSebR9mLQ/H0pnmJM3ldLX4iWL2H8BZ7ftOYdXO7Xv0lk2k2L4O4LgnB1Uedpyk0HLVxAv3VdVU/RFpHm5Q7kudrCMm9ENcJG7qIWuii8GkysvEefbo2phgKn1Zr5XR6SyekuW2e6FyHe9us5Pv5HnJ6Z2+ZyewygaGgHiRqtxRMaLbahICewfSHwyGzeAD2kdgwVyJYXxVPV9qKQvZmj0ZDCDZ5K548mSUq7nNXSI9M9AJBTKUoqb2FXK3pqn4yh9M1l+7Pmno5Fs22blAyGsRqO32GxrYvEXPpdSeqHRjOMYTnbPuteGRKcvmSEUSuHzkeoTzU1Jh4Sg0ygtQUNIKtbwhJm1XpbJ0oaR5ukWMxPfpDv+B5FmrDsU/I+o62+NtCLQLkK6MoRBFiJ1kymtKkM3vQ1CVg4Vtc5Gc2D6mMu5K8kEuUODweBb8qPnYH7ULfTYORldj3d+Fb2mGF5mAU6xHMzbocsdgZpbAzUP/FfJmMMDWf4aW3LJ1mBjUD06KAwPsQvbTm6VInrdXh2QVb4UIp41kbyK8sanHrvh3bprHloxt8OnTZ2HQl+XN+kxYirkVkL34lIlk7KdYCWqO7QqH0ncd9WF0f9mpPGbxo3J
template:
metadata:
creationTimestamp: null
name: thanos-objstore-config
namespace: monitoring
type: Opaque
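
The encrypted thanos.yaml above follows the standard Thanos object storage configuration format. As a rough sketch, an S3-compatible backend would be structured like this (bucket, endpoint and credentials are placeholders):

```
type: S3
config:
  bucket: thanos
  endpoint: s3.example.com
  access_key: <access-key>
  secret_key: <secret-key>
```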


@@ -23,3 +23,29 @@ spec:
requests:
storage: "150Gi"
volumeName: nextcloud-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-syncthing-shared
spec:
capacity:
storage: "150Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/syncthing
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nextcloud-syncthing-shared
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "150Gi"
volumeName: nextcloud-syncthing-shared

apps/nextcloud/readme.md

@@ -0,0 +1,17 @@
## Running occ commands
Sometimes you need to run a command on the Nextcloud container directly. You can do that by running commands as the user www-data via the kubectl exec command.
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ <occ-command>"
```
Here are some examples.

## Putting Nextcloud into maintenance mode
Some admin actions (e.g. backups) require you to put your Nextcloud instance into maintenance mode:
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ maintenance:mode --on"
```


@@ -1,9 +1,6 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
image:
tag: "28"
ingress:
enabled: false
@@ -49,6 +46,15 @@ nextcloud:
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
extraVolumes:
- name: my-volume
persistentVolumeClaim:
claimName: nextcloud-nfs
extraVolumeMounts:
- name: my-volume
mountPath: /var/www/html/my-volume
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
@@ -74,8 +80,7 @@ nginx:
enabled: false
internalDatabase:
enabled: true
name: nextcloud
enabled: false
##
## External database configuration
@@ -89,13 +94,7 @@ externalDatabase:
## Database host
host: postgres-postgresql.postgres
## Database user
# user: nextcloud
# ## Database password
# password: test
## Database name
database: nextcloud
## Use an existing secret


@@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mealie
spec:
selector:
matchLabels:
app: mealie
template:
metadata:
labels:
app: mealie
spec:
containers:
- name: mealie
image: mealie
resources:
limits:
memory: "500Mi"
cpu: "500m"
ports:
- containerPort: 9000
env:
- name: ALLOW_SIGNUP
value: "true"
- name: TZ
value: Europe/Paris
- name: BASE_URL
value: https://recipes.kluster.moll.re
volumeMounts:
- name: mealie-data
mountPath: /app/data
volumes:
- name: mealie-data
persistentVolumeClaim:
claimName: mealie-data


@@ -1,15 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: homarr-ingress
name: mealie-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`start.kluster.moll.re`)
- match: Host(`recipes.kluster.moll.re`)
kind: Rule
services:
- name: homarr
port: 7575
- name: mealie-web
port: 9000
tls:
certResolver: default-tls


@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: recipes
resources:
- namespace.yaml
- deployment.yaml
- pvc.yaml
- service.yaml
- ingress.yaml
images:
- name: mealie
newTag: v1.2.0
newName: ghcr.io/mealie-recipes/mealie


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/recipes/pvc.yaml

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mealie-data
spec:
resources:
requests:
storage: 1Gi
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce

apps/recipes/service.yaml

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: mealie-web
spec:
selector:
app: mealie
ports:
- port: 9000
targetPort: 9000


@@ -14,4 +14,3 @@ spec:
port: 80
tls:
certResolver: default-tls


@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: syncthing
spec:
selector:
matchLabels:
app: syncthing
template:
metadata:
labels:
app: syncthing
spec:
containers:
- name: syncthing
image: syncthing
resources:
limits:
memory: "256Mi"
cpu: "500m"
ports:
- containerPort: 8384
protocol: TCP
name: syncthing-web
- containerPort: 22000
protocol: TCP
- containerPort: 22000
protocol: UDP
volumeMounts:
- name: persistence
mountPath: /var/syncthing
volumes:
- name: persistence
persistentVolumeClaim:
claimName: syncthing-claim


@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: syncthing-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`syncthing.kluster.moll.re`)
kind: Rule
services:
- name: syncthing-web
port: 8384
tls:
certResolver: default-tls


@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: syncthing
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- servicemonitor.yaml
- syncthing-api.sealedsecret.yaml
images:
- name: syncthing
newName: syncthing/syncthing
newTag: "1.27"


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

apps/syncthing/pvc.yaml

@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: syncthing-claim
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi


@@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
name: syncthing-web
labels:
app: syncthing
spec:
selector:
app: syncthing
type: ClusterIP
ports:
- port: 8384
targetPort: 8384
name: syncthing-web
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-listen
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.4
ports:
- port: 22000
targetPort: 22000
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-discover
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.4
ports:
- port: 22000
targetPort: 22000
protocol: UDP


@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: syncthing-servicemonitor
labels:
app: syncthing
spec:
selector:
matchLabels:
app: syncthing
endpoints:
- port: syncthing-web
path: /metrics
bearerTokenSecret:
name: syncthing-api
key: token
namespace: syncthing


@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: syncthing-api
namespace: syncthing
spec:
encryptedData:
token: AgC1hG1aguLIWBgA1R19MGrXDq7BONAldMEXtCeGXLO9Xar08f7qFqprtRJAMOID4trUEBMAkF96m7rH7QHTpO0WzRLrJctLi7U6NgESUJBDxusqjij3RAANS69Xt27mu2oa+rhm605CfFJT6Gpx/2CxrFtUD3yCijilDnEVvw4WvTLHvVQMCd8cM8ZDlpBsSYbxvtCUN1+B02DCucLpMphspxV2SGPAdc04xQD7d0vUhNLekFi0xSgu0jiRGVDHOG5Egd9d/BGeNOBgiUVxJxqqdXc6EmkslcSUtMQJ5luSxjogf+p3jdOqt4aPpUeR8sSPb6OSEIZD/Cfs9X4akHdpUAqkycu+V24lDxeHWAtIviCMBPttrwNAEytgwqaiT0U4UmL5GqR97jpmy3Tx+jYKuXkt4Igb6VByreuL9aZacRrqRhCCgbg95Y/UrYlLAbZYOI/+KsFzB5akGpZXUDcW9h2IkTUmcT+QxWXqEoNpoTI5qAnKiu/9T5elDKghjMHYX+CnPj+rXlQIJzX7NkZ0Q6HpKQ4B2Vd1Ewkvadf963jBodUe7WiMt8UeYgzCa33F4U23JjExIrL8t3r8MQ/IIdtfUvyz6Da1vp5hjpBUnUCk8rca/6VC3GO1GP3DLdIXiZQY1OOTHJlyLG7+bIL35zVfkmLMzmlIdaFsfeYiL4P+hYRbLABPAJk8lY7MEdiczpvI9HlmFVatJaPrFJwx9jyhzqIOq5eGt0OIkFt+fw==
template:
metadata:
creationTimestamp: null
name: syncthing-api
namespace: syncthing
type: Opaque


@@ -3,11 +3,13 @@ kind: IngressRoute
metadata:
namespace: whoami
name: whoami-ingressroute
annotations:
spec:
entryPoints:
- websecure
routes:
- match: Host(`whoami.kluster.moll.re`) || Host(`homepage.kluster.moll.re`) || Host(`moll.re`)
- match: Host(`whoami.kluster.moll.re`) || Host(`homepage.kluster.moll.re`)
kind: Rule
services:
- name: whoami


@@ -1,19 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: cloudflare-keys
namespace: ddns
spec:
encryptedData:
CLOUDFLARE_API_KEY: AgCSEPeO4QRhQLLi/PPBxKOu7boxEbk1wb23WT3ZMw8nMgw3kCST2lvyKSacVWX7czv+JYXU6z2eNvZ001xr/0EXsHtsM/AbAf6KmPzzZUzLwWkDJ0iIyw4yDVKqPqP4N2T+KOJpIh6o9kmit/kFyZ8cRlPsuBsFYOKljI6IxLBvoaiMQiYeymL50b0bmWhxK7VNkISxDyVGLkCWl9cMCvietoawSq64LDuAvwYnOFoXOmnxa523vYsLp/L9lNK5Yfk3ywlyjHeJgr8Bz80QRmBs8Fbo3dv2yZVZxHFDUY8+BNHk1N8qgjeaW1g02+puTntxlQu82Ea1gyhQ+Ft/DKQ9EPk7asDHaMtNybckHh7Hb/8QJDsk9cIAK3btE99IANzVUIddO7g8nPv/NybSpWq3kw40ErUjZVraOkAMEX7YIwJKD2n8RKps0Z3xDSYWkWygKQePoonh9xaQhV2R4Enc8nkzf/+eX7yg9SzZeAgR6w+RY+fi+BPC/PW/GgH4cRr8jgMBzEIDrw0OShdjYKlij8Xwe5vq/GdXmI2A9ZJnn2MhG7EKlxBlWD1FF219shqtro6ppVBJZNQAfWlg6jM4SFWP3DwL0RoO5dz+GMXofVBXG0sp6Ay0ILOxnXe/DHlgZN9AS4EcM1lWp6Rs31TkVMI/PUXL7uDwgBOZIwUdGJG+OlHNxXQZ/nuUIDIhZgBfBHuFnOpNN8KmJN33P1e2Wm5daru3bIKyr85LGfHhetRw/1jW6t4b
CLOUDFLARE_DNS_RECORD_ID: AgBx3Fu7qkgbrxJw0JtNYQyAop0yY7vCZ271GgTvUSZKQhdTY4mlo/xWL/3+sB4SnKRpyZ4/HcmJUEMrW6yb7aHKrQtzwvZRBeYQ52LMAQKBLzPqzKjce1VomqpMFwtferORR4drBYXFFuBJ7xEkRcJSaQ4dJTAWNQF/TKXJtMLswPGA259S6xfjuXaDcPA8faV63L0MPDqNU22Zzr99fOoEFRrYvyFVe1verXzJ2NnfgY6Em87cDRZHEUn0VZ03BURvZW4ElIODyRmSkUsptjvjjOVhSDixBkRljA/Ybp641Wa3Y2ZjKj3ta5g/CvPU50sHjlhQnV1v8viyqlxt79O2ysH+keeIxwQJOCzQYv44aXCWLvmswMTGsiOaKWCsCG8iSkXQe0VO44V7GOb9tZ+vbpi0o6x6+rh5N/L93dZdQydEG0omOkN5YmyZLN/wlepBSXPQPqFYVljaW6a/oGNNYwULPkmvqa5tUgSFuQDgrIHSJKFR8oBnBLYEKlglkLCOZKxqI+SkoeIK1TLP1eJ1hea3e6ULJopZFK6XLr1DJOl2pefPczeummLuoxtyJwmGWM9uGcVuDwi0V6N4ixWAnWWiimFVIFKjY6Fj7QJLge3B0K1gNtpeIIBlZ6whns/4YYv5DSojgCKS1OkmCohYQXN4/t+AG90OoR79+oJiVdzwrMCHRpLddfSGGfSzm2DkKw4x1kxptyQRJg3y89Kd2acM7kDvbaajMFNCy3yl3A==
CLOUDFLARE_ZONE_ID: AgCn20qDEs/2F3RtrSIyiBXD5IwPYB/yTkUhG3aA7puH8UlkVILzTXJfVGTeSp7hkDgBjcoX3o/KtMR44sS6oGl3cYYtvbni2/iplarqqZflRzvW7MSyE3TuSi8cVk+aTab0+mI4BkwiQ/RI1jolCIfcblh10kvJBSrH67R4JygKqb0OyM0DWtCXzMyUFunGKkEIpjnMQN/OfMhk+G9iB57iL9EL2ROl2ZgpYPwsDc+XFb8R1FGKJfpTux0b50NYS95VBb8uOZJ7kgBabfPn3hTlN4bL7uzDWlCEotGOTJ1tYhBfgQN7O5SxU4/Lks11mqv6TRenZPXb7pe39/k6BOoh7dY6odqfT9MWD/OjTJ78RAKcfwA/z2zEXmCxrKdZwC8aQ9I35rLYo6rzktcguWn9nThza5bN3uQF9Gy16R0Dq5JO37I61gFkH8UHmCax+8/a8YlcUkeHSii7AUFv8z8VaFI2qzz4OtyeRO0yatP87D8ID98kFeymzU1AVacCqLsm08qaPHj7yOV1B6mxufHaYsuXckDcGEh1e03Op+hYDqff0pz7oQ2izHFvx+w0X2AbMXx+8j+Grpw14PO8bWal2DQBVu8/+4bfAFWoCVMuE4Tf/jGnbzlUHvU8N9wnpQ9OgV9EOO7m2A5vbGOsjIGV8kZrWNKhzHs6BroJl2GIyiteTJC+/EbNGK8y4hZrmjIhjhlel0X2GgT/3iQ5svH2yrIGvUmONk8ZBE2xjI+V4A==
KLUSTER_DOMAIN: AgBVbvgJHtG6jnZK8vU+Hf+6U948uwd5yMBCOoIqvlEIN+uLQqorLNLvUuvStZjsga4B+2biufuYcIbc41yyRJc8Z0MKmGjvZI7l4oCZ3uu09qlFLV1xMjrQC5HtXNJd/x9nbFToj9wEdM/IZY2medJ59MaIolmeck14ZIiwCJiMTXG4UZt1eoS9J6NEIweqPWA/x9uq1IATVTPtL80sH6OtjMDzmmjx6lDgQfw8saoxg8zmHBAAi+MT6GCUhAzjRl/bCA8oUfuW69exXl0FMBlxWyi/+vXNDCOwVJDiVTDfodThr0ocGAaEkZHx7w5jaPdfh1+Wj2dsHTxyp1LIIYSQy7m9elRV8Wz74//5ejsDbETKM69qirsOhQbPNJpEj4Tnjr5re1o49hH8ej0KcFOpHjMcupwJW4sqNJqEyqUnP4C/BE5bySauuYT0pShAc24yQyJHQokdei0oBKzRtUDiM4+2NQVZXxURv2oSIfgB0CKfHPjsYpQxRJ4B83qBUGUmLZYyFnwl3FWwbHhrgmHBE2qik9XgcssvMWMhobkxiWd46KsaXVnjU7KqlTHWuEoI6AaC1s2r0WTJGf0zY1Mn93Na1uLgypwwgkUx/cV2l+ThzMETPXjNKm4INn6WeGmE6HomRXqBcJxgg4+RimXKtaitXxr7ujZZEZhIiVGgDJT36pwAjZBg3kD6NeJ10LjivwB9VuYHWjqgGyicuEZ5TA==
template:
metadata:
creationTimestamp: null
name: cloudflare-keys
namespace: ddns
type: Opaque


@@ -1,58 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: ddns-cronjob
spec:
schedule: "0 6,18 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: curl
command:
- /bin/sh
- -c
args:
- >-
CURRENT_IP=$(curl -4 ifconfig.me)
&&
echo "Current IP: $CURRENT_IP"
&&
curl https://ntfy.kluster.moll.re/kluster_ip
-H "Title: Kluster IP"
-d "Currently: $(CURRENT_IP)"
&&
curl
--request PUT
--url https://api.cloudflare.com/client/v4/zones/$(CLOUDFLARE_ZONE_ID)/dns_records/$(CLOUDFLARE_DNS_RECORD_ID)
--header "Authorization: Bearer $(CLOUDFLARE_API_KEY)"
--header "Content-Type: application/json"
--data '{"content": "$(CURRENT_IP)", "name": "$(KLUSTER_DOMAIN)", "proxied": false, "type": "A"}'
&&
echo "Updated $(KLUSTER_DOMAIN) to $(CURRENT_IP) successfully"
env:
- name: CLOUDFLARE_ZONE_ID
valueFrom:
secretKeyRef:
name: cloudflare-keys
key: CLOUDFLARE_ZONE_ID
- name: CLOUDFLARE_API_KEY
valueFrom:
secretKeyRef:
name: cloudflare-keys
key: CLOUDFLARE_API_KEY
- name: CLOUDFLARE_DNS_RECORD_ID
valueFrom:
secretKeyRef:
name: cloudflare-keys
key: CLOUDFLARE_DNS_RECORD_ID
- name: KLUSTER_DOMAIN
valueFrom:
secretKeyRef:
name: cloudflare-keys
key: KLUSTER_DOMAIN
restartPolicy: Never


@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ddns
images:
- name: curl
newName: curlimages/curl
newTag: 8.5.0
resources:
- namespace.yaml
- cronjob.yaml
- cloudflare-keys.sealedsecret.yaml


@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: cloudflare-api
namespace: external-dns
spec:
encryptedData:
CLOUDFLARE_TOKEN: AgA+HAbpfu4MUK152g2XuWnCoLflCgp4C5gpUWy/IRCETyhcaP5SefAPGC/TGdGPTsZWoDO9qIDAdEiFw4Aw5idfBOm1Ql7vLunWeqNysirU9QJIbL21Fb8+UafrLnAGQySjzAT4MyK5yntn3T8l568cR22jPQi5a0CqL91jGXBeANkTQlokMFJCYkYsaqhCirSDlldvVrJGlWg+T1odyqyytIOO9OaYNt0jA1NisKpLWcLPwYcVkf0ntdCSQaloMX/LeoY16kECOVMYrVPIqGbOhCvAehpjyXxydFjyaYIV5p5hlKD0Sjlpc9zTCTFF7KUddNU9m2GhJqKT8bZm0d2g1yth4dNLgbUSp1nU31vpRalRJYXBVwPVei0lSGL7Jkb9LzCRHxL4J8hP/AeYrntpoAqMDxZsMZSpUnbTQklT2WyvIzyhpiNtEFrH8P6CYq61dWENXWkMwDqzKfM7Xlg9ifW6YzTQfsoo/OWhtWRmLDNVrXwhZqRWb7UjYr6xGPAzc/I4H2SJk5HLubylaXY3I2X2dWy+YTttiUuzQl0YfzrADAlu8ZWPiAfLqGmKnOR0STqeCvAT6ya8Ky09aY5GWLdTJfTayivGA0PvRJE2idf/VtpVDBERN5lCDHLBrRvU8o5wBlkTxU4B8zHmF+pu7zO3bA8IOUpyQMnlFzZTOI1s0Tl6XiRmy+WnA4tyxjrEse75BvS3WGCnaezQJW9gpF1/UC18vys46f9Jn5FHfm4lMpOscSIy
template:
metadata:
creationTimestamp: null
name: cloudflare-api
namespace: external-dns
type: Opaque


@@ -0,0 +1,63 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: octodns-cronjob
spec:
schedule: "0 */6 * * *"
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
backoffLimit: 0
template:
spec:
initContainers:
- name: git
image: git
command: ["git"]
args:
- clone
- https://git.kluster.moll.re/remoll/dns.git
- /etc/octodns
volumeMounts:
- name: octodns-config
mountPath: /etc/octodns
containers:
- name: octodns
image: octodns
env:
# - name: CLOUDFLARE_ACCOUNT_ID
# valueFrom:
# secretKeyRef:
# name: cloudflare-api
# key: CLOUDFLARE_ACCOUNT_ID
- name: CLOUDFLARE_TOKEN
valueFrom:
secretKeyRef:
name: cloudflare-api
key: CLOUDFLARE_TOKEN
# - name: CLOUDFLARE_EMAIL
# valueFrom:
# secretKeyRef:
# name: cloudflare-api
# key: CLOUDFLARE_EMAIL
command: ["/bin/sh", "-c"]
args:
- >-
cd /etc/octodns
&&
pip install -r ./requirements.txt
&&
octodns-sync --config-file ./config.yaml --doit
&&
echo "done..."
volumeMounts:
- name: octodns-config
mountPath: /etc/octodns
volumes:
- name: octodns-config
emptyDir: {}
restartPolicy: Never
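
The job clones the DNS repository and runs octodns-sync against a config.yaml inside it; that file is not part of this diff. A minimal sketch of such a config (zone name and provider keys are assumptions) could look like:

```
providers:
  zones:
    class: octodns.provider.yaml.YamlProvider
    directory: ./zones
  cloudflare:
    class: octodns_cloudflare.CloudflareProvider
    token: env/CLOUDFLARE_TOKEN   # read from the env var injected by the CronJob
zones:
  moll.re.:
    sources:
      - zones
    targets:
      - cloudflare
```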


@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: external-dns
resources:
- namespace.yaml
- cloudflare-api.sealedsecret.yaml
- cronjob.yaml
images:
- name: octodns
newName: octodns/octodns # has all plugins
newTag: "2024.02"
- name: git
newName: alpine/git
newTag: "2.43.0"


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -22,6 +22,8 @@ spec:
value: ":80"
- name: DRONE_GITEA_SERVER
value: https://git.kluster.moll.re
- name: DRONE_USER_CREATE
value: username:remoll,admin:true
- name: DRONE_GITEA_CLIENT_ID
valueFrom:
secretKeyRef:


@@ -0,0 +1,10 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: gitea-servicemonitor
spec:
endpoints:
- port: http
selector:
matchLabels:
app.kubernetes.io/name: gitea


@@ -98,13 +98,9 @@ gitea:
# this user needs to stay admin and active, but we change its password to be unusable
password: changedalready
email: "gitea@delete.me"
## @param gitea.metrics.enabled Enable Gitea metrics
## @param gitea.metrics.serviceMonitor.enabled Enable Gitea metrics service monitor
metrics:
enabled: true
serviceMonitor:
enabled: true
## @param gitea.config Configuration for the Gitea server,ref: [config-cheat-sheet](https://docs.gitea.io/en-us/config-cheat-sheet/)
config:


@@ -4,6 +4,7 @@ resources:
- namespace.yaml
- gitea.pvc.yaml
- gitea.ingress.yaml
- gitea.servicemonitor.yaml
- drone-kube-runner.deployment.yaml
- drone-server.deployment.yaml
- drone-server.sealedsecret.yaml
@@ -15,6 +16,6 @@ helmCharts:
- name: gitea
namespace: gitea # needs to be set explicitly for svc to be referenced correctly
releaseName: gitea
version: 9.6.1
version: 10.1.3
valuesFile: gitea.values.yaml
repo: https://dl.gitea.io/charts/


@@ -1,2 +0,0 @@
name: metallb
chart: metallb/metallb


@@ -1,15 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config
spec:
secretTemplates:
- name: secret-1
labels:
label1: value1
annotations:
key1: value1
stringData:
data-name0: data-value0
data:
data-name1: ZGF0YS12YWx1ZTE=


@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ipaddresspool.yaml
namespace: metallb-system
helmCharts:
- name: metallb
repo: https://metallb.github.io/metallb
version: 0.14.3
releaseName: metallb
valuesFile: values.yaml
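
The ipaddresspool.yaml listed in the resources is not shown in this diff. For MetalLB 0.13+ it would typically define an IPAddressPool plus an L2Advertisement; the pool name and address range below are assumptions (the syncthing services above pin 192.168.3.4):

```
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.3.0/24   # assumed range covering the LoadBalancer IPs used elsewhere
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default-l2
  namespace: metallb-system
spec:
  ipAddressPools:
    - default-pool
```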


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -1,191 +1,18 @@
# Default values for metallb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
loadBalancerClass: ""
# existingConfigMap: "config"
rbac:
# create specifies whether to install and use RBAC rules.
create: true
prometheus:
# scrape annotations specifies whether to add Prometheus metric
# auto-collection annotations to pods. See
# https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
# for a corresponding Prometheus configuration. Alternatively, you
# may want to use the Prometheus Operator
# (https://github.com/coreos/prometheus-operator) for more powerful
# monitoring configuration. If you use the Prometheus operator, this
# can be left at false.
scrapeAnnotations: false
# port both controller and speaker will listen on for metrics
metricsPort: 7472
# if set, enables rbac proxy on the controller and speaker to expose
# the metrics via tls.
# secureMetricsPort: 9120
# the name of the secret to be mounted in the speaker pod
# to expose the metrics securely. If not present, a self-signed
# certificate will be used.
speakerMetricsTLSSecret: ""
# the name of the secret to be mounted in the controller pod
# to expose the metrics securely. If not present, a self-signed
# certificate will be used.
controllerMetricsTLSSecret: ""
# prometheus doesn't have permission to scrape all namespaces, so we give it permission to scrape metallb's namespace
rbacPrometheus: true
# the service account used by prometheus
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
serviceAccount: ""
# the namespace where prometheus is deployed
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
namespace: ""
# the image to be used for the kuberbacproxy container
rbacProxy:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.12.0
pullPolicy:
# Prometheus Operator PodMonitors
podMonitor:
# enable support for Prometheus Operator
enabled: false
# optional additional labels for podMonitors
additionalLabels: {}
# optional annotations for podMonitors
annotations: {}
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
rbacPrometheus: false
# Prometheus Operator ServiceMonitors. To be used as an alternative
# to podMonitor, supports secure metrics.
serviceMonitor:
# enable support for Prometheus Operator
enabled: false
enabled: true
speaker:
# optional additional labels for the speaker serviceMonitor
additionalLabels: {}
# optional additional annotations for the speaker serviceMonitor
annotations: {}
# optional tls configuration for the speaker serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
controller:
# optional additional labels for the controller serviceMonitor
additionalLabels: {}
# optional additional annotations for the controller serviceMonitor
annotations: {}
# optional tls configuration for the controller serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator alertmanager alerts
prometheusRule:
# enable alertmanager alerts
enabled: false
# optional additional labels for prometheusRules
additionalLabels: {}
# optional annotations for prometheusRules
annotations: {}
# MetalLBStaleConfig
staleConfig:
enabled: true
labels:
severity: warning
# MetalLBConfigNotLoaded
configNotLoaded:
enabled: true
labels:
severity: warning
# MetalLBAddressPoolExhausted
addressPoolExhausted:
enabled: true
labels:
severity: alert
addressPoolUsage:
enabled: true
thresholds:
- percent: 75
labels:
severity: warning
- percent: 85
labels:
severity: warning
- percent: 95
labels:
severity: alert
# MetalLBBGPSessionDown
bgpSessionDown:
enabled: true
labels:
severity: alert
extraAlerts: []
# controller contains configuration specific to the MetalLB cluster
# controller.
@@ -193,59 +20,7 @@ controller:
enabled: true
# -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
# command: /controller
# webhookMode: enabled
image:
repository: quay.io/metallb/controller
tag:
pullPolicy:
## @param controller.updateStrategy.type Metallb controller deployment strategy type.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## e.g:
## strategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
strategy:
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
securityContext:
runAsNonRoot: true
# nobody
runAsUser: 65534
fsGroup: 65534
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
runtimeClassName: ""
affinity: {}
podAnnotations: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
@@ -254,83 +29,7 @@ speaker:
# command: /speaker
# -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
tolerateMaster: true
memberlist:
enabled: true
mlBindPort: 7946
mlSecretKeyPath: "/etc/ml_secret_key"
image:
repository: quay.io/metallb/speaker
tag:
pullPolicy:
## @param speaker.updateStrategy.type Speaker daemonset strategy type
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy:
## StrategyType
## Can be set to RollingUpdate or OnDelete
##
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
## Defines a secret name for the controller to generate a memberlist encryption secret
## By default secretName: {{ "metallb.fullname" }}-memberlist
##
# secretName:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
affinity: {}
## Selects which runtime class will be used by the pod.
runtimeClassName: ""
podAnnotations: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 5
# frr contains configuration specific to the MetalLB FRR container,
# for speaker running alongside FRR.
frr:
enabled: false
image:
repository: quay.io/frrouting/frr
tag: 7.5.1
pullPolicy:
metricsPort: 7473
resources: {}
# if set, enables a rbac proxy sidecar container on the speaker to
# expose the frr metrics via tls.
# secureMetricsPort: 9121
reloader:
resources: {}
frrMetrics:
resources: {}
crds:
enabled: true


@@ -1,28 +1,23 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "renovate-env",
"namespace": "renovate",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "renovate-env",
"namespace": "renovate",
"creationTimestamp": null
},
"type": "Opaque"
},
"encryptedData": {
"RENOVATE_AUTODISCOVER": "AgCi+SttRaCTLc7WgBxUiYS56TFgY67zBsMDtRSYbCSXs1XpoTyzOiT0X1gRki8VkGUiI4KQSu/Tph5qbgLnmqrGi7SvzMYzz+vuvPNH9vTPHNnuu92deG1MOxzogybmu3utJyybImWCiv1r7huZuKOLnBhs+r3qC+tImQ6ZcBSvSvVzTQsX5MIed68Xhkx/ZJkoDMAI7MkqZsNsWZbbuQ2HSBpnAkcgUTey3nbO3168aV4Pa/fQhomyTp62v71lVmoYtPmElCrhJUCOlMSSqKxv8zNSCiZImJRTszb1+VeJF2OrfGh8PK6qVWBlY0COiy/VXuVPgJLzPmUJD5yv0qTWITh0c23N4KahNrTfx6l96ctQWY6pU9dJo8fnMLBR4RLxVnQiq8vVDrwFytzQkLtj1E1SP/DCKhk84b2ZfPji+y4teQN0L68VQeZJCvzsxzgkksqDfatOxMyAmx1prkuNM1AlUmYuba8mDvLeViSWxk1pS+qYiaZTAl5GSVHDWSCU02HmoQkmUsSfvEgHUna4Jp86io0GXAyTXjidJf+RpbQ+dTNXAL+m1/3HxhlSDG48idclJJ0oIedF/FzVD53B6QvkqGqAypYrHm4A61DZLSZrvTYE4GuHXKbF75rbvktnC07sTMm/a1tr4zym9Q7ngG3NtDA+WrCe4P/e8m+tO1GwRuC2OW2owWrhpAelAQEu0Ten",
"RENOVATE_ENDPOINT": "AgAZuBCJeukXw54Na4CR8hJT/OW4KnO7ztLk2IGFGknkwFv7olxfE8X/j9IDps+EKL5Sq0I27jq5ZYaAg8i7mmrF6IxeC25Ri2nhl9/vIzFs6h+UtbBe7Yqel7ysnQ/wZ9+cOfedOr0vO7s4edYAKwmmHNDwKFG/YwXs4XyfS/f5lMQ3W3Ecu0ML9z0wfzzzjbixaE4jPHEkTD6FegiP3tPN1xoU3hQO91Sv46hZ5eh7dDwXwH8BU6GUUb/nKeIGrKekQVrBxDt867A+sRiRtcSvhMxg7w9fFV+CyY4z9cehOUwh/Mf6BTANIoGXCFdWZcWP8RvnpkcDTaolhlKkBZ3CHbyGUPdtENVK/0mPyWC8lss2BXIl5bV7SGoTIlCxWLOdfP/RIGL+1FcpDg+n/H46jeDI+4vSWZapu8jGAaDTlDtwMvq3XbKHcpjBmIr8aKG++LU8gNrGf+lhulbFjn+VZC26+M/aGVrY1U7rOt7HTF/L1Q40k1J6IDbpsX08UiacQwvIpcgs6RZ0bdng2xY+3kEfdFVxwKGwHkL3LJrxb09P3WfSEDpV7aTh/dhXwVCfRJpiTeOopiZMRH4etXNmLPoZi2E+NZSAnYVV8BuQAS0ETopTu8WKK41+yr2r0PXbiYBLElji8ckfnMaA5//ocgwUqtaBTNs4duKRfUI6f1tunZWvlkwzzU++ib5pn81D35dYFrN9cM7I/+P6fCaUt2yYaJHzQtcNvrn0N/P6zYHI",
"RENOVATE_GIT_AUTHOR": "AgCtYOZW5mAfiMqhx4CZfPFCtT/MSMmnYOWG210yrac88zx1epHgMlaVoZ1eD9l5kIjOw86QoYLHCOXFm/aE88BbEmuCWG3fh+/sHiXgztBHwOaHBd9EkS48/aVRx/CeapNfjtqB9HpvdV4uD9/zxfRz9+f1kQiibW4cy5Jmbd69L1i4k1h4NW/9LdznBZGIaw4XM8At5sXYNfetQH/IG7OJCwZ+nG/ESfHayF2p0s44R+lkDVHNcy7193iznfFHZjgZ13dr3Sy14JNPmrRba0ySRkcsXW8+oiUckKqaW6SMFNxev5o0ys1S8dothyxDJIPeKfRLVnqkO7aWsQqpVQVFnXBCs1bA1A90gkpMow1qm2WDAa80qI79GZWbNWOyHUzt0Y7UcjPcVyiJjd2DhWBVA8vn9UGhj7FN2vU2L0zrB3DDTYP6dKmVCo61Z2P3RGP7+BFk7+bIb65cE+LZvDhM3ED94aTuZ+ol2pQWzp/Kxle6zGznS7NZ1Lb04997UKW8isrebmYsALh3ljXmcgB4X/jH8F29wqeAMsY0ystotzeRZ/rtB06qa7qmlBkYXBY5sV8GqohAxRotr6I+CKeFIf9wm4DBQaVMc7lJqstvuCA5G94Rh30QRGMInfRW0vaXukhLMpuOYtBSXLNjlYFRLxTT7tjUJdhyQqbdTNRHNIA4noABoXIXPCscpAGPhgNipoiOfadmsNZEhf/rBlAZtDTSOg19LV82RwfJ3lhB0eDp",
"RENOVATE_GIT_URL": "AgBzTC6keBSA+o7Ppfj/af7D+ebwpbDkIH7Vv8oRePPWvnbamtzcTmsamjW11Xn0w6eRGYZm6cnbP9GnGnjn6/VyCFpl+jTUm/pZNQjthpm4VEi3stvQtt9TdSnguWZILWbb60JwFQcBVNdyOlj+ow/yD39LgK28McOfSHg5zw93IX8OkqqHyI2VIZ53u5xtARTL04Hni6/KF8qY6rjukncV7pwn5y+zWvcfMICmBiazI1FJjuj6O3jznDffYmvOUOOaxCQK82OipY5zGg3sznjdJnXt6emAznO+i0NIc9xYl5qWWm9onXdjLxDzoLjxdQQNwQ2JNvROCeWs7E7LZzcUYhjfhNUBTRZb3Y1mqH4OcVKauEZcKhp5kFKM2gwI1SutGmZltMjCvbnEav7Hjp0ZQN4ybIP+yJVNNTpUtSw7sdu3Ectoe7rsSBOPgT8fQvLeWyqLL/cBxkOnmsVBmt7tTfgBmrodhYM/p2Fl1MFpxNQQsXDB7PdArs3s6ZQrgWmvvuZAk73knUxWL9vGZRJU6EbXb6clPrLNJlNz8xJjCvub4o2U8rykhTV6n/g/IgcO0GKm/vDijWgaktFS9zzQLWdpdyV2ptAbPm2o0CWW04WBxthhY8NOG7C2HcRWB9IvZByED2ghLDXA9Rzcn7VfP7Qo9ed7ZoJRlChwmY8kcFS2lccPvqKzV+oQq1cQkWpIyMzv+TZK0w==",
"RENOVATE_GIT_USERNAME": "AgBfBA0vZhMRs7SU2zMaVzYzCQ01q4wVBfU0YkYbs8WrWAPVl+1DWt+c7q5XaBU5QG3hXCZKiyXTq8nldTT06zXswd41DuYN473c3de2PJwuLpyC3Kxp40od7flcMiUOGznm8MlPtn/SteN4GDIifud7y0sawbvNsm5Mfqu6aqMRQamzhxnJw/hUXXd07O5G1A1QXYmSiWLKAGykjdOphad/OScW+gJ+gNjnh2RsFhrUxZqP7Lwa7echhTkMcVmfFX0BQFK7pRbAKqQwxFBWWRbFQvfdYTqOqkkYa4abEUzNz8rcVbsDTIGVJHUGxMWcy2CkqjeeAYFO96NIr34LQ92gF+in6EQVZhKmbdOWDFhV7mrV84Wew7qJLnzWVNOAacm+E1cSh1pWBX71SIJ9oTHm64Lz8T6+YLR/WxbETMKs+HoYBZRnyISOTMtnQFOyiC9rPhBpUFtC8Q7UuQlntiXubz/JtXxu1mbT1Rq69y9QDRObIE0597XxOrxMCuwCOWU27SrbQd3ne923d1UmSWpaN8O5DYuu5GTJQuG6C669uBzO6L9f2iL7ykLZTcs37i7rAzLqBjWKwCm6zyz2MdztXhllJNgblYB943Whx8Rw9GbAZnmapQ8DAd5fzPaR6FjhUuZ/C4XnmhsQ8h81iCssWiI/sXW1Jl68mP48/aMVXDJCar+9avMls+p7pYjt7g+/RHQ6SehqmA==",
"RENOVATE_PLATFORM": "AgAWNczM1n5N2YtAFClfIRRJ3l1iP34fSP/dx+ffbtDsfQJvp8EqJmAOIyrSXbcY9aqST6jGQvTaHpUMBbtaUXyakAn5sGC7CtAJYo5pJ6wKUygJ36q5uhpfj4ocyqNZuipBEVMbPcE3Jt+s0bnS1bmQRCQOSSm89nWf669gZGCfgVEvEYmHXGmXLgYbPTCBhjLLPyx5ZzeVoF+D+RG945a/GtOxChRAkm4eTFMJ6gzzSThbi3/9rFWohpXM6VJynpv8/X1sYEbaUuGh4sYSxYK9X9YGYR9vn+U5uPBvpBtsctvHY3JoLL/9pNKw9fu6JltBm8ynItV+I2nkssP4UROkVamu8Q8YzXuXYS+H5D5q74qy+J21QgKIKzxhQ08gvCbYH+1C2x0NSjZtjqKHutzhp6rLAEXQrMUS+1HwHxYzsxLJZzI2xwPNc829EwSWq/VkeS8jS7sUU1oKOikfbxpatvvMUX6t0VNDlsFoAbcTVrOwipTs6Cosu5ttiwr213KkuZ6eTWUnOxyZgLyNYBimuwhcvfEazmn/VG74qWTCRM2b7EtBcj/q997K7euMIEPAYfMZ1L3tr58szJ/ZSUYoe3x5W5DOAwv7Ut9gmtf4GlmajLkUmgP/bIInae8D/LweWXnPRb7PrQE46za7aNVcMBN6xLVeuIStA+g9dSsYpVfflhbGsuTAKo8g7oveaHOuPxKARg==",
"RENOVATE_TOKEN": "AgB2UWMGLKcFyG0PTMrykaZaUqFJmM5JEPYAMRxzMWR00JfAXjnArCkWFZtyQ/oc7YKmHUGE1D2ChFYJrxZzCyRwgukLf2w0vUh+lfS8Iv9bpZv6PHhGLSXw1IaBan7Bm1f1GN5ig1S4jrZNqCJI4Lc8w/Wb8Mtw8Zsk+toBMZjaa7bfhceoFKhFbaKRc8LDMCwatV0gGM8m8TPji5rpCAJJ2xd9HQdDkoAFu75aYV0qbXsE7MOm5cjjqrxKQWtevOWYi5tcbUvwQFm+1mAkpe/52a61j4Iu1hIqjTb8f6ONu8ut88UfpbYoiZNgtjASrbCYAiEK49qY3HAHcoYhnuWHdREQbxjOk2f/vx12QXEvs/1QzdK3CF67qctZ9QEWn6xI8MRKw/KiUWCXjh8WEQvdAhQ9MGLH1kWMLq/km+TQm6N6gDjbCo3kov/qhLl4iqMJYR0A4mqJGN9yJHG4kC3YcYmOElw0K8/P4zKR6HBJNCQU47+pvhOnaiiMyH9uOOek3Sad52mRdhFa9f7o/Suojlep0W17GETvyz5Z5W4YqxBk0qPcVc2dK8YkpQnAoZ8pJoJSX7N+mI3jsHyTT3swu6PR1sAL9owpV2lVcs3GWDN4I8IAmQ4ZuBXhfkRDnLq7EYhD0HyZ2w1ySfHc8P43PfTBrUFllkDuxpwv6LuimTuR6kgVAaSAvuIrOkfq53OhWtYWRJzt5JPxu+V40QpeSZXetLb/iqEY1lgVEYeKAEHc5SfPwtTN"
}
}
}
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: renovate-env
namespace: renovate
spec:
encryptedData:
GITHUB_COM_TOKEN: AgAUbw4S60DvCn+tRs7KGFkW0geB9OB/H2UzUonoHDShrMugMBL/lXN/ICE+kIRFX3GMK7DwCP3fxxJpJb8v1WYe8fjwFsQTB2cuHYU/HRJ7/Df1iesG5FWZXzeE5WdLFz5r+gunisbcjrIvnY+r5xO4Jxhd6/EnX2AKcTPldp0btj0+Axut+Ox4R7sNjXCfYNgjVfKorGwUcujG/tnu6P6ggNNjE+OsPMd3xV4KuY0EE4qg6hHwzoEXrG9r/V2XHyan6Y7LMPqxPhFd4Or6bgEJQThqqPMua7wLtARyayc6iwm9MB0U5/Z3/RB5kE6e/oXB5RuBONR6dY6H6rPoRoXGRHEdkC/3PzDfGaGSH+geleFHl1lwS+hOGj+icCJUpAoe2VHjvWbvWBxfZb1FW0MFybiA11zR2ki0auuHBXn6BV0qk/zmG7VzNJkBnF4Dtvd9aLw8F1zTulKY7C8APDeXRghA4+ggVDjCiyBmiw7qQDGyt3pEVANCNlkpATVNB2fHXDZqd3CHsvk3hkqPImTZx2dhZAeskUVzeZLqeIw5Vj2tRzneRchxXh/2d5qp5O2s/bfc+2MwtLB1pKDb2k6c62SBz4eou1ILq0C7DSTTptGDQ2u6YabcW18sb6kN7j+G6Xz7RR8My4uZCclRzwYkdXeePlckVYi7jWhP21nNRfxT0CtgyPUnRfEcRzrr9k+OE80hugJxZtq9QUrXaFNEe0ikhe+FIRjYvAy62ibVoIHrev1inEAx
RENOVATE_AUTODISCOVER: AgAShtle7hbkHoQ9edd2mawUGPaAdYk2rW0+7W1frPobEAmkQ0MXgk+6S0MWqZ4k0YRSfBVXY6sNpX2FW8ZPTl6EZOKf4gl6kY+ZhmAlbdCInapabCFSHln5KRaIfYKk42KacNp0gvfEooVNVB3xzwSQvvbmDsN1NfhPjUf+0xH2yoR8FimF1OeDOMWwkIwbKtG/l2SFZ8XvpUoArSy7WCLZ0BTqMUHG8TK+VdRrS1ZvBcSE9QXtg/jK5iM1N9Dnd3ho9nBqC/GaBuOzqJ36d36lVyDen8/KizHDFMpVTzQTsXZW44NWQHjbJyTsx+VzktWLuurt50nKoj0VQumvMiramjINbUIh6JhyKlvyMirRCog7eQBDho1I8rC5xGfcDF1QimyZhFH/Rsgit2KrWSswFOAJotU4wcJS6HJccLsm4OJP7A+5/Iy/HuD+a+5xKi7ZUnIyf/TdL1f0jWGSGYNEZCWS9c6SQBbwUT4bNMV+2kbTte2ATKqAP4f2L7qnjYKqtFj0OT/L168BXciimgktDyLc7jm6A+DqlDHa88p9mTeNT5k/eg9PE5JfIQki9DW7maU5kcp6FtqJabTHlHaMf65IF8zc1/UTKJyAd91lmyJdevwIDKPWB+qq5scbHs7R8jyhgRnmdgp4BDkq3lUbcwNmDJfXG8gJO+qxV1dw/UlBs9o3NS5AcnF1rVTwKguzDWqA
RENOVATE_ENDPOINT: AgBL0fiAo7+lYc0CC2smzyt8QIlflx/6ur991hbpoJ2jPn94kO+9ECT5qhMNeBIosF8Eb9G32A44XbCjHncVSJQh1JEKB9/brIx6EBKjShkqnL+QAz5MibYW5yoykowvsdi0grB8rkeEvUOlKTOL+HmRrpS4JqDYeqaKOibdYP68bKIWmsvBceM8hLD+l2p+7D0iRYcFSxhpjR+gIiWmtwgkaAxt9cXmBalBuUFrr4wfYzxN3uav0+TPwISk8HhVWVNQCz+BO/HHQ+COQ8bN1Srxa6cE67LaCUSxArcX8vl/PCl9atf99F98TJ7S1R7MCxTLpPyd61rvwgVrBDfV8IUBqJ6LqYfyigMKUGPUMTVmihccWrgJr3lyCGC0FcPB1F1jBromMpG4CDHewvx+Pq+KzpoH+EYo/se4yUxhUy27vl0yh6m7aFBOvyixq6F/nKDDm/wa1e8ntflfjiB3JpsgvTXZPdvnBNY6UVDjQm1AgT/IbVJ6N+ZH8ZP0gD0ExqsaChvEaiEmI8sZSTqj+TZDAUQWDHRfRN3X4RtRLsbP1ZzUKgQdtF+dJ0CdcEOanVxRpzBwpxm3Bwd8sYSbxDb+GONQF7ZHkSzAx/hKiE/yQ/f/UWs/pNrHuBcIJ7kn5y0YjQ/7KBiHz4qJU3vSTr9LvhN71z/CLqnCD6udhKAc4qShWM0ozMBQO4b5hfQ8hHtCcxPva7shF3NEoUlfV84qVjbl62jNpNC+Z5TSghgMc/dO
RENOVATE_GIT_AUTHOR: AgA9rIb5PcqeA3AdS9eUv9CEBmSI37s/VrpVOaStL/CKVQTnlzTF5zEoKxVxuOd4t+QkgZqxrO63IIVjyeBvqL2MHbdkoOS1jAhIeSMqJkYB5PCozMOY5puWTd0TIAg85BMpjvkrGK6SsdLZ4O/TEDzeagtvHFfNhW/6wwQaBX+NUWJUa61KV9xHINDjmCrIPot23t2kioINxgWG7yydHIDvgXF11yN1LrmcZdntRPUvaCcQKF8pgemWn0XhN6JEC0/8BEk0AWq3C3mn2VqnqKoAxCnM4bJ/gTw4OK8kTL2XlolTSxf53UydqUJG8p/Wen3bgd82wXYPLeISsuLplvCxhEV+puHHld23q0oqpyiTZSOC+sDjyP5H/5iHwTk79YBjrmmjO0lQjRpZuPVBYqnsiNXQbqxRA+9S9ceTFRfPglYmshocTZ8xFypaGCrIHdi7KqSIGl7NA1rMbN1NMN/eJ/JqkD2R+EnAEagcdPFEd9tYWAj5hAIWcsApdbWlbc/nCMInCtlJnLceH7JbqY4o513xv2vhGZHefShadPQnM7FNTrV7cRQTZc8MnsMPUrMsmbsAAEqMBKbCBwhV0UVOKbyLEigiXvpoigf8LUtiAS8U1b9G4xMxNs31ayJ8th4qAhBRabXqzNCIAcks8FlZSGp9SDs6WiETOl6auZyLk1nSScshN03zSudQV/Kzjj7gKXbaUkyni9no4maZpjPTRrbyLWZMGTnRKDSgeEe5W0wH
RENOVATE_GIT_URL: AgAOqVnkWLda7l7jJ3BuYnuy/HLV98aXarO5lJYcH2BG8u4QIShqFnLU7bnvivjeQDe2OM2qckm+1JvG3L/ggXkPt0m3gLcqMqoRYjqb9OUYJBUivCjjdWg9ylE8gj/EZqqcB+duLmIu58Gc+sboVMUJtGhVXmqk1jotHcKI5KojjPPdVDpJGglqiRbCvdQP6X21YeH1RMurG0i3isak6+qt0PclsdLWfOaK79vNKZkV3XGqeauELrRMv3ZaaCkLsoYiJz4l+bS4BgFtiFAFQJ/rad8vZEUgW3RzerVArUvl1I5tk8IZX8DeNugS/lfZVesQ48PCZV3ok+DOFnc2nITFFYIxTM/XzUl9VyQg8eQqfs6gsekypRkichb6Crw3e68AN6mUXQCLbIe6RajjIT1q722hwW/Vzivt4TmXuzPIQs2IjCWVnTf3cPAoGTT/YrFi6+PRl60FIhqjhJamgvWg47rKfLzuJ6aDVhxI3r6E4oVsU7AiOs0gYS5t7Jec/5bWxew3/S7mt2QBh2On78z7hL2VVyofknpa7tXZEAz8dEptjzGLkhIcKfPz/WNX0Imvzej87ihKx6/l0YrSpqhd4Bx0hlYZi8+TU2A5pbhFEAoDAOuvn/F/13UiLwh2yWRGGQsD6q94dy34WJNfrwUK5rnNIvRKErLKcQdy2VMS+9uBcfm7XAH+mIQUDvdHfzpLOK9ZHZakLQ==
RENOVATE_GIT_USERNAME: AgAqpRl7jvkJigWFaLgjebC17azWigiIxyiiA2ohfOPvB1y8BrgU5hxHj+ONRiu6UlKBuN1S8rNH2jqW9XN+8qHxZ79FF++27T/gnTHL0UEHmYYZNZiOphi8Ei0nixf6rMVCS3P8GW+2mFsuVWVn+AGbuWnSWYT3ILMuoAYQ4Gipz9k55sqXzen0mf+hUH+cYm8t3UCGXmXWm/GQZzKCfiB39Cx6qMzaSQQ0lkmJcjHRV+c3GeYPcgsU6HOwa1yubIpD8p+tQYrMRdl9M0ITgWiGFBJWUqYx59wDF2Agmfb2HBQj5s/ixG9GosR7WhafXCrqWf3LvClI7BM6b0iSgR5l4AdgGpFyBOzeU5WDVjlENcOZucRR+i5gRjUEPHJX7CQgvLM3/6BsOGtfZAn/XA25f6WLXNx/vrDIhS2KucaoA9jhwwbMNKo3jbWyUFilvrPowLWlB7Tj1uDOrXyte3sBZEXhsYOaNupqIm4IqTOW/uCY89SSxLtUVtkHGNSXfOW4waEMZoafgzsLE3C1W0UelXm4Q0BfHsdZlzkapFW2E4jGAsnTt55sdwsDy9DdG2/EFQ/ugPfqQlEmLRrEv48Z2nIuUcUWoIfP/5WyZdxsDLGfRFmZY4OLyEZ52ul0SXuwPIqsYuuzrAIvJe3ou6XKEWS7i+DbUX7cx4/f1Bk3IqZrZugNmRaMZlDrjitLX8DQfK7L01Kskw==
RENOVATE_PLATFORM: AgCwa6VX+N2GRSu4lJEmI4BmK21IXq3SItRU9HAIsNv7kkJoRFtY+mFmWqaUF757ZKrK4Sf1H+l3f9sCgbsLujN6bMgqPCFtGR5v4d2SdHJVrhQ6q0xsjAm/AvSZknNC3h8/1AyS6nMTjhJvA6ns+12HT20n2SjaPjh/ug1FQqe0NGHndFKN+ttsXUhtpkwyACQQYkWYsfXJRpwKfqFSdEjA5CTFsHKHAiGbIYt7KVi87hf63rmWPmzr18jNKflHFTbwO/r+/wo63SPvKDjuvTe5mzVVtP+Y8GykGI2d49u5Ebvdd/CnoClyy/dEVJ630kMe5PxB3Pl0Ai1LpDk7uoTbvVkylFtW2/B+9UWKBIy7HTsgKtbQJHZI+fqw6MMH2vLH8ey0SlRCKxA0ZSsx9aWIVfoytnmPGxnAYbHxcKbYIFQXlfnE+M8EiCIBFYEVJL8sk0jxgAkgMRgc8M7MjXYif1EM1nGHUeivO1m5KUVAF6PSL07YR2JDu32KXfEVw9xPHeTySwE/Hfzr3Ce+PjZ2WwA4MIwA+v7clBwxvpBOlby5g3/CudNUerOg0WW3dt39AyDbD696Ybjml70NUHMecCnRGXVhk/yJUzUWaWyRlnipmaID2WMfFfQ0BHV9T3cfmQGmKOBE38SkEx74kuLAesU4xgfNJSVtAQKIjMruJg92erXK9ilEGHps88Md/r2yKjvo/A==
RENOVATE_TOKEN: AgA3YGnzCWbp9+srwWRExEnCO7Gv/oCVruJLiyp1pdI18qM76GugUOsPRwNydYbi9g3I/a8fCGaF0rAtkKR5Vc3TVo2/m78fc4qRYVGpgtKgcJSwvlbx/pfoe9WU6Ynd3QYigsN0WWfUNmMncQb2I4UoYR7e/B5B9FqNQMC6XMzp3hxQhRLH9Eba0AH1U1HQNDnAaEp7sXNQ6KRvdguWU+iB1DmO/HLAAsFa6Bd2fgO8Vaw0TcFpqKbkBCUoaKVmQT7eWCN6g1XrkTpPJTkARnlhdbb23sPczF0zUJ/WJp2BliKEfey2zzKk8qBeCI917114yPm0uvHFxc4moTnfLqp1AFHTFkFj+soVTe/A8Br1iplmE4xknLZJFe/XA8TSyVkctKlAHLwDewOV4v4YvU7W5FsREyBPCEtFpszsAgmsxSlUiwRQzhW4s5JBb6oEwlpFqWBgvGZzzVB0e2DSostAk1MDYF6RQ2xinYB35/P++qg6UdWSwHA35ueoODP/re6rRAd/TS6bijEzCmSWP4nnwRWAGYzyce+d+CCTRW5QCbGpLru+Uli/wwP0C8QHlM/iWlopsYxG0AYyvgg8operPH81D3hzuVYiRO19dqzQKNVSQYN5k63n+PmheJYlOrz+91Aw4Tna3Y7v9GVLQdtzkVVJ9+kJLnrX5C6qov869jVg1UDjqXGP5XZSEUa8zMy8FOb+RPXNRpOJKNyosF7IUrsOoZOjg09tu7W9TOYkpZFi+WCM+se7
template:
metadata:
creationTimestamp: null
name: renovate-env
namespace: renovate
type: Opaque
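
The change above rewrites the renovate-env SealedSecret from JSON to YAML and adds a GITHUB_COM_TOKEN entry (typically used by Renovate to fetch changelogs from github.com). The encryptedData blobs are ciphertext bound to the cluster's sealing key, so re-sealing produces entirely new strings even when most of the plaintext stays the same, which is why every value changed. A sketch of the plaintext Secret that would be piped through kubeseal --format yaml to regenerate a manifest like this; all literal values below are placeholders, not the repository's real settings:

apiVersion: v1
kind: Secret
metadata:
  name: renovate-env
  namespace: renovate
type: Opaque
stringData:                                      # placeholder values only
  GITHUB_COM_TOKEN: <github.com read-only token>
  RENOVATE_AUTODISCOVER: "true"
  RENOVATE_ENDPOINT: <gitea api url>
  RENOVATE_GIT_AUTHOR: <bot name and email>
  RENOVATE_GIT_URL: <git url mode>
  RENOVATE_GIT_USERNAME: <bot user>
  RENOVATE_PLATFORM: <platform, e.g. gitea>
  RENOVATE_TOKEN: <gitea token>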


@@ -7,15 +7,14 @@ data:
[ping]
[global]
checkNewVersion = true
checkNewVersion = false
# renovate does that
sendAnonymousUsage = false
[log]
level = "INFO"
[accessLog]
# format = "json"
# filePath = "/var/log/traefik/access.log"
[accessLog.fields]
defaultMode = "keep"
[accessLog.fields.names]
@@ -48,8 +47,8 @@ data:
allowCrossNamespace = true
[providers.kubernetesIngress]
allowExternalNameServices = true
ingressClass = "traefik"
ingressClass = "traefik"
[serversTransport]
insecureSkipVerify = true
@@ -64,25 +63,28 @@ data:
[entryPoints.websecure]
address = ":8443"
[entryPoints.websecure.forwardedHeaders]
insecure = true
# forward ip headers no matter where they come from
[entryPoints.metrics]
address = ":9100"
[entryPoints.traefik]
address = ":9000"
[entryPoints.dnsovertls] # route dns over https to other pods but provide own certificate
[entryPoints.dnsovertls]
address = ":853"
# route dns over https to other pods but provide own certificate
[metrics]
[metrics.influxDB2]
address = "http://influxdb-influxdb2.monitoring:80"
token = "N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ=="
org = "influxdata"
bucket = "kluster"
[metrics.prometheus]
entryPoint = "metrics"
addEntryPointsLabels = true
addServicesLabels = true
[certificatesResolvers.default-tls.acme]
email = "me@moll.re"
storage = "/certs/acme.json"
[certificatesResolvers.default-tls.acme.tlsChallenge]
[experimental.plugins.traefik-plugin-geoblock]
moduleName = "github.com/nscuro/traefik-plugin-geoblock"
version = "v0.10.0"
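
The static configuration above defines the entrypoints (websecure on :8443, metrics on :9100, dnsovertls on :853), an ACME resolver named default-tls, and registers the geoblock plugin. Individual services then opt into these pieces through their routing objects; a hedged sketch of such a consumer, where the hostname and service name are made up and the CRD group assumes the traefik.io API served by chart 26.x:

apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: example
  namespace: example
spec:
  entryPoints:
    - websecure                  # the :8443 entrypoint defined above
  routes:
    - match: Host(`example.moll.re`)
      kind: Rule
      services:
        - name: example
          port: 80
  tls:
    certResolver: default-tls    # the ACME resolver configured above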


@@ -4,17 +4,13 @@ resources:
- namespace.yaml
- pvc.yaml
- configmap.yaml
- servicemonitor.yaml
namespace: traefik-system
helmCharts:
- name: traefik
releaseName: traefik
version: 26.0.0
version: 26.1.0
valuesFile: values.yaml
repo: https://helm.traefik.io/traefik
# - name: telegraf
# releaseName: telegraf?
# version: "?"
# valuesFile: telegraf.values.yaml
# repo: https://helm.influxdata.com/
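
Because this kustomization inflates a Helm chart via the helmCharts field, a plain kustomize build is not enough; the build needs the Helm generator enabled (kustomize build --enable-helm). With Argo CD driving the sync, that is usually switched on globally through the argocd-cm ConfigMap; a sketch under that assumption (key name per the Argo CD documentation, worth verifying against the deployed version):

apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
  labels:
    app.kubernetes.io/part-of: argocd
data:
  kustomize.buildOptions: --enable-helm   # lets Argo CD render helmCharts entries like the one above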


@@ -0,0 +1,29 @@
# apiVersion: monitoring.coreos.com/v1
# kind: ServiceMonitor
# metadata:
# name: traefik-servicemonitor
# labels:
# app: traefik
# spec:
# selector:
# matchLabels:
# app.kubernetes.io/name: traefik
# endpoints:
# - port: metrics
# path: /metrics
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: traefik-podmonitor
labels:
app: traefik
spec:
selector:
matchLabels:
app.kubernetes.io/name: traefik
namespaceSelector:
matchNames:
- traefik-system
podMetricsEndpoints:
- port: metrics
path: /metrics
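
The PodMonitor selects pods labelled app.kubernetes.io/name=traefik in traefik-system and scrapes the container port named metrics, which corresponds to the :9100 metrics entrypoint in the static configuration above. The ServiceMonitor variant stays commented out, presumably because that port is not exposed on the Traefik Service. A hedged sketch of the matching Helm values (the exact key layout should be checked against chart 26.x):

ports:
  metrics:
    port: 9100      # container port named "metrics", matched by podMetricsEndpoints[0].port
    expose: false   # not published on the Service, hence a PodMonitor rather than a ServiceMonitor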


@@ -0,0 +1,19 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: physics-application
namespace: argocd
annotations:
spec:
project: apps
source:
repoURL: ssh://git@git.kluster.moll.re:2222/remoll/eth-physics.git
targetRevision: v4
path: deployment/
destination:
server: https://kubernetes.default.svc
namespace: eth-physics
syncPolicy:
automated:
prune: true
selfHeal: true


@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- application.yaml
- repo.sealedsecret.yaml
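
repo.sealedsecret.yaml is referenced here but not shown; since the Application above clones over SSH from git.kluster.moll.re:2222, it is presumably the sealed form of an Argo CD repository-credentials Secret. A sketch of the plaintext that would be sealed, where the Secret name and key contents are assumptions and only the url is taken from the Application above (Argo CD discovers repository credentials from Secrets labelled argocd.argoproj.io/secret-type: repository):

apiVersion: v1
kind: Secret
metadata:
  name: eth-physics-repo
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  type: git
  url: ssh://git@git.kluster.moll.re:2222/remoll/eth-physics.git
  sshPrivateKey: |
    <deploy key with read access to the repository>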
