Compare commits
168 Commits
e317c77ed9 ... feature/un

SHA1 | Author | Date
---|---|---
d143a90228 | |||
1ad56fd27e | |||
773a155627 | |||
61945b3507 | |||
4aa21cb0cd | |||
d233ab96eb | |||
df581e0110 | |||
8a114b9384 | |||
ab6506f4f2 | |||
87242d293a | |||
11d46ec295 | |||
1b3702c4c8 | |||
9b68b4a915 | |||
18889d7391 | |||
a38ad1d7e6 | |||
edcb9158f5 | |||
71b1c252f3 | |||
b30f44d2c6 | |||
85abf0fda6 | |||
5e21ceaad3 | |||
3f5c1a5a5c | |||
0195833fc3 | |||
64835e16de | |||
4e11a33855 | |||
bad024861a | |||
fe5d6a9014 | |||
f2898d7e0b | |||
f67f0c8889 | |||
0ccb17d8e1 | |||
bb6d417937 | |||
4e2ebe2540 | |||
c5310b0f00 | |||
46ef973f70 | |||
c12d2dc7a6 | |||
e28c6ffd52 | |||
7ba6860ea0 | |||
33c23ee42b | |||
b2f8c8bced | |||
d5277d3d6a | |||
e3c90f5ede | |||
eb5bda63db | |||
a10a216f0e | |||
3cf9fd0b87 | |||
ea1fa1637f | |||
96abe2a0f5 | |||
9623f33b59 | |||
b065fc7e59 | |||
617ed5601c | |||
7e21ce4181 | |||
eeaed091ab | |||
ee52d2b777 | |||
384e9fbaec | |||
606aded35f | |||
a3aa8888e9 | |||
aaeb43e9c3 | |||
a9b1d02a7e | |||
76b49270eb | |||
9b57715f92 | |||
85a96cf87b | |||
78b4be8fbd | |||
7bc10b57ce | |||
de26a052e8 | |||
28ff769757 | |||
6a58ea337e | |||
2af279c161 | |||
c26997ff83 | |||
a354464f6e | |||
268a9f3a7a | |||
4ddeaf6c99 | |||
b6f9a818af | |||
f4670aa471 | |||
72a2914c24 | |||
1d5bc8a9c1 | |||
892c412fd9 | |||
b6f7ead955 | |||
f033ba16eb | |||
f3ae2c424b | |||
36035ee84d | |||
50679b400a | |||
a68fb5f0a7 | |||
5792367b8b | |||
3699b79f1a | |||
e473abda12 | |||
f67f586006 | |||
61e1276f02 | |||
111fd35fc3 | |||
cc4148fb8a | |||
f1e624985f | |||
c8d7d3c854 | |||
4880503609 | |||
f905ce1611 | |||
ecfc65ecdd | |||
7da1d705a4 | |||
299cbea97e | |||
b633d61920 | |||
bfb8244e59 | |||
33c2df9fa3 | |||
3d84d6bed1 | |||
cf6a931097 | |||
53c3865072 | |||
d09a3509af | |||
8c0abc16c4 | |||
399969677f | |||
762756310a | |||
ec964be7c3 | |||
0603da76b2 | |||
a437c4228e | |||
d5aab95186 | |||
3acb329730 | |||
73ce4e340f | |||
0d4b6f4605 | |||
deeb35bbb6 | |||
d4c658a28c | |||
1fcebe033b | |||
8fe51863f4 | |||
c4eda4e75d | |||
9490015728 | |||
a641df167f | |||
21d100fb62 | |||
26b06c553a | |||
d51bfcf7db | |||
788c2436fc | |||
c9e6d08dcd | |||
6b2e9f7165 | |||
8618468534 | |||
94d6c0f523 | |||
9aca8e9e0b | |||
72b7734535 | |||
28f33f8ff7 | |||
4cf26679c6 | |||
1cd4df8b8f | |||
adeb333954 | |||
e6bd080c6e | |||
c9f883eaa6 | |||
014309bad6 | |||
c61698fad9 | |||
8c21d58529 | |||
722b7c3fb6 | |||
b852da0321 | |||
9c5affeff6 | |||
b6c2f57acf | |||
2e4e033c36 | |||
285a7541ca | |||
dbf58027d8 | |||
2f9019b6ba | |||
1743ffca74 | |||
ea7527c143 | |||
c27b289866 | |||
4cbd95fd78 | |||
5cfb2a02e3 | |||
82559e848a | |||
4600c79ed4 | |||
e733e614d2 | |||
d4e28c96d3 | |||
2f31cd6934 | |||
4fdd4a39f5 | |||
23f2e98194 | |||
8de0427f61 | |||
62abc6637f | |||
2949e894d6 | |||
40407ce7fb | |||
388518ddf3 | |||
731598465e | |||
3027df2fdd | |||
12ece4852d | |||
a5f1a13b22 | |||
1686178650 | |||
86d32efc64 |
.gitmodules (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
[submodule "infrastructure/external-dns/octodns"]
	path = infrastructure/external-dns/octodns
	url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git
README.md (21 changed lines)
@@ -3,15 +3,30 @@

### Initial setup
On a running (and sealed-secrets installed) k3s instance run:
#### Requirements:
- A running k3s instance
- `sealedsecrets` deployed

#### Installing argo and the app-of-apps
```
kubectl apply -k infrastructure/argocd
```
This will install argocd and CRDs in a dedicated namespace along with the app-of-apps configured under `kluster-deployments/`.
This will install argocd and its CRDs in a dedicated namespace. The app-of-apps is configured under `kluster-deployments/` and deployed as well. This will bootstrap all other apps, as described in `./kluster-deployments`.
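To confirm the bootstrap actually went through, something along these lines works (a sketch only; it assumes ArgoCD ends up in an `argocd` namespace):

```
# pods of the argocd control plane should become Ready (assumes the "argocd" namespace)
kubectl -n argocd get pods
# the app-of-apps and the Applications it generates should appear and sync
kubectl -n argocd get applications.argoproj.io
```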
The app-of-apps will bootstrap a fully featured cluster with the following components
- postgres instance with backups
- backup of all nfs PVCs using restic
- traefik (along with metallb as a publicly accessible reverse proxy)
- an nfs-provisioner creating PVCs on-demand
- a range of selfhosted apps
  - gitea
- a range of selfhosted apps:
  - nextcloud
  - jellyfin
  - adguard-home
  - homarr
  - homeassistant
  - immich
  - ...


### Adding an application
@@ -10,7 +10,7 @@ resources:
images:
  - name: adguard/adguardhome
    newName: adguard/adguardhome
    newTag: v0.107.42
    newTag: v0.107.44

namespace: adguard
@@ -24,6 +24,8 @@ metadata:
spec:
  allocateLoadBalancerNodePorts: true
  loadBalancerIP: 192.168.3.2
  externalTrafficPolicy: Local

  ports:
    - name: dns-tcp
      nodePort: 31306
@@ -46,6 +48,7 @@ metadata:
spec:
  allocateLoadBalancerNodePorts: true
  loadBalancerIP: 192.168.3.2
  externalTrafficPolicy: Local
  ports:
    - name: dns-udp
      nodePort: 30547
apps/files/README.md (new file, 8 lines)
@@ -0,0 +1,8 @@
# File sync

My personal cross-platform file sync: syncthing for my Android and Linux clients, and Nextcloud for my iOS clients.


## Overview
Both services share a common persistence, which allows them to each apply their own logic for syncing to other devices. The server acts as a relay.
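In sketch form, the shared persistence is a single ReadWriteMany claim (`files-nfs`, defined further down) that both workloads mount at `/files`; for Nextcloud the mount goes through the chart's `extraVolumes`/`extraVolumeMounts` values. Boiled down, either pod amounts to something like this (illustrative only; the pod name and image are placeholders):

```
apiVersion: v1
kind: Pod
metadata:
  name: files-consumer         # placeholder, not part of the actual manifests
spec:
  containers:
    - name: app
      image: busybox           # placeholder image
      volumeMounts:
        - name: files-nfs
          mountPath: /files    # same path syncthing and nextcloud use
  volumes:
    - name: files-nfs
      persistentVolumeClaim:
        claimName: files-nfs   # the shared ReadWriteMany claim
```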
apps/files/kustomization.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: files

resources:
  - namespace.yaml
  - pvc.yaml

  - syncthing/
  - nextcloud/
apps/files/nextcloud/ingress.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: nextcloud-ingress

spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`nextcloud2.kluster.moll.re`)
      kind: Rule
      services:
        - name: nextcloud
          port: 8080
  tls:
    certResolver: default-tls
apps/files/nextcloud/kustomization.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - pvc.yaml
  - ingress.yaml
  - postgres.yaml
  - postgres-credentials.sealedsecret.yaml

helmCharts:
  - name: nextcloud
    releaseName: nextcloud
    version: 4.5.5
    valuesFile: values.yaml
    repo: https://nextcloud.github.io/helm/
17
apps/files/nextcloud/postgres-credentials.sealedsecret.yaml
Normal file
17
apps/files/nextcloud/postgres-credentials.sealedsecret.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
apiVersion: bitnami.com/v1alpha1
|
||||
kind: SealedSecret
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: postgres-credentials
|
||||
namespace: files
|
||||
spec:
|
||||
encryptedData:
|
||||
database: AgBOgmqlfgiN2VqxNyYL6O+/jdzPmGg97zOXxZ7KiD07b4/2FdmlWgOZZp7oUpQ9RMV0WybC0jau2YVlgXB32afgJ3uinaAAhzZwvzy8dgapNpe8ClxnFINRhKKC9kxK7YeDwtptbDQn7YtEmVGHI66/71VyGy7NME4Pk0Y4FxxpF6KAZMAHNyez4JMa9V+XFtYV5G5bOkPY/ku4LcYntiMAlEaArF+re1m5nLQmZ4SVkWlOc41N4Hv1HrCv8qq2kj7zVR5/J2qW8NlzmdJJqv1AP1foELuITZZKxwNspxynNxhjXTX0fP6vzfJpxtzb2s/4Yh2uT/UPb2rOdcGaXjjHKxjSX23tG5ZT+z5lt0y9UEmUYytlcsYv9vsRqCmeFsB63S7aABeCRSOJyGLsuUc7xqSZ2ijDG38qLij+JPgoEIbSLfRYVGE5GMo9EbHt4N+ZIMpJYQXq0VhDip/r11SENfUa3XoautQ5uVR1D50FuSrN16t24bQXai9uifkBpDyvqbiqgv7s3qOjF9u8I0eyeJA0ZO1JO174B9SO3IcZYys8c87fSuWvFbGepLNqfneSIx93klDUdx3YEjqcrqib49+3/dn3RO9/puyhJ6O0TEZneToyauV3lxpR+XG/PDx7EQ88lELgD/AmtulsLHkYNgpoblFPbgDUeHhOgoBRAe22Hiy0Co4eh0SPVPyKhj8MyYhPtLEV+UY=
|
||||
password: AgB2eY5aKJhEcJIgArGRrsqYf5pJJoXHRkplFpaqCCQW7X7WLREb+35HDijhnJSWRI2/LXDVy/8ArJe1LiiW+05aRY/9nvmjdpUmvsdQ6DK1mvirl8Py4JYueNrk2iUmI1h+ROyubBCvRBKxueQNkuwipKvk7nIlON6cwFnqp6GPcuWihSG/GZ2nSZmxmu+thdsM/S8DPaTW/N+Sut8DyarlCN94ZRiFVZIJialibfsJGQtL/uPX0W61GTkEU4m34IN9e+POdEdg3HuFMd3RvNQpndgPjaTv4A22TJRFs+rcHlcHr+5r8acVy1V+sZy97126Z7moeKDp1rFbG2/yMT1iS2oxQN4GJceTgMzSagqdn+KgD0N38OYvp+mRUQsl7+Fpglcq03vqbvxsc1fC78XpAAPMNA/pQDvtlS1qjuB7WCa5b3mkJxjc8efIuna9GAnDGh+djhlGHLEERnEfjlnpeDb/afRejUX+i6r00GnBxuRJfV+lKh4BJsnJm29nC4t10F6ff91Ngcjf+wCm5yWSFETZ9oFrPn10igGvoZwROJYABdtfMNjidGLkdKQnG1dj3EFu5XDn9vRqt8Iu/dEyoh7a2iGYDQ1lGpz+zxA/OZ9l/SuL6JUUwXq5W1/fSbtaBPdit4cwUTopq7AcpZkMuAQyVy6N9D0Hvjx432rCxmqyGU8PyjKHoAN+nuvTi79HtHR2wo4hJeIDoktdpxswSCe9VJEvqTFGQyCZtX3uEg==
|
||||
username: AgATMaQ/BRCO9vx329YxGGUGl3E68Tm3coU6IO6pYm8f+Uf7ImH4l/P84mjGDLho1zBUfILPAvM4G5xG2qkkyW4mEuB8A7NNWAhXMOS5i1msNaV2oqLYNWCOG2lFO7unkYwPSyu9EyGn/Hq/kbGPAKfUf6dtDLEc+Y0S5Ue9YA2gYK4VYUec491+02EOoprGcfM1QdGPLBrunXn4krxtGm+eTsK8nd/lnm3DK+f5uGupO844i8T0mXE1xcliysBTZzxEVpmzPN8q4TMay6qcB2wOvEyngnGCfxJGTSjTrkydPFLcI4p6IONW5QAX9eQwo6ZDo56WVNgvyNW+ZJ6hmPP9nLeHnKb3rM91CIMM0GDRYc3VFsVXwBY/sj12hiompXEVQEp+EJUbgnDLK2lW+J602ZnzyHFgwGKnfdI8PHfKoxRVf06TXPdROu1mfXr5jOXc+++LoRotkVOuf2KXMip/7HlTkRlZXKkenhIqrTtQkENJ+aaxCKdQwgE8iDtmB6ZEBiMJq/dZgvn7qbcMc/SYF3l6YZKSU2L1359CRTeuQ6J6aDml+WHvgtwLH6sIgR9Sjgxid9XlhQ3/8f9UQdR6OpblsBZYn8gYEQ1WRr7H1R3IjENpA7LtburPYyogSk4eSFWR1hkwfiiTJrfwJCPEka28a7MqX0nCKZqzzUOQqXNGPX8W9rU8aA2HcnSPrzLoOV2av9h4icw=
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: postgres-credentials
|
||||
namespace: files
|
apps/files/nextcloud/postgres.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: nextcloud-postgres
spec:
  instances: 1
  imageName: ghcr.io/cloudnative-pg/postgresql:16
  bootstrap:
    initdb:
      owner: nextcloud
      database: nextcloud
      secret:
        name: postgres-credentials

  storage:
    size: 1Gi
    storageClass: nfs-client

  monitoring:
    enablePodMonitor: true
apps/files/nextcloud/pvc.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nextcloud-config
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
155
apps/files/nextcloud/values.yaml
Normal file
155
apps/files/nextcloud/values.yaml
Normal file
@@ -0,0 +1,155 @@
|
||||
## Official nextcloud image version
|
||||
## ref: https://hub.docker.com/r/library/nextcloud/tags/
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
|
||||
|
||||
nextcloud:
|
||||
host: nextcloud2.kluster.moll.re
|
||||
username: admin
|
||||
password: changeme
|
||||
## Use an existing secret
|
||||
existingSecret:
|
||||
enabled: false
|
||||
update: 0
|
||||
# If web server is not binding default port, you can define it
|
||||
# containerPort: 8080
|
||||
datadir: /var/www/html/data
|
||||
persistence:
|
||||
subPath:
|
||||
mail:
|
||||
enabled: false
|
||||
# PHP Configuration files
|
||||
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
|
||||
phpConfigs: {}
|
||||
# Default config files
|
||||
# IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
|
||||
# Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
|
||||
defaultConfigs:
|
||||
# To protect /var/www/html/config
|
||||
.htaccess: true
|
||||
# Redis default configuration
|
||||
redis.config.php: true
|
||||
# Apache configuration for rewrite urls
|
||||
apache-pretty-urls.config.php: true
|
||||
# Define APCu as local cache
|
||||
apcu.config.php: true
|
||||
# Apps directory configs
|
||||
apps.config.php: true
|
||||
# Used for auto configure database
|
||||
autoconfig.php: true
|
||||
# SMTP default configuration
|
||||
smtp.config.php: true
|
||||
|
||||
|
||||
extraVolumes:
|
||||
- name: files-nfs
|
||||
persistentVolumeClaim:
|
||||
claimName: files-nfs
|
||||
|
||||
extraVolumeMounts:
|
||||
- name: files-nfs
|
||||
mountPath: /files
|
||||
|
||||
|
||||
# Extra config files created in /var/www/html/config/
|
||||
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
|
||||
# configs:
|
||||
# config.php: |-
|
||||
|
||||
# For example, to use S3 as primary storage
|
||||
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
|
||||
#
|
||||
# configs:
|
||||
# s3.config.php: |-
|
||||
# <?php
|
||||
# $CONFIG = array (
|
||||
# 'objectstore' => array(
|
||||
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
|
||||
# 'arguments' => array(
|
||||
# 'bucket' => 'my-bucket',
|
||||
# 'autocreate' => true,
|
||||
# 'key' => 'xxx',
|
||||
# 'secret' => 'xxx',
|
||||
# 'region' => 'us-east-1',
|
||||
# 'use_ssl' => true
|
||||
# )
|
||||
# )
|
||||
# );
|
||||
|
||||
nginx:
|
||||
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
|
||||
enabled: false
|
||||
|
||||
internalDatabase:
|
||||
enabled: false
|
||||
|
||||
##
|
||||
## External database configuration
|
||||
##
|
||||
externalDatabase:
|
||||
enabled: true
|
||||
type: postgresql
|
||||
host: nextcloud-postgres-rw
|
||||
|
||||
database: nextcloud
|
||||
existingSecret:
|
||||
enabled: true
|
||||
secretName: postgres-credentials
|
||||
usernameKey: username
|
||||
passwordKey: password
|
||||
|
||||
|
||||
mariadb:
|
||||
enabled: false
|
||||
postgresql:
|
||||
enabled: false
|
||||
redis:
|
||||
enabled: false
|
||||
|
||||
|
||||
cronjob:
|
||||
enabled: false
|
||||
|
||||
persistence:
|
||||
# Nextcloud Data (/var/www/html)
|
||||
enabled: true
|
||||
annotations: {}
|
||||
|
||||
## If defined, PVC must be created manually before volume will be bound
|
||||
existingClaim: nextcloud-config
|
||||
|
||||
## Use an additional pvc for the data directory rather than a subpath of the default PVC
|
||||
## Useful to store data on a different storageClass (e.g. on slower disks)
|
||||
nextcloudData:
|
||||
enabled: false
|
||||
|
||||
|
||||
resources:
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 2Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
|
||||
livenessProbe:
|
||||
enabled: false
|
||||
# disable when upgrading from a previous chart version
|
||||
|
||||
hpa:
|
||||
enabled: false
|
||||
|
||||
## Prometheus Exporter / Metrics
|
||||
##
|
||||
metrics:
|
||||
enabled: false
|
||||
|
||||
|
||||
rbac:
|
||||
enabled: false
|
apps/files/pvc.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: files-nfs
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
40
apps/files/syncthing/deployment.yaml
Normal file
40
apps/files/syncthing/deployment.yaml
Normal file
@@ -0,0 +1,40 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: syncthing
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: syncthing
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: syncthing
|
||||
spec:
|
||||
containers:
|
||||
- name: syncthing
|
||||
image: syncthing
|
||||
resources:
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "500m"
|
||||
ports:
|
||||
- containerPort: 8384
|
||||
protocol: TCP
|
||||
name: syncthing-web
|
||||
- containerPort: 22000
|
||||
protocol: TCP
|
||||
- containerPort: 22000
|
||||
protocol: UDP
|
||||
volumeMounts:
|
||||
- name: persistence
|
||||
mountPath: /files
|
||||
- name: config
|
||||
mountPath: /var/syncthing/config
|
||||
volumes:
|
||||
- name: persistence
|
||||
persistentVolumeClaim:
|
||||
claimName: files-nfs
|
||||
- name: config
|
||||
persistentVolumeClaim:
|
||||
claimName: syncthing-config
|
16
apps/files/syncthing/ingress.yaml
Normal file
16
apps/files/syncthing/ingress.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: rss-ingressroute
|
||||
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`syncthing2.kluster.moll.re`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: syncthing-web
|
||||
port: 8384
|
||||
tls:
|
||||
certResolver: default-tls
|
15
apps/files/syncthing/kustomization.yaml
Normal file
15
apps/files/syncthing/kustomization.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- pvc.yaml
|
||||
- deployment.yaml
|
||||
- service.yaml
|
||||
- ingress.yaml
|
||||
- servicemonitor.yaml
|
||||
# - syncthing-api.sealedsecret.yaml
|
||||
|
||||
images:
|
||||
- name: syncthing
|
||||
newName: syncthing/syncthing
|
||||
newTag: "1.27"
|
11
apps/files/syncthing/pvc.yaml
Normal file
11
apps/files/syncthing/pvc.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: syncthing-config
|
||||
spec:
|
||||
storageClassName: nfs-client
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
46
apps/files/syncthing/service.yaml
Normal file
46
apps/files/syncthing/service.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: syncthing-web
|
||||
labels:
|
||||
app: syncthing
|
||||
spec:
|
||||
selector:
|
||||
app: syncthing
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 8384
|
||||
targetPort: 8384
|
||||
name: syncthing-web
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: syncthing-listen
|
||||
annotations:
|
||||
metallb.universe.tf/allow-shared-ip: syncthing-service
|
||||
spec:
|
||||
selector:
|
||||
app: syncthing
|
||||
type: LoadBalancer
|
||||
loadBalancerIP: 192.168.3.5
|
||||
ports:
|
||||
- port: 22000
|
||||
targetPort: 22000
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: syncthing-discover
|
||||
annotations:
|
||||
metallb.universe.tf/allow-shared-ip: syncthing-service
|
||||
spec:
|
||||
selector:
|
||||
app: syncthing
|
||||
type: LoadBalancer
|
||||
loadBalancerIP: 192.168.3.5
|
||||
ports:
|
||||
- port: 22000
|
||||
targetPort: 22000
|
||||
protocol: UDP
|
16
apps/files/syncthing/servicemonitor.yaml
Normal file
16
apps/files/syncthing/servicemonitor.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: syncthing-servicemonitor
|
||||
labels:
|
||||
app: syncthing
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: syncthing
|
||||
endpoints:
|
||||
- port: syncthing-web
|
||||
path: /metrics
|
||||
bearerTokenSecret:
|
||||
name: syncthing-api
|
||||
key: token
|
30
apps/files1/deployment.yaml
Normal file
30
apps/files1/deployment.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: spacedrive
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: spacedrive
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: spacedrive
|
||||
spec:
|
||||
containers:
|
||||
- name: spacedrive
|
||||
image: spacedrive
|
||||
resources:
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "500m"
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- name: storage
|
||||
mountPath: /data
|
||||
|
||||
volumes:
|
||||
- name: storage
|
||||
persistentVolumeClaim:
|
||||
claimName: spacedrive-nfs
|
15
apps/files1/kustomization.yaml
Normal file
15
apps/files1/kustomization.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
namespace: files1
|
||||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- pvc.yaml
|
||||
- deployment.yaml
|
||||
|
||||
|
||||
images:
|
||||
- name: spacedrive
|
||||
newName: ghcr.io/spacedriveapp/spacedrive/server
|
||||
newTag: 0.2.4
|
apps/files1/namespace.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder
11
apps/files1/pvc.yaml
Normal file
11
apps/files1/pvc.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: spacedrive-nfs
|
||||
spec:
|
||||
storageClassName: nfs-client
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
@@ -1,12 +1,10 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: finance
|
||||
name: actualbudget
|
||||
labels:
|
||||
app: actualbudget
|
||||
spec:
|
||||
# deployment running a single container
|
||||
selector:
|
||||
matchLabels:
|
||||
app: actualbudget
|
||||
@@ -18,7 +16,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: actualbudget
|
||||
image: actualbudget/actual-server:latest
|
||||
image: actualbudget
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: TZ
|
||||
@@ -34,67 +32,3 @@ spec:
|
||||
- name: actualbudget-data-nfs
|
||||
persistentVolumeClaim:
|
||||
claimName: actualbudget-data-nfs
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
namespace: finance
|
||||
name: "actualbudget-data-nfs"
|
||||
spec:
|
||||
# storageClassName: fast
|
||||
capacity:
|
||||
storage: "5Gi"
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
nfs:
|
||||
path: /export/kluster/actualbudget
|
||||
server: 192.168.1.157
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
namespace: finance
|
||||
name: "actualbudget-data-nfs"
|
||||
spec:
|
||||
storageClassName: "fast"
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: "5Gi"
|
||||
# selector:
|
||||
# matchLabels:
|
||||
# directory: "journal-data"
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
namespace: finance
|
||||
name: actualbudget
|
||||
spec:
|
||||
selector:
|
||||
app: actualbudget
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 5006
|
||||
targetPort: 5006
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
namespace: finance
|
||||
name: actualbudget
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`actualbudget.kluster.moll.re`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: actualbudget
|
||||
port: 5006
|
||||
tls:
|
||||
certResolver: default-tls
|
||||
|
apps/finance/actualbudget.ingress.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: actualbudget
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`actualbudget.kluster.moll.re`)
      kind: Rule
      services:
        - name: actualbudget
          port: 5006
  tls:
    certResolver: default-tls
27
apps/finance/actualbudget.pvc.yaml
Normal file
27
apps/finance/actualbudget.pvc.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: "actualbudget-data-nfs"
|
||||
spec:
|
||||
capacity:
|
||||
storage: "5Gi"
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
nfs:
|
||||
path: /export/kluster/actualbudget
|
||||
server: 192.168.1.157
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: "actualbudget-data-nfs"
|
||||
spec:
|
||||
storageClassName: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: "5Gi"
|
||||
|
||||
volumeName: actualbudget-data-nfs
|
apps/finance/actualbudget.service.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: actualbudget
spec:
  selector:
    app: actualbudget
  ports:
    - protocol: TCP
      port: 5006
      targetPort: 5006
  type: ClusterIP
@@ -1,66 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: firefly-importer
|
||||
name: firefly-importer
|
||||
namespace: finance
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: firefly-importer
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: firefly-importer
|
||||
spec:
|
||||
containers:
|
||||
- image: fireflyiii/data-importer:latest
|
||||
imagePullPolicy: Always
|
||||
name: firefly-importer
|
||||
resources: {}
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: FIREFLY_III_ACCESS_TOKEN
|
||||
value: redacted
|
||||
- name: FIREFLY_III_URL
|
||||
value: firefly-http:8080
|
||||
# - name: APP_URL
|
||||
# value: https://finance.kluster.moll.re
|
||||
- name: TRUSTED_PROXIES
|
||||
value: "**"
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: firefly-importer-http
|
||||
namespace: finance
|
||||
labels:
|
||||
app: firefly-importer-http
|
||||
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 8080
|
||||
# name: http
|
||||
selector:
|
||||
app: firefly-importer
|
||||
---
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: firefly-importer-ingress
|
||||
namespace: finance
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`importer.finance.kluster.moll.re`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: firefly-importer-http
|
||||
port: 8080
|
||||
tls:
|
||||
certResolver: default-tls
|
@@ -1,79 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: firefly
|
||||
name: firefly
|
||||
namespace: finance
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: firefly
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: firefly
|
||||
spec:
|
||||
containers:
|
||||
- image: fireflyiii/core:latest
|
||||
imagePullPolicy: Always
|
||||
name: firefly
|
||||
resources: {}
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: APP_ENV
|
||||
value: "local"
|
||||
- name: APP_KEY
|
||||
value: iKejRAlgwx2Y/fxdosXjABbNxNzEuJdl
|
||||
- name: DB_CONNECTION
|
||||
value: sqlite
|
||||
- name: APP_URL
|
||||
value: https://finance.kluster.moll.re
|
||||
- name: TRUSTED_PROXIES
|
||||
value: "**"
|
||||
volumeMounts:
|
||||
- mountPath: /var/www/html/storage/database
|
||||
name: firefly-database
|
||||
|
||||
|
||||
volumes:
|
||||
- name: firefly-database
|
||||
persistentVolumeClaim:
|
||||
claimName: firefly-database-nfs
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: firefly-http
|
||||
namespace: finance
|
||||
labels:
|
||||
app: firefly-http
|
||||
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 8080
|
||||
# name: http
|
||||
selector:
|
||||
app: firefly
|
||||
---
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: firefly-ingress
|
||||
namespace: finance
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`finance.kluster.moll.re`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: firefly-http
|
||||
port: 8080
|
||||
tls:
|
||||
certResolver: default-tls
|
||||
|
||||
|
@@ -1,34 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
namespace: finance
|
||||
name: firefly-database-nfs
|
||||
labels:
|
||||
directory: firefly
|
||||
spec:
|
||||
# storageClassName: fast
|
||||
# volumeMode: Filesystem
|
||||
accessModes:
|
||||
- ReadOnlyMany
|
||||
capacity:
|
||||
storage: "1G"
|
||||
|
||||
nfs:
|
||||
path: /firefly # inside nfs part.
|
||||
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
namespace: finance
|
||||
name: firefly-database-nfs
|
||||
spec:
|
||||
resources:
|
||||
requests:
|
||||
storage: "1G"
|
||||
# storageClassName: fast
|
||||
accessModes:
|
||||
- ReadOnlyMany
|
||||
volumeName: firefly-database-nfs
|
||||
|
apps/finance/kustomization.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: finance

resources:
  - namespace.yaml
  - actualbudget.pvc.yaml
  - actualbudget.deployment.yaml
  - actualbudget.service.yaml
  - actualbudget.ingress.yaml

images:
  - name: actualbudget
    newName: actualbudget/actual-server
    newTag: 24.2.0
apps/finance/namespace.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder
@@ -1,17 +0,0 @@
|
||||
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
namespace: homarr
|
||||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- pvc.yaml
|
||||
- ingress.yaml
|
||||
|
||||
helmCharts:
|
||||
- name: homarr
|
||||
releaseName: homarr
|
||||
repo: https://oben01.github.io/charts/
|
||||
version: 1.0.1
|
||||
valuesFile: values.yaml
|
@@ -1,58 +0,0 @@
|
||||
# -- Default values for homarr
|
||||
# -- Declare variables to be passed into your templates.
|
||||
|
||||
# -- Number of replicas
|
||||
replicaCount: 1
|
||||
env:
|
||||
# -- Your local time zone
|
||||
TZ: "Europe/Berlin"
|
||||
# -- Colors and preferences, possible values dark / light
|
||||
DEFAULT_COLOR_SCHEME: "dark"
|
||||
|
||||
# -- Service configuration
|
||||
service:
|
||||
# -- Service type
|
||||
type: ClusterIP
|
||||
# -- Service port
|
||||
port: 7575
|
||||
# -- Service target port
|
||||
targetPort: 7575
|
||||
|
||||
# -- Ingress configuration
|
||||
ingress:
|
||||
enabled: false
|
||||
|
||||
persistence:
|
||||
- name: homarr-config
|
||||
# -- Enable homarr-config persistent storage
|
||||
enabled: true
|
||||
# -- homarr-config storage class name
|
||||
storageClassName: "nfs-client"
|
||||
# -- homarr-config access mode
|
||||
accessMode: "ReadWriteOnce"
|
||||
# -- homarr-config storage size
|
||||
size: "50Mi"
|
||||
# -- homarr-config mount path inside the pod
|
||||
mountPath: "/app/data/configs"
|
||||
- name: homarr-database
|
||||
# -- Enable homarr-database persistent storage
|
||||
enabled: true
|
||||
# -- homarr-database storage class name
|
||||
storageClassName: "nfs-client"
|
||||
# -- homarr-database access mode
|
||||
accessMode: "ReadWriteOnce"
|
||||
# -- homarr-database storage size
|
||||
size: "50Mi"
|
||||
# -- homarr-database mount path inside the pod
|
||||
mountPath: "/app/database"
|
||||
- name: homarr-icons
|
||||
# -- Enable homarr-icons persistent storage
|
||||
enabled: true
|
||||
# -- homarr-icons storage class name
|
||||
storageClassName: "nfs-client"
|
||||
# -- homarr-icons access mode
|
||||
accessMode: "ReadWriteOnce"
|
||||
# -- homarr-icons storage size
|
||||
size: "50Mi"
|
||||
# -- homarr-icons mount path inside the pod
|
||||
mountPath: "/app/public/icons"
|
@@ -13,4 +13,4 @@ resources:
images:
  - name: homeassistant/home-assistant
    newName: homeassistant/home-assistant
    newTag: "2023.12"
    newTag: "2024.2"
@@ -1,16 +1,24 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- ingress.yaml
|
||||
- pvc.yaml
|
||||
- postgres.sealedsecret.yaml
|
||||
- namespace.yaml
|
||||
- ingress.yaml
|
||||
- pvc.yaml
|
||||
- postgres.yaml
|
||||
- postgres.sealedsecret.yaml
|
||||
|
||||
namespace: immich
|
||||
|
||||
helmCharts:
|
||||
- name: immich
|
||||
releaseName: immich
|
||||
version: 0.2.0
|
||||
version: 0.3.1
|
||||
valuesFile: values.yaml
|
||||
repo: https://immich-app.github.io/immich-charts
|
||||
|
||||
|
||||
images:
|
||||
- name: ghcr.io/immich-app/immich-machine-learning
|
||||
newTag: v1.95.1
|
||||
- name: ghcr.io/immich-app/immich-server
|
||||
newTag: v1.95.1
|
||||
|
25
apps/immich/postgres.yaml
Normal file
25
apps/immich/postgres.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
|
||||
apiVersion: postgresql.cnpg.io/v1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: immich-postgres
|
||||
spec:
|
||||
instances: 1
|
||||
imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.2
|
||||
bootstrap:
|
||||
initdb:
|
||||
owner: immich
|
||||
database: immich
|
||||
secret:
|
||||
name: postgres-password
|
||||
|
||||
postgresql:
|
||||
shared_preload_libraries:
|
||||
- "vectors.so"
|
||||
|
||||
storage:
|
||||
size: 1Gi
|
||||
storageClass: nfs-client
|
||||
|
||||
monitoring:
|
||||
enablePodMonitor: true
|
@@ -2,15 +2,11 @@
|
||||
## You can find it at https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
|
||||
## Refer there for more detail about the supported values
|
||||
|
||||
|
||||
image:
|
||||
tag: v1.90.2
|
||||
|
||||
# These entries are shared between all the Immich components
|
||||
|
||||
env:
|
||||
REDIS_HOSTNAME: '{{ printf "%s-redis-master" .Release.Name }}'
|
||||
DB_HOSTNAME: "postgres-postgresql.postgres"
|
||||
DB_HOSTNAME: "immich-postgres-rw"
|
||||
DB_USERNAME:
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
@@ -26,11 +22,7 @@ env:
|
||||
secretKeyRef:
|
||||
name: postgres-password
|
||||
key: password
|
||||
TYPESENSE_ENABLED: "{{ .Values.typesense.enabled }}"
|
||||
TYPESENSE_API_KEY: "{{ .Values.typesense.env.TYPESENSE_API_KEY }}"
|
||||
TYPESENSE_HOST: '{{ printf "%s-typesense" .Release.Name }}'
|
||||
IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}'
|
||||
IMMICH_SERVER_URL: '{{ printf "http://%s-server:3001" .Release.Name }}'
|
||||
IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
|
||||
|
||||
immich:
|
||||
@@ -52,18 +44,6 @@ redis:
|
||||
auth:
|
||||
enabled: false
|
||||
|
||||
typesense:
|
||||
enabled: true
|
||||
env:
|
||||
TYPESENSE_DATA_DIR: /tsdata
|
||||
TYPESENSE_API_KEY: typesense
|
||||
persistence:
|
||||
tsdata:
|
||||
# Enabling typesense persistence is recommended to avoid slow reindexing
|
||||
enabled: true
|
||||
accessMode: ReadWriteOnce
|
||||
size: 1Gi
|
||||
|
||||
# Immich components
|
||||
|
||||
server:
|
||||
|
30
apps/matrix/kustomization.yaml
Normal file
30
apps/matrix/kustomization.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- postgres.yaml
|
||||
- synapse.deployment.yaml
|
||||
- synapse.service.yaml
|
||||
- synapse.configmap.yaml
|
||||
- synapse.ingress.yaml
|
||||
- postgres-credentials.secret.yaml
|
||||
|
||||
- mautrix.pvc.yaml
|
||||
- mautrix-telegram.statefulset.yaml
|
||||
- mautrix-telegram.configmap.yaml
|
||||
- mautrix-whatsapp.statefulset.yaml
|
||||
|
||||
|
||||
namespace: matrix
|
||||
|
||||
images:
|
||||
- name: mautrix-telegram
|
||||
newName: dock.mau.dev/mautrix/telegram
|
||||
newTag: "v0.15.1"
|
||||
- name: mautrix-whatsapp
|
||||
newName: dock.mau.dev/mautrix/whatsapp
|
||||
newTag: "v0.10.5"
|
||||
- name: synapse
|
||||
newName: ghcr.io/element-hq/synapse
|
||||
newTag: "v1.100.0"
|
511
apps/matrix/mautrix-telegram.configmap.yaml
Normal file
511
apps/matrix/mautrix-telegram.configmap.yaml
Normal file
@@ -0,0 +1,511 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mautrix-telegram
|
||||
data:
|
||||
config.yaml: |
|
||||
# Homeserver details
|
||||
homeserver:
|
||||
# The address that this appservice can use to connect to the homeserver.
|
||||
address: http://synapse:8448
|
||||
# The domain of the homeserver (for MXIDs, etc).
|
||||
domain: matrix.kluster.moll.re
|
||||
# Whether or not to verify the SSL certificate of the homeserver.
|
||||
# Only applies if address starts with https://
|
||||
verify_ssl: false
|
||||
# What software is the homeserver running?
|
||||
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
|
||||
software: standard
|
||||
# Number of retries for all HTTP requests if the homeserver isn't reachable.
|
||||
http_retry_count: 4
|
||||
# The URL to push real-time bridge status to.
|
||||
# If set, the bridge will make POST requests to this URL whenever a user's Telegram connection state changes.
|
||||
# The bridge will use the appservice as_token to authorize requests.
|
||||
status_endpoint: null
|
||||
# Endpoint for reporting per-message status.
|
||||
message_send_checkpoint_endpoint: null
|
||||
# Whether asynchronous uploads via MSC2246 should be enabled for media.
|
||||
# Requires a media repo that supports MSC2246.
|
||||
async_media: false
|
||||
# Application service host/registration related details
|
||||
# Changing these values requires regeneration of the registration.
|
||||
appservice:
|
||||
# The address that the homeserver can use to connect to this appservice.
|
||||
address: http://mautrix-telegram:29318
|
||||
# When using https:// the TLS certificate and key files for the address.
|
||||
tls_cert: false
|
||||
tls_key: false
|
||||
# The hostname and port where this appservice should listen.
|
||||
hostname: 0.0.0.0
|
||||
port: 29317
|
||||
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
|
||||
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
|
||||
max_body_size: 1
|
||||
# The full URI to the database. SQLite and Postgres are supported.
|
||||
# Format examples:
|
||||
# SQLite: sqlite:filename.db
|
||||
# Postgres: postgres://username:password@hostname/dbname
|
||||
database: sqlite:mautrix-telegram.db
|
||||
|
||||
# The unique ID of this appservice.
|
||||
id: telegram
|
||||
# Username of the appservice bot.
|
||||
bot_username: telegrambot
|
||||
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
|
||||
# to leave display name/avatar as-is.
|
||||
bot_displayname: Telegram bridge bot
|
||||
bot_avatar: mxc://maunium.net/tJCRmUyJDsgRNgqhOgoiHWbX
|
||||
# Whether or not to receive ephemeral events via appservice transactions.
|
||||
# Requires MSC2409 support (i.e. Synapse 1.22+).
|
||||
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
|
||||
ephemeral_events: true
|
||||
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
|
||||
as_token: "This value is generated when generating the registration"
|
||||
hs_token: "This value is generated when generating the registration"
|
||||
|
||||
# Bridge config
|
||||
bridge:
|
||||
# Localpart template of MXIDs for Telegram users.
|
||||
# {userid} is replaced with the user ID of the Telegram user.
|
||||
username_template: "telegram_{userid}"
|
||||
# Localpart template of room aliases for Telegram portal rooms.
|
||||
# {groupname} is replaced with the name part of the public channel/group invite link ( https://t.me/{} )
|
||||
alias_template: "telegram_{groupname}"
|
||||
# Displayname template for Telegram users.
|
||||
# {displayname} is replaced with the display name of the Telegram user.
|
||||
displayname_template: "{displayname} (Telegram)"
|
||||
# Set the preferred order of user identifiers which to use in the Matrix puppet display name.
|
||||
# In the (hopefully unlikely) scenario that none of the given keys are found, the numeric user
|
||||
# ID is used.
|
||||
#
|
||||
# If the bridge is working properly, a phone number or an username should always be known, but
|
||||
# the other one can very well be empty.
|
||||
#
|
||||
# Valid keys:
|
||||
# "full name" (First and/or last name)
|
||||
# "full name reversed" (Last and/or first name)
|
||||
# "first name"
|
||||
# "last name"
|
||||
# "username"
|
||||
# "phone number"
|
||||
displayname_preference:
|
||||
- full name
|
||||
- username
|
||||
- phone number
|
||||
# Maximum length of displayname
|
||||
displayname_max_length: 100
|
||||
# Remove avatars from Telegram ghost users when removed on Telegram. This is disabled by default
|
||||
# as there's no way to determine whether an avatar is removed or just hidden from some users. If
|
||||
# you're on a single-user instance, this should be safe to enable.
|
||||
allow_avatar_remove: false
|
||||
# Should contact names and profile pictures be allowed?
|
||||
# This is only safe to enable on single-user instances.
|
||||
allow_contact_info: false
|
||||
# Maximum number of members to sync per portal when starting up. Other members will be
|
||||
# synced when they send messages. The maximum is 10000, after which the Telegram server
|
||||
# will not send any more members.
|
||||
# -1 means no limit (which means it's limited to 10000 by the server)
|
||||
max_initial_member_sync: 100
|
||||
# Maximum number of participants in chats to bridge. Only applies when the portal is being created.
|
||||
# If there are more members when trying to create a room, the room creation will be cancelled.
|
||||
# -1 means no limit (which means all chats can be bridged)
|
||||
max_member_count: -1
|
||||
# Whether or not to sync the member list in channels.
|
||||
# If no channel admins have logged into the bridge, the bridge won't be able to sync the member
|
||||
# list regardless of this setting.
|
||||
sync_channel_members: false
|
||||
# Whether or not to skip deleted members when syncing members.
|
||||
skip_deleted_members: true
|
||||
# Whether or not to automatically synchronize contacts and chats of Matrix users logged into
|
||||
# their Telegram account at startup.
|
||||
startup_sync: false
|
||||
# Number of most recently active dialogs to check when syncing chats.
|
||||
# Set to 0 to remove limit.
|
||||
sync_update_limit: 0
|
||||
# Number of most recently active dialogs to create portals for when syncing chats.
|
||||
# Set to 0 to remove limit.
|
||||
sync_create_limit: 15
|
||||
# Should all chats be scheduled to be created later?
|
||||
# This is best used in combination with MSC2716 infinite backfill.
|
||||
sync_deferred_create_all: false
|
||||
# Whether or not to sync and create portals for direct chats at startup.
|
||||
sync_direct_chats: false
|
||||
# The maximum number of simultaneous Telegram deletions to handle.
|
||||
# A large number of simultaneous redactions could put strain on your homeserver.
|
||||
max_telegram_delete: 10
|
||||
# Whether or not to automatically sync the Matrix room state (mostly unpuppeted displaynames)
|
||||
# at startup and when creating a bridge.
|
||||
sync_matrix_state: true
|
||||
# Allow logging in within Matrix. If false, users can only log in using login-qr or the
|
||||
# out-of-Matrix login website (see appservice.public config section)
|
||||
allow_matrix_login: true
|
||||
# Whether or not to make portals of publicly joinable channels/supergroups publicly joinable on Matrix.
|
||||
public_portals: false
|
||||
# Whether or not to use /sync to get presence, read receipts and typing notifications
|
||||
# when double puppeting is enabled
|
||||
sync_with_custom_puppets: false
|
||||
# Whether or not to update the m.direct account data event when double puppeting is enabled.
|
||||
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
|
||||
# and is therefore prone to race conditions.
|
||||
sync_direct_chat_list: false
|
||||
# Servers to always allow double puppeting from
|
||||
double_puppet_server_map:
|
||||
example.com: https://example.com
|
||||
# Allow using double puppeting from any server with a valid client .well-known file.
|
||||
double_puppet_allow_discovery: false
|
||||
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
|
||||
#
|
||||
# If set, custom puppets will be enabled automatically for local users
|
||||
# instead of users having to find an access token and run `login-matrix`
|
||||
# manually.
|
||||
# If using this for other servers than the bridge's server,
|
||||
# you must also set the URL in the double_puppet_server_map.
|
||||
login_shared_secret_map:
|
||||
example.com: foobar
|
||||
# Set to false to disable link previews in messages sent to Telegram.
|
||||
telegram_link_preview: true
|
||||
# Whether or not the !tg join command should do a HTTP request
|
||||
# to resolve redirects in invite links.
|
||||
invite_link_resolve: false
|
||||
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
|
||||
# This is currently not supported in most clients.
|
||||
caption_in_message: false
|
||||
# Maximum size of image in megabytes before sending to Telegram as a document.
|
||||
image_as_file_size: 10
|
||||
# Maximum number of pixels in an image before sending to Telegram as a document. Defaults to 4096x4096 = 16777216.
|
||||
image_as_file_pixels: 16777216
|
||||
# Enable experimental parallel file transfer, which makes uploads/downloads much faster by
|
||||
# streaming from/to Matrix and using many connections for Telegram.
|
||||
# Note that generating HQ thumbnails for videos is not possible with streamed transfers.
|
||||
# This option uses internal Telethon implementation details and may break with minor updates.
|
||||
parallel_file_transfer: false
|
||||
# Whether or not created rooms should have federation enabled.
|
||||
# If false, created portal rooms will never be federated.
|
||||
federate_rooms: true
|
||||
# Should the bridge send all unicode reactions as custom emoji reactions to Telegram?
|
||||
# By default, the bridge only uses custom emojis for unicode emojis that aren't allowed in reactions.
|
||||
always_custom_emoji_reaction: false
|
||||
# Settings for converting animated stickers.
|
||||
animated_sticker:
|
||||
# Format to which animated stickers should be converted.
|
||||
# disable - No conversion, send as-is (gzipped lottie)
|
||||
# png - converts to non-animated png (fastest),
|
||||
# gif - converts to animated gif
|
||||
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
|
||||
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
|
||||
target: gif
|
||||
# Should video stickers be converted to the specified format as well?
|
||||
convert_from_webm: false
|
||||
# Arguments for converter. All converters take width and height.
|
||||
args:
|
||||
width: 256
|
||||
height: 256
|
||||
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
|
||||
# Settings for converting animated emoji.
|
||||
# Same as animated_sticker, but webm is not supported as the target
|
||||
# (because inline images can only contain images, not videos).
|
||||
animated_emoji:
|
||||
target: webp
|
||||
args:
|
||||
width: 64
|
||||
height: 64
|
||||
fps: 25
|
||||
# # End-to-bridge encryption support options.
|
||||
# #
|
||||
# # See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
|
||||
# encryption:
|
||||
# # Allow encryption, work in group chat rooms with e2ee enabled
|
||||
# allow: false
|
||||
# # Default to encryption, force-enable encryption in all portals the bridge creates
|
||||
# # This will cause the bridge bot to be in private chats for the encryption to work properly.
|
||||
# default: false
|
||||
# # Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
|
||||
# appservice: false
|
||||
# # Require encryption, drop any unencrypted messages.
|
||||
# require: false
|
||||
# # Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
|
||||
# # You must use a client that supports requesting keys from other users to use this feature.
|
||||
# allow_key_sharing: false
|
||||
# # Options for deleting megolm sessions from the bridge.
|
||||
# delete_keys:
|
||||
# # Beeper-specific: delete outbound sessions when hungryserv confirms
|
||||
# # that the user has uploaded the key to key backup.
|
||||
# delete_outbound_on_ack: false
|
||||
# # Don't store outbound sessions in the inbound table.
|
||||
# dont_store_outbound: false
|
||||
# # Ratchet megolm sessions forward after decrypting messages.
|
||||
# ratchet_on_decrypt: false
|
||||
# # Delete fully used keys (index >= max_messages) after decrypting messages.
|
||||
# delete_fully_used_on_decrypt: false
|
||||
# # Delete previous megolm sessions from same device when receiving a new one.
|
||||
# delete_prev_on_new_session: false
|
||||
# # Delete megolm sessions received from a device when the device is deleted.
|
||||
# delete_on_device_delete: false
|
||||
# # Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
|
||||
# periodically_delete_expired: false
|
||||
# # Delete inbound megolm sessions that don't have the received_at field used for
|
||||
# # automatic ratcheting and expired session deletion. This is meant as a migration
|
||||
# # to delete old keys prior to the bridge update.
|
||||
# delete_outdated_inbound: false
|
||||
# # What level of device verification should be required from users?
|
||||
# #
|
||||
# # Valid levels:
|
||||
# # unverified - Send keys to all device in the room.
|
||||
# # cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
|
||||
# # cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
|
||||
# # cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
|
||||
# # Note that creating user signatures from the bridge bot is not currently possible.
|
||||
# # verified - Require manual per-device verification
|
||||
# # (currently only possible by modifying the `trust` column in the `crypto_device` database table).
|
||||
# verification_levels:
|
||||
# # Minimum level for which the bridge should send keys to when bridging messages from Telegram to Matrix.
|
||||
# receive: unverified
|
||||
# # Minimum level that the bridge should accept for incoming Matrix messages.
|
||||
# send: unverified
|
||||
# # Minimum level that the bridge should require for accepting key requests.
|
||||
# share: cross-signed-tofu
|
||||
# # Options for Megolm room key rotation. These options allow you to
|
||||
# # configure the m.room.encryption event content. See:
|
||||
# # https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
|
||||
# # more information about that event.
|
||||
# rotation:
|
||||
# # Enable custom Megolm room key rotation settings. Note that these
|
||||
# # settings will only apply to rooms created after this option is
|
||||
# # set.
|
||||
# enable_custom: false
|
||||
# # The maximum number of milliseconds a session should be used
|
||||
# # before changing it. The Matrix spec recommends 604800000 (a week)
|
||||
# # as the default.
|
||||
# milliseconds: 604800000
|
||||
# # The maximum number of messages that should be sent with a given a
|
||||
# # session before changing it. The Matrix spec recommends 100 as the
|
||||
# # default.
|
||||
# messages: 100
|
||||
# # Disable rotating keys when a user's devices change?
|
||||
# # You should not enable this option unless you understand all the implications.
|
||||
# disable_device_change_key_rotation: false
|
||||
# Whether to explicitly set the avatar and room name for private chat portal rooms.
|
||||
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
|
||||
# If set to `always`, all DM rooms will have explicit names and avatars set.
|
||||
# If set to `never`, DM rooms will never have names and avatars set.
|
||||
private_chat_portal_meta: default
|
||||
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
|
||||
# but they're being phased out and will be completely removed in the future.
|
||||
disable_reply_fallbacks: false
|
||||
# Should cross-chat replies from Telegram be bridged? Most servers and clients don't support this.
|
||||
cross_room_replies: false
|
||||
# Whether or not the bridge should send a read receipt from the bridge bot when a message has
|
||||
# been sent to Telegram.
|
||||
delivery_receipts: false
|
||||
# Whether or not delivery errors should be reported as messages in the Matrix room.
|
||||
delivery_error_reports: false
|
||||
# Should errors in incoming message handling send a message to the Matrix room?
|
||||
incoming_bridge_error_reports: false
|
||||
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
|
||||
message_status_events: false
|
||||
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
|
||||
# This field will automatically be changed back to false after it,
|
||||
# except if the config file is not writable.
|
||||
resend_bridge_info: false
|
||||
# When using double puppeting, should muted chats be muted in Matrix?
|
||||
mute_bridging: false
|
||||
# When using double puppeting, should pinned chats be moved to a specific tag in Matrix?
|
||||
# The favorites tag is `m.favourite`.
|
||||
pinned_tag: null
|
||||
# Same as above for archived chats, the low priority tag is `m.lowpriority`.
|
||||
archive_tag: null
|
||||
# Whether or not mute status and tags should only be bridged when the portal room is created.
|
||||
tag_only_on_create: true
|
||||
# Should leaving the room on Matrix make the user leave on Telegram?
|
||||
bridge_matrix_leave: true
|
||||
# Should the user be kicked out of all portals when logging out of the bridge?
|
||||
kick_on_logout: true
|
||||
# Should the "* user joined Telegram" notice always be marked as read automatically?
|
||||
always_read_joined_telegram_notice: true
|
||||
# Should the bridge auto-create a group chat on Telegram when a ghost is invited to a room?
|
||||
# Requires the user to have sufficient power level and double puppeting enabled.
|
||||
create_group_on_invite: true
|
||||
# Settings for backfilling messages from Telegram.
|
||||
backfill:
|
||||
# Allow backfilling at all?
|
||||
enable: true
|
||||
# Whether or not to enable backfilling in normal groups.
|
||||
# Normal groups have numerous technical problems in Telegram, and backfilling normal groups
|
||||
# will likely cause problems if there are multiple Matrix users in the group.
|
||||
normal_groups: false
|
||||
# If a backfilled chat is older than this number of hours, mark it as read even if it's unread on Telegram.
|
||||
# Set to -1 to let any chat be unread.
|
||||
unread_hours_threshold: 720
|
||||
# Forward backfilling limits.
|
||||
#
|
||||
# Using a negative initial limit is not recommended, as it would try to backfill everything in a single batch.
|
||||
forward_limits:
|
||||
# Number of messages to backfill immediately after creating a portal.
|
||||
initial:
|
||||
user: 50
|
||||
normal_group: 100
|
||||
supergroup: 10
|
||||
channel: 10
|
||||
# Number of messages to backfill when syncing chats.
|
||||
sync:
|
||||
user: 100
|
||||
normal_group: 100
|
||||
supergroup: 100
|
||||
channel: 100
|
||||
# Timeout for forward backfills in seconds. If you have a high limit, you'll have to increase this too.
|
||||
forward_timeout: 900
|
||||
# Settings for incremental backfill of history. These only apply to Beeper, as upstream abandoned MSC2716.
|
||||
incremental:
|
||||
# Maximum number of messages to backfill per batch.
|
||||
messages_per_batch: 100
|
||||
# The number of seconds to wait after backfilling the batch of messages.
|
||||
post_batch_delay: 20
|
||||
# The maximum number of batches to backfill per portal, split by the chat type.
|
||||
# If set to -1, all messages in the chat will eventually be backfilled.
|
||||
max_batches:
|
||||
# Direct chats
|
||||
user: -1
|
||||
# Normal groups. Note that the normal_groups option above must be enabled
|
||||
# for these to be backfilled.
|
||||
normal_group: -1
|
||||
# Supergroups
|
||||
supergroup: 10
|
||||
# Broadcast channels
|
||||
channel: -1
|
||||
# Overrides for base power levels.
|
||||
initial_power_level_overrides:
|
||||
user: {}
|
||||
group: {}
|
||||
# Whether to bridge Telegram bot messages as m.notices or m.texts.
|
||||
bot_messages_as_notices: true
|
||||
bridge_notices:
|
||||
# Whether or not Matrix bot messages (type m.notice) should be bridged.
|
||||
default: false
|
||||
# List of user IDs for whom the previous flag is flipped.
|
||||
# e.g. if bridge_notices.default is false, notices from other users will not be bridged, but
|
||||
# notices from users listed here will be bridged.
|
||||
exceptions: []
|
||||
# An array of possible values for the $distinguisher variable in message formats.
|
||||
# Each user gets one of the values here, based on a hash of their user ID.
|
||||
# If the array is empty, the $distinguisher variable will also be empty.
|
||||
relay_user_distinguishers: ["\U0001F7E6", "\U0001F7E3", "\U0001F7E9", "⭕️", "\U0001F536", "⬛️", "\U0001F535", "\U0001F7E2"]
|
||||
# The formats to use when sending messages to Telegram via the relay bot.
|
||||
# Text msgtypes (m.text, m.notice and m.emote) support HTML, media msgtypes don't.
|
||||
#
|
||||
# Available variables:
|
||||
# $sender_displayname - The display name of the sender (e.g. Example User)
|
||||
# $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser)
|
||||
# $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com)
|
||||
# $distinguisher - A random string from the options in the relay_user_distinguishers array.
|
||||
# $message - The message content
|
||||
message_formats:
|
||||
m.text: "$distinguisher <b>$sender_displayname</b>: $message"
|
||||
m.notice: "$distinguisher <b>$sender_displayname</b>: $message"
|
||||
m.emote: "* $distinguisher <b>$sender_displayname</b> $message"
|
||||
m.file: "$distinguisher <b>$sender_displayname</b> sent a file: $message"
|
||||
m.image: "$distinguisher <b>$sender_displayname</b> sent an image: $message"
|
||||
m.audio: "$distinguisher <b>$sender_displayname</b> sent an audio file: $message"
|
||||
m.video: "$distinguisher <b>$sender_displayname</b> sent a video: $message"
|
||||
m.location: "$distinguisher <b>$sender_displayname</b> sent a location: $message"
|
||||
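To make the format variables concrete, here is a rough rendering of the m.text format above for a hypothetical sender (the name, MXID and distinguisher below are invented, not taken from this deployment):

```
# m.text: "$distinguisher <b>$sender_displayname</b>: $message"
# with $sender_mxid = @exampleuser:example.com, $sender_displayname = "Example User",
# and $distinguisher = "🟦" (picked from relay_user_distinguishers by hashing the MXID):
#
#   🟦 Example User: hello from Matrix
#
# (the display name appears bold on Telegram, since the format uses HTML <b> tags)
```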
# Telegram doesn't have built-in emotes; this field specifies how m.emote events from authenticated
# users are sent to Telegram. All fields in message_formats are supported. Additionally, the
|
||||
# Telegram user info is available in the following variables:
|
||||
# $displayname - Telegram displayname
|
||||
# $username - Telegram username (may not exist)
|
||||
# $mention - Telegram @username or displayname mention (depending on which exists)
|
||||
emote_format: "* $mention $formatted_body"
|
||||
# The formats to use when sending state events to Telegram via the relay bot.
|
||||
#
|
||||
# Variables from `message_formats` that have the `sender_` prefix are available without the prefix.
|
||||
# In name_change events, `$prev_displayname` is the previous displayname.
|
||||
#
|
||||
# Set format to an empty string to disable the messages for that event.
|
||||
state_event_formats:
|
||||
join: "$distinguisher <b>$displayname</b> joined the room."
|
||||
leave: "$distinguisher <b>$displayname</b> left the room."
|
||||
name_change: "$distinguisher <b>$prev_displayname</b> changed their name to $distinguisher <b>$displayname</b>"
|
||||
# Filter rooms that can/can't be bridged. Can also be managed using the `filter` and
|
||||
# `filter-mode` management commands.
|
||||
#
|
||||
# An empty blacklist will essentially disable the filter.
|
||||
filter:
|
||||
# Filter mode to use. Either "blacklist" or "whitelist".
|
||||
# If the mode is "blacklist", the listed chats will never be bridged.
|
||||
# If the mode is "whitelist", only the listed chats can be bridged.
|
||||
mode: blacklist
|
||||
# The list of group/channel IDs to filter.
|
||||
list: []
|
||||
# How to handle direct chats:
|
||||
# If users is "null", direct chats will follow the previous settings.
|
||||
# If users is "true", direct chats will always be bridged.
|
||||
# If users is "false", direct chats will never be bridged.
|
||||
users: true
|
||||
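For reference, a sketch of the whitelist variant of this filter (the chat IDs below are placeholders, not values from this config):

```
# filter:
#   mode: whitelist
#   list:
#     - -1001234567890   # placeholder supergroup ID
#     - 123456789        # placeholder group ID
#   users: true          # direct chats are still always bridged
```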
# The prefix for commands. Only required in non-management rooms.
|
||||
command_prefix: "!tg"
|
||||
# Messages sent upon joining a management room.
|
||||
# Markdown is supported. The defaults are listed below.
|
||||
management_room_text:
|
||||
# Sent when joining a room.
|
||||
welcome: "Hello, I'm a Telegram bridge bot."
|
||||
# Sent when joining a management room and the user is already logged in.
|
||||
welcome_connected: "Use `help` for help."
|
||||
# Sent when joining a management room and the user is not logged in.
|
||||
welcome_unconnected: "Use `help` for help or `login` to log in."
|
||||
# Optional extra text sent when joining a management room.
|
||||
additional_help: ""
|
||||
# Send each message separately (for readability in some clients)
|
||||
management_room_multiple_messages: false
|
||||
# Permissions for using the bridge.
|
||||
# Permitted values:
|
||||
# relaybot - Only use the bridge via the relaybot, no access to commands.
|
||||
# user - Relaybot level + access to commands to create bridges.
|
||||
# puppeting - User level + logging in with a Telegram account.
|
||||
# full - Full access to use the bridge, i.e. previous levels + Matrix login.
|
||||
# admin - Full access to use the bridge and some extra administration commands.
|
||||
# Permitted keys:
|
||||
# * - All Matrix users
|
||||
# domain - All users on that homeserver
|
||||
# mxid - Specific user
|
||||
permissions:
|
||||
"matrix.kluster.moll.re": "full"
|
||||
"@remy:matrix.kluster.moll.re": "admin"
|
||||
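To illustrate the levels listed above, a hypothetical extra entry (not part of this config) that grants a single guest account only the puppeting level would look like this:

```
# permissions:
#   "matrix.kluster.moll.re": "full"
#   "@remy:matrix.kluster.moll.re": "admin"
#   "@guest:matrix.kluster.moll.re": "puppeting"   # hypothetical additional entry
```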
# Options related to the message relay Telegram bot.
|
||||
relaybot:
|
||||
private_chat:
|
||||
# List of users to invite to the portal when someone starts a private chat with the bot.
|
||||
# If empty, private chats with the bot won't create a portal.
|
||||
invite: []
|
||||
# Whether or not to bridge state change messages in relaybot private chats.
|
||||
state_changes: true
|
||||
# When the invite list above is empty, this message is sent to users who /start the
# relaybot. Telegram's "markdown" is supported.
|
||||
message: This is a Matrix bridge relaybot and does not support direct chats
|
||||
# List of users to invite to all group chat portals created by the bridge.
|
||||
group_chat_invite: []
|
||||
# Whether or not the relaybot should ignore events in unbridged group chats.
|
||||
# If false, portals will be created when the relaybot receives messages, just like normal
|
||||
# users. This behavior is usually not desirable, as it interferes with manually bridging
|
||||
# the chat to another room.
|
||||
ignore_unbridged_group_chat: true
|
||||
# Whether or not to allow creating portals from Telegram.
|
||||
authless_portals: true
|
||||
# Whether or not to allow Telegram group admins to use the bot commands.
|
||||
whitelist_group_admins: true
|
||||
# Whether or not to ignore incoming events sent by the relay bot.
|
||||
ignore_own_incoming_events: true
|
||||
# List of usernames/user IDs who are also allowed to use the bot commands.
|
||||
whitelist:
|
||||
- myusername
|
||||
- 12345678
|
||||
# Telegram config
|
||||
telegram:
|
||||
# Get your own API keys at https://my.telegram.org/apps
|
||||
api_id: 862555
|
||||
api_hash: 7387a7b6ba71793d6f3fa98261117e4e
|
||||
# (Optional) Create your own bot at https://t.me/BotFather
|
||||
bot_token: disabled
|
||||
# Should the bridge request missed updates from Telegram when restarting?
|
||||
catch_up: true
|
||||
# Should incoming updates be handled sequentially to make sure order is preserved on Matrix?
|
||||
sequential_updates: true
|
||||
exit_on_update_error: false
|
apps/matrix/mautrix-telegram.statefulset.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mautrix-telegram
spec:
  selector:
    matchLabels:
      app: mautrix-telegram
  serviceName: mautrix-telegram
  replicas: 1
  template:
    metadata:
      labels:
        app: mautrix-telegram
    spec:
      containers:
        - name: mautrix-telegram
          image: mautrix-telegram
          volumeMounts:
            - name: config
              mountPath: /data/config.yaml
              subPath: config.yaml
            - name: persistence
              mountPath: /data
          args:
            - --no-update # disable overwriting config.yaml
      volumes:
        - name: config
          configMap:
            name: mautrix-telegram
        - name: persistence
          emptyDir: {}
apps/matrix/mautrix-whatsapp.configmap.yaml (new file, 428 lines)
@@ -0,0 +1,428 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mautrix-whatsapp
|
||||
data:
|
||||
config.yaml: |
|
||||
# Homeserver details.
|
||||
homeserver:
|
||||
# The address that this appservice can use to connect to the homeserver.
|
||||
address: http://synapse:8448
|
||||
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
|
||||
domain: matrix.kluster.moll.re
|
||||
|
||||
# What software is the homeserver running?
|
||||
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
|
||||
software: standard
|
||||
# The URL to push real-time bridge status to.
|
||||
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
|
||||
# The bridge will use the appservice as_token to authorize requests.
|
||||
status_endpoint: null
|
||||
# Endpoint for reporting per-message status.
|
||||
message_send_checkpoint_endpoint: null
|
||||
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
|
||||
async_media: false
|
||||
|
||||
# Should the bridge use a websocket for connecting to the homeserver?
|
||||
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
|
||||
# mautrix-asmux (deprecated), and hungryserv (proprietary).
|
||||
websocket: false
|
||||
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
|
||||
ping_interval_seconds: 0
|
||||
|
||||
# Application service host/registration related details.
|
||||
# Changing these values requires regeneration of the registration.
|
||||
appservice:
|
||||
# The address that the homeserver can use to connect to this appservice.
|
||||
address: http://mautrix-whatsapp:29318
|
||||
|
||||
# The hostname and port where this appservice should listen.
|
||||
hostname: 0.0.0.0
|
||||
port: 29318
|
||||
|
||||
# Database config.
|
||||
database:
|
||||
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
|
||||
type: sqlite3-fk-wal
|
||||
# The database URI.
|
||||
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
|
||||
# https://github.com/mattn/go-sqlite3#connection-string
|
||||
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
|
||||
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
|
||||
uri: file:/data/mautrix-whatsapp.db?_txlock=immediate
|
||||
# Maximum number of connections. Mostly relevant for Postgres.
|
||||
max_open_conns: 20
|
||||
max_idle_conns: 2
|
||||
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
|
||||
# Parsed with https://pkg.go.dev/time#ParseDuration
|
||||
max_conn_idle_time: null
|
||||
max_conn_lifetime: null
|
||||
|
||||
# The unique ID of this appservice.
|
||||
id: whatsapp
|
||||
# Appservice bot details.
|
||||
bot:
|
||||
# Username of the appservice bot.
|
||||
username: whatsappbot
|
||||
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
|
||||
# to leave display name/avatar as-is.
|
||||
displayname: WhatsApp bridge bot
|
||||
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
|
||||
|
||||
# Whether or not to receive ephemeral events via appservice transactions.
|
||||
# Requires MSC2409 support (i.e. Synapse 1.22+).
|
||||
ephemeral_events: true
|
||||
|
||||
# Should incoming events be handled asynchronously?
|
||||
# This may be necessary for large public instances with lots of messages going through.
|
||||
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
|
||||
async_transactions: false
|
||||
|
||||
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
|
||||
as_token: "This value is generated when generating the registration"
|
||||
hs_token: "This value is generated when generating the registration"
|
||||
|
||||
# Segment-compatible analytics endpoint for tracking some events, like provisioning API login and encryption errors.
|
||||
analytics:
|
||||
# Hostname of the tracking server. The path is hardcoded to /v1/track
|
||||
host: api.segment.io
|
||||
# API key to send with tracking requests. Tracking is disabled if this is null.
|
||||
token: null
|
||||
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
|
||||
user_id: null
|
||||
|
||||
# Prometheus config.
|
||||
metrics:
|
||||
# Enable prometheus metrics?
|
||||
enabled: false
|
||||
# IP and port where the metrics listener should be. The path is always /metrics
|
||||
listen: 127.0.0.1:8001
|
||||
|
||||
# Config for things that are directly sent to WhatsApp.
|
||||
whatsapp:
|
||||
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
|
||||
os_name: Mautrix-WhatsApp bridge
|
||||
# Browser name that determines the logo shown in the mobile app.
|
||||
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
|
||||
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
|
||||
browser_name: unknown
|
||||
|
||||
# Bridge config
|
||||
bridge:
|
||||
# Localpart template of MXIDs for WhatsApp users.
|
||||
# {{.}} is replaced with the phone number of the WhatsApp user.
|
||||
username_template: whatsapp_{{.}}
|
||||
# Displayname template for WhatsApp users.
|
||||
# {{.PushName}} - nickname set by the WhatsApp user
|
||||
# {{.BusinessName}} - validated WhatsApp business name
|
||||
# {{.Phone}} - phone number (international format)
|
||||
# The following variables are also available, but will cause problems on multi-user instances:
|
||||
# {{.FullName}} - full name from contact list
|
||||
# {{.FirstName}} - first name from contact list
|
||||
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
|
||||
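A quick sketch of how that Go template resolves: `or` picks the first non-empty value, so (with invented example data) the result would be:

```
# {{or .BusinessName .PushName .JID}} (WA)
#   BusinessName = "ACME Support"                    -> "ACME Support (WA)"
#   BusinessName = "", PushName = "Jane"             -> "Jane (WA)"
#   BusinessName = "", PushName = "" (JID fallback)  -> "4915112345678@s.whatsapp.net (WA)"
```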
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
|
||||
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
|
||||
personal_filtering_spaces: false
|
||||
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
|
||||
delivery_receipts: false
|
||||
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
|
||||
message_status_events: false
|
||||
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
|
||||
message_error_notices: true
|
||||
# Should incoming calls send a message to the Matrix room?
|
||||
call_start_notices: true
|
||||
# Should another user's cryptographic identity changing send a message to Matrix?
|
||||
identity_change_notices: false
|
||||
portal_message_buffer: 128
|
||||
# Settings for handling history sync payloads.
|
||||
history_sync:
|
||||
# Enable backfilling history sync payloads from WhatsApp?
|
||||
backfill: true
|
||||
# The maximum number of initial conversations that should be synced.
|
||||
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
|
||||
max_initial_conversations: -1
|
||||
# Maximum number of messages to backfill in each conversation.
|
||||
# Set to -1 to disable limit.
|
||||
message_count: 50
|
||||
# Should the bridge request a full sync from the phone when logging in?
|
||||
# This bumps the size of history syncs from 3 months to 1 year.
|
||||
request_full_sync: false
|
||||
# Configuration parameters that are sent to the phone along with the request full sync flag.
|
||||
# By default (when the values are null or 0), the config isn't sent at all.
|
||||
full_sync_config:
|
||||
# Number of days of history to request.
|
||||
# The limit seems to be around 3 years, but using higher values doesn't break.
|
||||
days_limit: null
|
||||
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
|
||||
size_mb_limit: null
|
||||
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
|
||||
storage_quota_mb: null
|
||||
# If this value is greater than 0, then if the conversation's last message was more than
|
||||
# this number of hours ago, then the conversation will automatically be marked as read.
|
||||
# Conversations that have a last message that is less than this number of hours ago will
|
||||
# have their unread status synced from WhatsApp.
|
||||
unread_hours_threshold: 0
|
||||
|
||||
|
||||
|
||||
# Should puppet avatars be fetched from the server even if an avatar is already set?
|
||||
user_avatar_sync: true
|
||||
# Should Matrix users leaving groups be bridged to WhatsApp?
|
||||
bridge_matrix_leave: true
|
||||
# Should the bridge update the m.direct account data event when double puppeting is enabled.
|
||||
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
|
||||
# and is therefore prone to race conditions.
|
||||
sync_direct_chat_list: false
|
||||
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
|
||||
# WhatsApp and set the unread status on initial backfill?
|
||||
# This will only work on clients that support the m.marked_unread or
|
||||
# com.famedly.marked_unread room account data.
|
||||
sync_manual_marked_unread: true
|
||||
# When double puppeting is enabled, users can use `!wa toggle` to change whether
|
||||
# presence is bridged. This setting sets the default value.
|
||||
# Existing users won't be affected when these are changed.
|
||||
default_bridge_presence: true
|
||||
# Send the presence as "available" to whatsapp when users start typing on a portal.
|
||||
# This works as a workaround for homeservers that do not support presence, and allows
|
||||
# users to see when the whatsapp user on the other side is typing during a conversation.
|
||||
send_presence_on_typing: false
|
||||
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
|
||||
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
|
||||
#
|
||||
# By default, the bridge acts like WhatsApp web, which only sends active delivery
|
||||
# receipts when it's in the foreground.
|
||||
force_active_delivery_receipts: false
|
||||
# Servers to always allow double puppeting from
|
||||
double_puppet_server_map:
|
||||
example.com: https://example.com
|
||||
# Allow using double puppeting from any server with a valid client .well-known file.
|
||||
double_puppet_allow_discovery: false
|
||||
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
|
||||
#
|
||||
# If set, double puppeting will be enabled automatically for local users
|
||||
# instead of users having to find an access token and run `login-matrix`
|
||||
# manually.
|
||||
login_shared_secret_map:
|
||||
example.com: foobar
|
||||
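As a sketch, enabling automatic double puppeting for this homeserver would mean replacing the example entry with one keyed by this server's domain (the secret value is a placeholder that has to match the synapse-shared-secret-auth configuration):

```
# login_shared_secret_map:
#   matrix.kluster.moll.re: "<shared secret from the synapse-shared-secret-auth module>"
```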
# Whether to explicitly set the avatar and room name for private chat portal rooms.
|
||||
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
|
||||
# If set to `always`, all DM rooms will have explicit names and avatars set.
|
||||
# If set to `never`, DM rooms will never have names and avatars set.
|
||||
private_chat_portal_meta: default
|
||||
# Should group members be synced in parallel? This makes member sync faster
|
||||
parallel_member_sync: false
|
||||
# Should Matrix m.notice-type messages be bridged?
|
||||
bridge_notices: true
|
||||
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
|
||||
# This field will automatically be changed back to false afterwards, unless the config file is not writable.
|
||||
resend_bridge_info: false
|
||||
# When using double puppeting, should muted chats be muted in Matrix?
|
||||
mute_bridging: false
|
||||
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
|
||||
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
|
||||
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
|
||||
archive_tag: null
|
||||
# Same as above, but for pinned chats. The favorite tag is called m.favourite
|
||||
pinned_tag: null
|
||||
# Should mute status and tags only be bridged when the portal room is created?
|
||||
tag_only_on_create: true
|
||||
# Should WhatsApp status messages be bridged into a Matrix room?
|
||||
# Disabling this won't affect already created status broadcast rooms.
|
||||
enable_status_broadcast: true
|
||||
# Should sending WhatsApp status messages be allowed?
|
||||
# This can cause issues if the user has lots of contacts, so it's disabled by default.
|
||||
disable_status_broadcast_send: true
|
||||
# Should the status broadcast room be muted and moved into low priority by default?
|
||||
# This is only applied when creating the room, the user can unmute it later.
|
||||
mute_status_broadcast: true
|
||||
# Tag to apply to the status broadcast room.
|
||||
status_broadcast_tag: m.lowpriority
|
||||
# Should the bridge use thumbnails from WhatsApp?
|
||||
# They're disabled by default due to very low resolution.
|
||||
whatsapp_thumbnail: false
|
||||
# Allow invite permission for users, i.e. let them invite arbitrary bots into rooms with
# WhatsApp users (private chats and groups).
|
||||
allow_user_invite: false
|
||||
# Whether or not created rooms should have federation enabled.
|
||||
# If false, created portal rooms will never be federated.
|
||||
federate_rooms: true
|
||||
# Should the bridge never send alerts to the bridge management room?
|
||||
# These are mostly things like the user being logged out.
|
||||
disable_bridge_alerts: false
|
||||
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
|
||||
# This is only safe on single-user bridges.
|
||||
crash_on_stream_replaced: false
|
||||
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
|
||||
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
|
||||
# key in the event content even if this is disabled.
|
||||
url_previews: false
|
||||
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
|
||||
# This is currently not supported in most clients.
|
||||
caption_in_message: false
|
||||
# Send galleries as a single event? This is not an MSC (yet).
|
||||
beeper_galleries: false
|
||||
# Should polls be sent using MSC3381 event types?
|
||||
extev_polls: false
|
||||
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
|
||||
cross_room_replies: false
|
||||
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
|
||||
# but they're being phased out and will be completely removed in the future.
|
||||
disable_reply_fallbacks: false
|
||||
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
|
||||
# Null means there's no enforced timeout.
|
||||
message_handling_timeout:
|
||||
# Send an error message after this timeout, but keep waiting for the response until the deadline.
|
||||
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
|
||||
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
|
||||
error_after: null
|
||||
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
|
||||
# This is counted from the time the bridge starts handling the message.
|
||||
deadline: 120s
|
||||
|
||||
# The prefix for commands. Only required in non-management rooms.
|
||||
command_prefix: "!wa"
|
||||
|
||||
# Messages sent upon joining a management room.
|
||||
# Markdown is supported. The defaults are listed below.
|
||||
management_room_text:
|
||||
# Sent when joining a room.
|
||||
welcome: "Hello, I'm a WhatsApp bridge bot."
|
||||
# Sent when joining a management room and the user is already logged in.
|
||||
welcome_connected: "Use `help` for help."
|
||||
# Sent when joining a management room and the user is not logged in.
|
||||
welcome_unconnected: "Use `help` for help or `login` to log in."
|
||||
# Optional extra text sent when joining a management room.
|
||||
additional_help: ""
|
||||
|
||||
# End-to-bridge encryption support options.
|
||||
#
|
||||
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
|
||||
encryption:
|
||||
# Allow encryption, work in group chat rooms with e2ee enabled
|
||||
allow: false
|
||||
# Default to encryption, force-enable encryption in all portals the bridge creates
|
||||
# This will cause the bridge bot to be in private chats for the encryption to work properly.
|
||||
default: false
|
||||
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
|
||||
appservice: false
|
||||
# Require encryption, drop any unencrypted messages.
|
||||
require: false
|
||||
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
|
||||
# You must use a client that supports requesting keys from other users to use this feature.
|
||||
allow_key_sharing: false
|
||||
# Should user mentions be included in the event wire content so the server can send push notifications?
|
||||
plaintext_mentions: false
|
||||
# Options for deleting megolm sessions from the bridge.
|
||||
delete_keys:
|
||||
# Beeper-specific: delete outbound sessions when hungryserv confirms
|
||||
# that the user has uploaded the key to key backup.
|
||||
delete_outbound_on_ack: false
|
||||
# Don't store outbound sessions in the inbound table.
|
||||
dont_store_outbound: false
|
||||
# Ratchet megolm sessions forward after decrypting messages.
|
||||
ratchet_on_decrypt: false
|
||||
# Delete fully used keys (index >= max_messages) after decrypting messages.
|
||||
delete_fully_used_on_decrypt: false
|
||||
# Delete previous megolm sessions from same device when receiving a new one.
|
||||
delete_prev_on_new_session: false
|
||||
# Delete megolm sessions received from a device when the device is deleted.
|
||||
delete_on_device_delete: false
|
||||
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
|
||||
periodically_delete_expired: false
|
||||
# Delete inbound megolm sessions that don't have the received_at field used for
|
||||
# automatic ratcheting and expired session deletion. This is meant as a migration
|
||||
# to delete old keys prior to the bridge update.
|
||||
delete_outdated_inbound: false
|
||||
# What level of device verification should be required from users?
|
||||
#
|
||||
# Valid levels:
|
||||
# unverified - Send keys to all devices in the room.
|
||||
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
|
||||
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
|
||||
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
|
||||
# Note that creating user signatures from the bridge bot is not currently possible.
|
||||
# verified - Require manual per-device verification
|
||||
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
|
||||
verification_levels:
|
||||
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
|
||||
receive: unverified
|
||||
# Minimum level that the bridge should accept for incoming Matrix messages.
|
||||
send: unverified
|
||||
# Minimum level that the bridge should require for accepting key requests.
|
||||
share: cross-signed-tofu
|
||||
# Options for Megolm room key rotation. These options allow you to
|
||||
# configure the m.room.encryption event content. See:
|
||||
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
|
||||
# more information about that event.
|
||||
rotation:
|
||||
# Enable custom Megolm room key rotation settings. Note that these
|
||||
# settings will only apply to rooms created after this option is
|
||||
# set.
|
||||
enable_custom: false
|
||||
# The maximum number of milliseconds a session should be used
|
||||
# before changing it. The Matrix spec recommends 604800000 (a week)
|
||||
# as the default.
|
||||
milliseconds: 604800000
|
||||
# The maximum number of messages that should be sent with a given
|
||||
# session before changing it. The Matrix spec recommends 100 as the
|
||||
# default.
|
||||
messages: 100
|
||||
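For reference, when enable_custom is turned on these two values end up in the m.room.encryption state event of newly created portal rooms, roughly like this (a sketch based on the spec link above):

```
# m.room.encryption content in a newly created portal (sketch):
#   algorithm: m.megolm.v1.aes-sha2
#   rotation_period_ms: 604800000
#   rotation_period_msgs: 100
```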
|
||||
# Disable rotating keys when a user's devices change?
|
||||
# You should not enable this option unless you understand all the implications.
|
||||
disable_device_change_key_rotation: false
|
||||
|
||||
# Settings for provisioning API
|
||||
provisioning:
|
||||
# Prefix for the provisioning API paths.
|
||||
prefix: /_matrix/provision
|
||||
# Shared secret for authentication. If set to "generate", a random secret will be generated,
|
||||
# or if set to "disable", the provisioning API will be disabled.
|
||||
shared_secret: generate
|
||||
# Enable debug API at /debug with provisioning authentication.
|
||||
debug_endpoints: false
|
||||
|
||||
# Permissions for using the bridge.
|
||||
# Permitted values:
|
||||
# relay - Talk through the relaybot (if enabled), no access otherwise
|
||||
# user - Access to use the bridge to chat with a WhatsApp account.
|
||||
# admin - User level and some additional administration tools
|
||||
# Permitted keys:
|
||||
# * - All Matrix users
|
||||
# domain - All users on that homeserver
|
||||
# mxid - Specific user
|
||||
permissions:
|
||||
"*": relay
|
||||
"example.com": user
|
||||
"@admin:example.com": admin
|
||||
|
||||
# Settings for relay mode
|
||||
relay:
|
||||
# Whether relay mode should be allowed. If allowed, `!wa set-relay` can be used to turn any
|
||||
# authenticated user into a relaybot for that chat.
|
||||
enabled: false
|
||||
# Should only admins be allowed to set themselves as relay users?
|
||||
admin_only: true
|
||||
# The formats to use when sending messages to WhatsApp via the relaybot.
|
||||
message_formats:
|
||||
m.text: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
|
||||
m.notice: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
|
||||
m.emote: "* <b>{{ .Sender.Displayname }}</b> {{ .Message }}"
|
||||
m.file: "<b>{{ .Sender.Displayname }}</b> sent a file"
|
||||
m.image: "<b>{{ .Sender.Displayname }}</b> sent an image"
|
||||
m.audio: "<b>{{ .Sender.Displayname }}</b> sent an audio file"
|
||||
m.video: "<b>{{ .Sender.Displayname }}</b> sent a video"
|
||||
m.location: "<b>{{ .Sender.Displayname }}</b> sent a location"
|
||||
|
||||
# Logging config. See https://github.com/tulir/zeroconfig for details.
|
||||
logging:
|
||||
min_level: debug
|
||||
writers:
|
||||
- type: stdout
|
||||
format: pretty-colored
|
||||
- type: file
|
||||
format: json
|
||||
filename: ./logs/mautrix-whatsapp.log
|
||||
max_size: 100
|
||||
max_backups: 10
|
||||
compress: true
|
apps/matrix/mautrix-whatsapp.statefulset.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mautrix-whatsapp
spec:
  selector:
    matchLabels:
      app: mautrix-whatsapp
  serviceName: mautrix-whatsapp
  replicas: 1
  template:
    metadata:
      labels:
        app: mautrix-whatsapp
    spec:
      containers:
        - name: mautrix-whatsapp
          image: mautrix-whatsapp
          volumeMounts:
            - name: persistence
              mountPath: /data
              # contains config.yaml
      securityContext:
        fsGroup: 1337

      volumes:
        - name: persistence
          persistentVolumeClaim:
            claimName: mautrix-whatsapp
apps/matrix/mautrix.pvc.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mautrix-telegram
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mautrix-whatsapp
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
apps/matrix/namespace.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder
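The namespace is created with the name `placeholder`, which suggests the real name is set by kustomize. The kustomization.yaml for this app is not part of the diff shown here, so the following is only a sketch of what it could look like (resource list inferred from the files above; the namespace name and image mappings are assumptions):

```
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: matrix                 # would override the "placeholder" name above
resources:
  - namespace.yaml
  - postgres.yaml
  - synapse.configmap.yaml
  - synapse.deployment.yaml
  - synapse.service.yaml
  - synapse.ingress.yaml
  - mautrix-telegram.configmap.yaml
  - mautrix-telegram.statefulset.yaml
  - mautrix-whatsapp.configmap.yaml
  - mautrix-whatsapp.statefulset.yaml
  - mautrix.pvc.yaml
images:
  - name: synapse                 # bare image name used in the deployment above
    newName: matrixdotorg/synapse
    newTag: latest                # placeholder tag
```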
apps/matrix/postgres.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: matrix-postgres
spec:
  instances: 1
  imageName: ghcr.io/cloudnative-pg/postgresql:16
  bootstrap:
    initdb:
      owner: matrix
      database: matrix
      secret:
        name: postgres-credentials

  storage:
    size: 1Gi
    storageClass: nfs-client

  monitoring:
    enablePodMonitor: true
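The bootstrap section references a `postgres-credentials` secret. CloudNativePG expects this to be a basic-auth style secret; a sketch of its shape is below (values are placeholders, and in this repo it would presumably be committed as a SealedSecret rather than a plain Secret):

```
apiVersion: v1
kind: Secret
metadata:
  name: postgres-credentials
type: kubernetes.io/basic-auth
stringData:
  username: matrix
  password: change-me             # placeholder
```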
apps/matrix/synapse.configmap.yaml (new file, 62 lines)
@@ -0,0 +1,62 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: synapse
data:
  # matrix.kluster.moll.re.log.config: |
  #   version: 1

  #   formatters:
  #     precise:
  #       format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

  #   handlers:
  #     console:
  #       class: logging.StreamHandler
  #       formatter: precise

  #   loggers:
  #     # This is just here so we can leave `loggers` in the config regardless of whether
  #     # we configure other loggers below (avoid empty yaml dict error).
  #     _placeholder:
  #       level: "INFO"

  #     synapse.storage.SQL:
  #       # beware: increasing this to DEBUG will make synapse log sensitive
  #       # information such as access tokens.
  #       level: INFO


  #   root:
  #     level: INFO
  #     handlers: [console]

  homeserver.yaml: |
    server_name: "matrix.kluster.moll.re"
    report_stats: false
    # enable_registration: true
    # enable_registration_without_verification: true
    listeners:
      - port: 8448
        tls: false
        type: http
        x_forwarded: true
        bind_addresses: ['::1', '127.0.0.1']
        resources:
          - names: [client, federation]
            compress: false

    # log_config: "./matrix.kluster.moll.re.log.config"
    media_store_path: /media_store
    trusted_key_servers:
      - server_name: "matrix.org"
    database:
      name: psycopg2
      args:
        user: matrix
        password: "0ssdsdsdM6vbxhs.kdjsdasd9Z0qK5bdTwM6vbxh9Z"
        dbname: matrix
        host: matrix-postgres-rw
        cp_min: 5
        cp_max: 10
apps/matrix/synapse.deployment.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: synapse
spec:
  selector:
    matchLabels:
      app: synapse
  template:
    metadata:
      labels:
        app: synapse
    spec:
      containers:
        - name: synapse
          image: synapse
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          ports:
            - containerPort: 8448
          env:
            - name: SYNAPSE_CONFIG_PATH
              value: /config/homeserver.yaml
          volumeMounts:
            - name: config
              mountPath: /config/homeserver.yaml
              subPath: homeserver.yaml
            - name: config-persistence
              mountPath: /config
            - name: media
              mountPath: /media_store
      securityContext:
        fsGroup: 1001
      volumes:
        - name: config
          configMap:
            name: synapse
        - name: config-persistence
          emptyDir: {}
        - name: media
          emptyDir: {}
apps/matrix/synapse.ingress.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: synapse-federation
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`matrix.kluster.moll.re`)
      kind: Rule
      services:
        - name: synapse
          port: 8448
      # auto route to the _matrix path
      middlewares:
        - name: matrix-redirect

  tls:
    certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: matrix-redirect
spec:
  redirectRegex:
    regex: "^https://matrix.kluster.moll.re/(.*)"
    replacement: "https://matrix.kluster.moll.re/_matrix/$${1}"
    permanent: true
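To make the middleware's effect concrete, an illustrative request (not taken from a real log):

```
# With the redirectRegex above, a request such as
#   GET https://matrix.kluster.moll.re/client/versions
# is answered with a permanent redirect to
#   https://matrix.kluster.moll.re/_matrix/client/versions
# (assuming the doubled $$ in $${1} is an escaped reference to the first capture group)
```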
apps/matrix/synapse.service.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  name: synapse
spec:
  selector:
    app: synapse
  ports:
    - protocol: TCP
      port: 8448
      targetPort: 8448
@@ -26,7 +26,7 @@ spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`media-backend.kluster.moll.re`)
|
||||
- match: Host(`media-backend.kluster.moll.re`) && !Path(`/metrics`)
|
||||
middlewares:
|
||||
- name: jellyfin-websocket
|
||||
- name: jellyfin-server-headers
|
||||
|
apps/media/jellyfin.servicemonitor.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: jellyfin
  labels:
    metrics: prometheus
spec:
  selector:
    matchLabels:
      app: jellyfin-server-service
  endpoints:
    - path: /metrics
      targetPort: jellyfin

# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/
# the metrics are available at /metrics but blocked by the ingress
@@ -10,6 +10,7 @@ resources:
|
||||
- web.deployment.yaml
|
||||
- web.service.yaml
|
||||
- ingress.yaml
|
||||
- jellyfin.servicemonitor.yaml
|
||||
|
||||
images:
|
||||
- name: jellyfin/jellyfin
|
||||
|
@@ -20,6 +20,7 @@ spec:
|
||||
cpu: "2"
|
||||
ports:
|
||||
- containerPort: 8096
|
||||
name: jellyfin
|
||||
env:
|
||||
- name: TZ
|
||||
value: Europe/Berlin
|
||||
|
@@ -3,6 +3,8 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: jellyfin-server
|
||||
labels:
|
||||
app: jellyfin-server-service
|
||||
spec:
|
||||
selector:
|
||||
app: jellyfin-server
|
||||
|
@@ -13,5 +13,3 @@ spec:
|
||||
port: 80
|
||||
tls:
|
||||
certResolver: default-tls
|
||||
|
||||
|
||||
|
@@ -1,15 +1,10 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: grafana-nfs
|
||||
labels:
|
||||
directory: grafana
|
||||
spec:
|
||||
# storageClassName: slow
|
||||
capacity:
|
||||
storage: "1Gi"
|
||||
# volumeMode: Filesystem
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
nfs:
|
||||
@@ -21,15 +16,10 @@ kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: grafana-nfs
|
||||
spec:
|
||||
# storageClassName: slow
|
||||
storageClassName: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: "1Gi"
|
||||
selector:
|
||||
matchLabels:
|
||||
directory: grafana
|
||||
|
||||
|
||||
|
||||
volumeName: grafana-nfs
|
||||
|
@@ -1,149 +1,9 @@
|
||||
rbac:
|
||||
create: true
|
||||
## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
|
||||
# useExistingRole: name-of-some-(cluster)role
|
||||
pspEnabled: true
|
||||
pspUseAppArmor: true
|
||||
namespaced: false
|
||||
extraRoleRules: []
|
||||
# - apiGroups: []
|
||||
# resources: []
|
||||
# verbs: []
|
||||
extraClusterRoleRules: []
|
||||
# - apiGroups: []
|
||||
# resources: []
|
||||
# verbs: []
|
||||
serviceAccount:
|
||||
create: true
|
||||
name:
|
||||
nameTest:
|
||||
## Service account annotations. Can be templated.
|
||||
# annotations:
|
||||
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
|
||||
autoMount: true
|
||||
|
||||
replicas: 1
|
||||
|
||||
## Create a headless service for the deployment
|
||||
headlessService: false
|
||||
|
||||
## Create HorizontalPodAutoscaler object for deployment type
|
||||
#
|
||||
autoscaling:
|
||||
enabled: false
|
||||
# minReplicas: 1
|
||||
# maxReplicas: 10
|
||||
# metrics:
|
||||
# - type: Resource
|
||||
# resource:
|
||||
# name: cpu
|
||||
# targetAverageUtilization: 60
|
||||
# - type: Resource
|
||||
# resource:
|
||||
# name: memory
|
||||
# targetAverageUtilization: 60
|
||||
|
||||
## See `kubectl explain poddisruptionbudget.spec` for more
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
|
||||
podDisruptionBudget: {}
|
||||
# minAvailable: 1
|
||||
# maxUnavailable: 1
|
||||
|
||||
## See `kubectl explain deployment.spec.strategy` for more
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
|
||||
deploymentStrategy:
|
||||
type: RollingUpdate
|
||||
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /api/health
|
||||
port: 3000
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /api/health
|
||||
port: 3000
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 30
|
||||
failureThreshold: 10
|
||||
|
||||
## Use an alternate scheduler, e.g. "stork".
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
# schedulerName: "default-scheduler"
|
||||
|
||||
image:
|
||||
repository: grafana/grafana
|
||||
tag: 9.0.2
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## Can be templated.
|
||||
##
|
||||
# pullSecrets:
|
||||
# - myRegistrKeySecretName
|
||||
|
||||
testFramework:
|
||||
enabled: true
|
||||
image: "bats/bats"
|
||||
tag: "v1.4.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext: {}
|
||||
|
||||
securityContext:
|
||||
runAsUser: 472
|
||||
runAsGroup: 472
|
||||
fsGroup: 472
|
||||
|
||||
containerSecurityContext:
|
||||
{}
|
||||
|
||||
# Extra configmaps to mount in grafana pods
|
||||
# Values are templated.
|
||||
extraConfigmapMounts: []
|
||||
# - name: certs-configmap
|
||||
# mountPath: /etc/grafana/ssl/
|
||||
# subPath: certificates.crt # (optional)
|
||||
# configMap: certs-configmap
|
||||
# readOnly: true
|
||||
|
||||
|
||||
extraEmptyDirMounts: []
|
||||
# - name: provisioning-notifiers
|
||||
# mountPath: /etc/grafana/provisioning/notifiers
|
||||
|
||||
|
||||
# Apply extra labels to common labels.
|
||||
extraLabels: {}
|
||||
|
||||
## Assign a PriorityClassName to pods if set
|
||||
# priorityClassName:
|
||||
|
||||
downloadDashboardsImage:
|
||||
repository: curlimages/curl
|
||||
tag: 7.73.0
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
downloadDashboards:
|
||||
env: {}
|
||||
envFromSecret: ""
|
||||
resources: {}
|
||||
|
||||
## Pod Annotations
|
||||
# podAnnotations: {}
|
||||
|
||||
## Pod Labels
|
||||
# podLabels: {}
|
||||
|
||||
podPortName: grafana
|
||||
|
||||
## Deployment annotations
|
||||
# annotations: {}
|
||||
|
||||
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
|
||||
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
|
||||
## ref: http://kubernetes.io/docs/user-guide/services/
|
||||
@@ -163,128 +23,10 @@ serviceMonitor:
|
||||
## https://github.com/coreos/prometheus-operator
|
||||
##
|
||||
enabled: false
|
||||
path: /metrics
|
||||
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
|
||||
labels: {}
|
||||
interval: 1m
|
||||
scheme: http
|
||||
tlsConfig: {}
|
||||
scrapeTimeout: 30s
|
||||
relabelings: []
|
||||
|
||||
extraExposePorts: []
|
||||
# - name: keycloak
|
||||
# port: 8080
|
||||
# targetPort: 8080
|
||||
# type: ClusterIP
|
||||
|
||||
# overrides pod.spec.hostAliases in the grafana deployment's pods
|
||||
hostAliases: []
|
||||
# - ip: "1.2.3.4"
|
||||
# hostnames:
|
||||
# - "my.host.com"
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
|
||||
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
|
||||
# ingressClassName: nginx
|
||||
# Values can be templated
|
||||
annotations: {
|
||||
kubernetes.io/ingress.class: nginx,
|
||||
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
|
||||
}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
labels: {}
|
||||
path: /
|
||||
|
||||
# pathType is only for k8s >= 1.19
|
||||
pathType: Prefix
|
||||
|
||||
hosts:
|
||||
- grafana.kluster.moll.re
|
||||
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
|
||||
extraPaths: []
|
||||
# - path: /*
|
||||
# backend:
|
||||
# serviceName: ssl-redirect
|
||||
# servicePort: use-annotation
|
||||
## Or for k8s > 1.19
|
||||
# - path: /*
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: ssl-redirect
|
||||
# port:
|
||||
# name: use-annotation
|
||||
|
||||
|
||||
tls:
|
||||
- hosts:
|
||||
- grafana.kluster.moll.re
|
||||
secretName: cloudflare-letsencrypt-issuer-account-key
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
## Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
#
|
||||
nodeSelector: {}
|
||||
|
||||
## Tolerations for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
|
||||
## Affinity for pod assignment (evaluated as template)
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
|
||||
## Additional init containers (evaluated as template)
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
|
||||
##
|
||||
extraInitContainers: []
|
||||
|
||||
## Enable and specify containers in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
|
||||
extraContainers: ""
|
||||
# extraContainers: |
|
||||
# - name: proxy
|
||||
# image: quay.io/gambol99/keycloak-proxy:latest
|
||||
# args:
|
||||
# - -provider=github
|
||||
# - -client-id=
|
||||
# - -client-secret=
|
||||
# - -github-org=<ORG_NAME>
|
||||
# - -email-domain=*
|
||||
# - -cookie-secret=
|
||||
# - -http-address=http://0.0.0.0:4181
|
||||
# - -upstream-url=http://127.0.0.1:3000
|
||||
# ports:
|
||||
# - name: proxy-web
|
||||
# containerPort: 4181
|
||||
|
||||
## Volumes that can be used in init containers that will not be mounted to deployment pods
|
||||
extraContainerVolumes: []
|
||||
# - name: volume-from-secret
|
||||
# secret:
|
||||
# secretName: secret-to-mount
|
||||
# - name: empty-dir-volume
|
||||
# emptyDir: {}
|
||||
|
||||
## Enable persistence using Persistent Volume Claims
|
||||
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
|
||||
##
|
||||
enabled: false
|
||||
persistence:
|
||||
type: pvc
|
||||
enabled: true
|
||||
@@ -318,556 +60,6 @@ initChownData:
|
||||
##
|
||||
enabled: true
|
||||
|
||||
## initChownData container image
|
||||
##
|
||||
image:
|
||||
repository: busybox
|
||||
tag: "1.31.1"
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## initChownData resource requests and limits
|
||||
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
##
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
|
||||
# Administrator credentials when not using an existing secret (see below)
|
||||
adminUser: admin
|
||||
# adminPassword: strongpassword
|
||||
|
||||
# Use an existing secret for the admin user.
|
||||
admin:
|
||||
## Name of the secret. Can be templated.
|
||||
existingSecret: ""
|
||||
userKey: admin-user
|
||||
passwordKey: admin-password
|
||||
|
||||
## Define command to be executed at startup by grafana container
|
||||
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
|
||||
## Default is "run.sh" as defined in grafana's Dockerfile
|
||||
# command:
|
||||
# - "sh"
|
||||
# - "/run.sh"
|
||||
|
||||
## Use an alternate scheduler, e.g. "stork".
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
# schedulerName:
|
||||
|
||||
## Extra environment variables that will be pass onto deployment pods
|
||||
##
|
||||
## to provide grafana with access to CloudWatch on AWS EKS:
|
||||
## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
|
||||
## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
|
||||
## same oidc eks provider as noted before (same as the existing line)
|
||||
## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
|
||||
##
|
||||
## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
|
||||
##
|
||||
## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
|
||||
## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
|
||||
##
|
||||
## env:
|
||||
## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
|
||||
## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
|
||||
## AWS_REGION: us-east-1
|
||||
##
|
||||
## 5. uncomment the EKS section in extraSecretMounts: below
|
||||
## 6. uncomment the annotation section in the serviceAccount: above
|
||||
## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
|
||||
|
||||
env: {}
|
||||
|
||||
## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
|
||||
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
|
||||
## Renders in container spec as:
|
||||
## env:
|
||||
## ...
|
||||
## - name: <key>
|
||||
## valueFrom:
|
||||
## <value rendered as YAML>
|
||||
envValueFrom: {}
|
||||
# ENV_NAME:
|
||||
# configMapKeyRef:
|
||||
# name: configmap-name
|
||||
# key: value_key
|
||||
|
||||
## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
|
||||
## This can be useful for auth tokens, etc. Value is templated.
|
||||
envFromSecret: ""
|
||||
|
||||
## Sensitive environment variables that will be rendered as a new secret object
|
||||
## This can be useful for auth tokens, etc
|
||||
envRenderSecret: {}
|
||||
|
||||
## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
|
||||
## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
|
||||
## Name is templated.
|
||||
envFromSecrets: []
|
||||
## - name: secret-name
|
||||
## optional: true
|
||||
|
||||
## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
|
||||
## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key.
|
||||
## Name is templated.
|
||||
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
|
||||
envFromConfigMaps: []
|
||||
## - name: configmap-name
|
||||
## optional: true
|
||||
|
||||
# Inject Kubernetes services as environment variables.
|
||||
# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
|
||||
enableServiceLinks: true
|
||||
|
||||
## Additional grafana server secret mounts
|
||||
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
|
||||
extraSecretMounts: []
|
||||
# - name: secret-files
|
||||
# mountPath: /etc/secrets
|
||||
# secretName: grafana-secret-files
|
||||
# readOnly: true
|
||||
# subPath: ""
|
||||
#
|
||||
# for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
|
||||
# - name: aws-iam-token
|
||||
# mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
|
||||
# readOnly: true
|
||||
# projected:
|
||||
# defaultMode: 420
|
||||
# sources:
|
||||
# - serviceAccountToken:
|
||||
# audience: sts.amazonaws.com
|
||||
# expirationSeconds: 86400
|
||||
# path: token
|
||||
#
|
||||
# for CSI e.g. Azure Key Vault use the following
|
||||
# - name: secrets-store-inline
|
||||
# mountPath: /run/secrets
|
||||
# readOnly: true
|
||||
# csi:
|
||||
# driver: secrets-store.csi.k8s.io
|
||||
# readOnly: true
|
||||
# volumeAttributes:
|
||||
# secretProviderClass: "akv-grafana-spc"
|
||||
# nodePublishSecretRef: # Only required when using service principal mode
|
||||
# name: grafana-akv-creds # Only required when using service principal mode
|
||||
|
||||
## Additional grafana server volume mounts
|
||||
# Defines additional volume mounts.
|
||||
extraVolumeMounts: []
|
||||
# - name: extra-volume-0
|
||||
# mountPath: /mnt/volume0
|
||||
# readOnly: true
|
||||
# existingClaim: volume-claim
|
||||
# - name: extra-volume-1
|
||||
# mountPath: /mnt/volume1
|
||||
# readOnly: true
|
||||
# hostPath: /usr/shared/
|
||||
|
||||
## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
|
||||
lifecycleHooks: {}
|
||||
# postStart:
|
||||
# exec:
|
||||
# command: []
|
||||
|
||||
## Pass the plugins you want installed as a list.
|
||||
##
|
||||
plugins: []
|
||||
# - digrich-bubblechart-panel
|
||||
# - grafana-clock-panel
|
||||
|
||||
## Configure grafana datasources
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#datasources
|
||||
##
|
||||
datasources: {}
|
||||
# datasources.yaml:
|
||||
# apiVersion: 1
|
||||
# datasources:
|
||||
# - name: Prometheus
|
||||
# type: prometheus
|
||||
# url: http://prometheus-prometheus-server
|
||||
# access: proxy
|
||||
# isDefault: true
|
||||
# - name: CloudWatch
|
||||
# type: cloudwatch
|
||||
# access: proxy
|
||||
# uid: cloudwatch
|
||||
# editable: false
|
||||
# jsonData:
|
||||
# authType: default
|
||||
# defaultRegion: us-east-1
|
||||
|
||||
## Configure notifiers
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
|
||||
##
|
||||
notifiers: {}
|
||||
# notifiers.yaml:
|
||||
# notifiers:
|
||||
# - name: email-notifier
|
||||
# type: email
|
||||
# uid: email1
|
||||
# # either:
|
||||
# org_id: 1
|
||||
# # or
|
||||
# org_name: Main Org.
|
||||
# is_default: true
|
||||
# settings:
|
||||
# addresses: an_email_address@example.com
|
||||
# delete_notifiers:
|
||||
|
||||
## Configure grafana dashboard providers
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
|
||||
##
|
||||
## `path` must be /var/lib/grafana/dashboards/<provider_name>
|
||||
##
|
||||
dashboardProviders: {}
|
||||
# dashboardproviders.yaml:
|
||||
# apiVersion: 1
|
||||
# providers:
|
||||
# - name: 'default'
|
||||
# orgId: 1
|
||||
# folder: ''
|
||||
# type: file
|
||||
# disableDeletion: false
|
||||
# editable: true
|
||||
# options:
|
||||
# path: /var/lib/grafana/dashboards/default
|
||||
|
||||
## Configure grafana dashboard to import
|
||||
## NOTE: To use dashboards you must also enable/configure dashboardProviders
|
||||
## ref: https://grafana.com/dashboards
|
||||
##
|
||||
## dashboards per provider, use provider name as key.
|
||||
##
|
||||
dashboards: {}
|
||||
# default:
|
||||
# some-dashboard:
|
||||
# json: |
|
||||
# $RAW_JSON
|
||||
# custom-dashboard:
|
||||
# file: dashboards/custom-dashboard.json
|
||||
# prometheus-stats:
|
||||
# gnetId: 2
|
||||
# revision: 2
|
||||
# datasource: Prometheus
|
||||
# local-dashboard:
|
||||
# url: https://example.com/repository/test.json
|
||||
# token: ''
|
||||
# local-dashboard-base64:
|
||||
# url: https://example.com/repository/test-b64.json
|
||||
# token: ''
|
||||
# b64content: true
|
||||
|
||||
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
|
||||
## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
|
||||
## ConfigMap data example:
|
||||
##
|
||||
## data:
|
||||
## example-dashboard.json: |
|
||||
## RAW_JSON
|
||||
##
|
||||
dashboardsConfigMaps: {}
|
||||
# default: ""
|
||||
|
||||
## Grafana's primary configuration
|
||||
## NOTE: values in map will be converted to ini format
|
||||
## ref: http://docs.grafana.org/installation/configuration/
|
||||
##
|
||||
grafana.ini:
|
||||
paths:
|
||||
data: /var/lib/grafana/
|
||||
logs: /var/log/grafana
|
||||
plugins: /var/lib/grafana/plugins
|
||||
provisioning: /etc/grafana/provisioning
|
||||
analytics:
|
||||
check_for_updates: true
|
||||
log:
|
||||
mode: console
|
||||
grafana_net:
|
||||
url: https://grafana.net
|
||||
## grafana Authentication can be enabled with the following values on grafana.ini
|
||||
# server:
|
||||
# The full public facing url you use in browser, used for redirects and emails
|
||||
# root_url:
|
||||
# https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
|
||||
# auth.github:
|
||||
# enabled: false
|
||||
# allow_sign_up: false
|
||||
# scopes: user:email,read:org
|
||||
# auth_url: https://github.com/login/oauth/authorize
|
||||
# token_url: https://github.com/login/oauth/access_token
|
||||
# api_url: https://api.github.com/user
|
||||
# team_ids:
|
||||
# allowed_organizations:
|
||||
# client_id:
|
||||
# client_secret:
|
||||
## LDAP Authentication can be enabled with the following values on grafana.ini
|
||||
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
|
||||
# auth.ldap:
|
||||
# enabled: true
|
||||
# allow_sign_up: true
|
||||
# config_file: /etc/grafana/ldap.toml
|
||||
|
||||
## Grafana's LDAP configuration
|
||||
## Templated by the template in _helpers.tpl
|
||||
## NOTE: To enable LDAP, grafana.ini must be configured with auth.ldap.enabled
|
||||
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
|
||||
## ref: http://docs.grafana.org/installation/ldap/#configuration
|
||||
ldap:
|
||||
enabled: false
|
||||
# `existingSecret` is a reference to an existing secret containing the ldap configuration
|
||||
# for Grafana in a key `ldap-toml`.
|
||||
existingSecret: ""
|
||||
# `config` is the content of `ldap.toml` that will be stored in the created secret
|
||||
config: ""
|
||||
# config: |-
|
||||
# verbose_logging = true
|
||||
|
||||
# [[servers]]
|
||||
# host = "my-ldap-server"
|
||||
# port = 636
|
||||
# use_ssl = true
|
||||
# start_tls = false
|
||||
# ssl_skip_verify = false
|
||||
# bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
|
||||
|
||||
## Grafana's SMTP configuration
|
||||
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
|
||||
## ref: http://docs.grafana.org/installation/configuration/#smtp
|
||||
smtp:
|
||||
# `existingSecret` is a reference to an existing secret containing the smtp configuration
|
||||
# for Grafana.
|
||||
existingSecret: ""
|
||||
userKey: "user"
|
||||
passwordKey: "password"
|
||||
|
||||
## Sidecars that collect the configmaps with the specified label and store the included files into the respective folders
|
||||
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
|
||||
sidecar:
|
||||
image:
|
||||
repository: quay.io/kiwigrid/k8s-sidecar
|
||||
tag: 1.15.6
|
||||
sha: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
# requests:
|
||||
# cpu: 50m
|
||||
# memory: 50Mi
|
||||
securityContext: {}
|
||||
# skipTlsVerify: set to true to skip TLS verification for kube api calls
|
||||
# skipTlsVerify: true
|
||||
enableUniqueFilenames: false
|
||||
readinessProbe: {}
|
||||
livenessProbe: {}
|
||||
dashboards:
|
||||
enabled: false
|
||||
SCProvider: true
|
||||
# label that the configmaps with dashboards are marked with
|
||||
label: grafana_dashboard
|
||||
# value of label that the configmaps with dashboards are set to
|
||||
labelValue: null
|
||||
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
|
||||
folder: /tmp/dashboards
|
||||
# The default folder name; if set, a subfolder is created under `folder` and dashboards are put there instead
|
||||
defaultFolderName: null
|
||||
# Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces.
|
||||
searchNamespace: null
|
||||
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
||||
watchMethod: WATCH
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
# If specified, the sidecar will look for annotation with this name to create folder and put graph here.
|
||||
# You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create a folder structure.
|
||||
folderAnnotation: null
|
||||
# Absolute path to shell script to execute after a configmap got reloaded
|
||||
script: null
|
||||
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that time.
|
||||
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
||||
# watchServerTimeout: 3600
|
||||
#
|
||||
# watchClientTimeout: is a client-side timeout, configuring your local socket.
|
||||
# If you have a network outage dropping all packets with no RST/FIN,
|
||||
# this is how long your client waits before realizing & dropping the connection.
|
||||
# defaults to 66sec (sic!)
|
||||
# watchClientTimeout: 60
|
||||
#
|
||||
# provider configuration that lets grafana manage the dashboards
|
||||
provider:
|
||||
# name of the provider, should be unique
|
||||
name: sidecarProvider
|
||||
# orgid as configured in grafana
|
||||
orgid: 1
|
||||
# folder in which the dashboards should be imported in grafana
|
||||
folder: ''
|
||||
# type of the provider
|
||||
type: file
|
||||
# disableDelete to activate an import-only behaviour
|
||||
disableDelete: false
|
||||
# allow updating provisioned dashboards from the UI
|
||||
allowUiUpdates: false
|
||||
# allow Grafana to replicate dashboard structure from filesystem
|
||||
foldersFromFilesStructure: false
|
||||
# Additional dashboard sidecar volume mounts
|
||||
extraMounts: []
|
||||
# Sets the size limit of the dashboard sidecar emptyDir volume
|
||||
sizeLimit: {}
|
||||
datasources:
|
||||
enabled: false
|
||||
# label that the configmaps with datasources are marked with
|
||||
label: grafana_datasource
|
||||
# value of label that the configmaps with datasources are set to
|
||||
labelValue: null
|
||||
# If specified, the sidecar will search for datasource config-maps inside this namespace.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
||||
watchMethod: WATCH
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
# Endpoint to send request to reload datasources
|
||||
reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
|
||||
skipReload: false
|
||||
# Deploy the datasource sidecar as an initContainer in addition to a container.
|
||||
# This is needed if skipReload is true, to load any datasources defined at startup time.
|
||||
initDatasources: false
|
||||
# Sets the size limit of the datasource sidecar emptyDir volume
|
||||
sizeLimit: {}
|
||||
plugins:
|
||||
enabled: false
|
||||
# label that the configmaps with plugins are marked with
|
||||
label: grafana_plugin
|
||||
# value of label that the configmaps with plugins are set to
|
||||
labelValue: null
|
||||
# If specified, the sidecar will search for plugin config-maps inside this namespace.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
||||
watchMethod: WATCH
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
# Endpoint to send request to reload plugins
|
||||
reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
|
||||
skipReload: false
|
||||
# Deploy the datasource sidecar as an initContainer in addition to a container.
|
||||
# This is needed if skipReload is true, to load any plugins defined at startup time.
|
||||
initPlugins: false
|
||||
# Sets the size limit of the plugin sidecar emptyDir volume
|
||||
sizeLimit: {}
|
||||
notifiers:
|
||||
enabled: false
|
||||
# label that the configmaps with notifiers are marked with
|
||||
label: grafana_notifier
|
||||
# If specified, the sidecar will search for notifier config-maps inside this namespace.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
# Sets the size limit of the notifier sidecar emptyDir volume
|
||||
sizeLimit: {}
|
||||
|
||||
## Override the deployment namespace
|
||||
##
|
||||
namespaceOverride: ""
|
||||
|
||||
## Number of old ReplicaSets to retain
|
||||
##
|
||||
revisionHistoryLimit: 10
|
||||
|
||||
## Add a separate remote image renderer deployment/service
|
||||
imageRenderer:
|
||||
# Enable the image-renderer deployment & service
|
||||
enabled: false
|
||||
replicas: 1
|
||||
image:
|
||||
# image-renderer Image repository
|
||||
repository: grafana/grafana-image-renderer
|
||||
# image-renderer Image tag
|
||||
tag: latest
|
||||
# image-renderer Image sha (optional)
|
||||
sha: ""
|
||||
# image-renderer ImagePullPolicy
|
||||
pullPolicy: Always
|
||||
# extra environment variables
|
||||
env:
|
||||
HTTP_HOST: "0.0.0.0"
|
||||
# RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
|
||||
# RENDERING_MODE: clustered
|
||||
# IGNORE_HTTPS_ERRORS: true
|
||||
# image-renderer deployment serviceAccount
|
||||
serviceAccountName: ""
|
||||
# image-renderer deployment securityContext
|
||||
securityContext: {}
|
||||
# image-renderer deployment Host Aliases
|
||||
hostAliases: []
|
||||
# image-renderer deployment priority class
|
||||
priorityClassName: ''
|
||||
service:
|
||||
# Enable the image-renderer service
|
||||
enabled: true
|
||||
# image-renderer service port name
|
||||
portName: 'http'
|
||||
# image-renderer service port used by both service and deployment
|
||||
port: 8081
|
||||
targetPort: 8081
|
||||
# If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
|
||||
grafanaProtocol: http
|
||||
# In case a sub_path is used this needs to be added to the image renderer callback
|
||||
grafanaSubPath: ""
|
||||
# name of the image-renderer port on the pod
|
||||
podPortName: http
|
||||
# number of image-renderer replica sets to keep
|
||||
revisionHistoryLimit: 10
|
||||
networkPolicy:
|
||||
# Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
|
||||
limitIngress: true
|
||||
# Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
|
||||
limitEgress: false
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
# requests:
|
||||
# cpu: 50m
|
||||
# memory: 50Mi
|
||||
## Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
#
|
||||
nodeSelector: {}
|
||||
|
||||
## Tolerations for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
|
||||
## Affinity for pod assignment (evaluated as template)
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
|
||||
# Create dynamic manifests via values:
|
||||
extraObjects: []
|
||||
# - apiVersion: "kubernetes-client.io/v1"
|
||||
# kind: ExternalSecret
|
||||
# metadata:
|
||||
# name: grafana-secrets
|
||||
# spec:
|
||||
# backendType: gcpSecretsManager
|
||||
# data:
|
||||
# - key: grafana-admin-password
|
||||
# name: adminPassword
|
||||
|
||||
|
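A quick illustration of the sidecar settings above: with `sidecar.dashboards.enabled: true`, the sidecar watches for ConfigMaps carrying the configured label (`grafana_dashboard`; any value is accepted while `labelValue` is left at null) and writes their contents into the configured `folder` (`/tmp/dashboards`). A minimal sketch of such a ConfigMap — the name, namespace and dashboard JSON are placeholders, not part of this repository:

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-dashboard          # hypothetical name
  namespace: monitoring
  labels:
    grafana_dashboard: "1"         # must match sidecar.dashboards.label
data:
  example-dashboard.json: |
    { "title": "Example dashboard", "panels": [] }
```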
@@ -1,157 +0,0 @@
|
||||
## Default values.yaml for Telegraf
|
||||
## This is a YAML-formatted file.
|
||||
## ref: https://hub.docker.com/r/library/telegraf/tags/
|
||||
|
||||
image:
|
||||
repo: "telegraf"
|
||||
tag: "1.22"
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## Configure resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
resources:
|
||||
requests:
|
||||
memory: 256Mi
|
||||
cpu: 0.1
|
||||
limits:
|
||||
memory: 1Gi
|
||||
cpu: 1
|
||||
|
||||
## Pod annotations
|
||||
podAnnotations: {}
|
||||
|
||||
## Pod labels
|
||||
podLabels: {}
|
||||
|
||||
## Configure args passed to Telegraf containers
|
||||
args: []
|
||||
|
||||
## The name of a secret in the same kubernetes namespace which contains values to
|
||||
## be added to the environment (must be manually created)
|
||||
## This can be useful for auth tokens, etc.
|
||||
# envFromSecret: "telegraf-tokens"
|
||||
|
||||
## Environment
|
||||
env:
|
||||
# This pulls HOSTNAME from the node, not the pod.
|
||||
- name: HOSTNAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
# In test clusters where hostnames are resolved in /etc/hosts on each node,
|
||||
# the HOSTNAME is not resolvable from inside containers
|
||||
# So inject the host IP as well
|
||||
- name: HOSTIP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.hostIP
|
||||
# Mount the host filesystem and set the appropriate env variables.
|
||||
# ref: https://github.com/influxdata/telegraf/blob/master/docs/FAQ.md
|
||||
# HOST_PROC is required by the cpu, disk, diskio, kernel and processes input plugins
|
||||
- name: "HOST_PROC"
|
||||
value: "/hostfs/proc"
|
||||
# HOST_SYS is required by the diskio plugin
|
||||
- name: "HOST_SYS"
|
||||
value: "/hostfs/sys"
|
||||
- name: "HOST_MOUNT_PREFIX"
|
||||
value: "/hostfs"
|
||||
|
||||
## Add custom volumes and mounts
|
||||
# volumes:
|
||||
# - name: telegraf-output-influxdb2
|
||||
# configMap:
|
||||
# name: "telegraf-output-influxdb2"
|
||||
# mountPoints:
|
||||
# - name: telegraf-output-influxdb2
|
||||
# mountPath: /etc/telegraf/conf.d
|
||||
# subPath: influxdb2.conf
|
||||
|
||||
## Tolerations for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
|
||||
## If the DaemonSet should run on the host's network namespace
|
||||
## hostNetwork: true
|
||||
|
||||
## If using hostNetwork=true, set dnsPolicy to ClusterFirstWithHostNet
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#
|
||||
## dnsPolicy: ClusterFirstWithHostNet
|
||||
|
||||
## If using dnsPolicy=None, set dnsConfig
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
|
||||
## dnsConfig:
|
||||
## nameservers:
|
||||
## - 1.2.3.4
|
||||
## searches:
|
||||
## - ns1.svc.cluster-domain.example
|
||||
## - my.dns.search.suffix
|
||||
## options:
|
||||
## - name: ndots
|
||||
## value: "2"
|
||||
## - name: edns0
|
||||
|
||||
rbac:
|
||||
# Specifies whether RBAC resources should be created
|
||||
create: true
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
# name:
|
||||
# Annotations for the ServiceAccount
|
||||
annotations: {}
|
||||
|
||||
## Specify priorityClassName
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
|
||||
# priorityClassName: system-node-critical
|
||||
|
||||
# Specify the pod's SecurityContext, including the OS user and group to run the pod
|
||||
podSecurityContext: {}
|
||||
|
||||
override_config:
|
||||
toml: ~
|
||||
# Provide a literal TOML config
|
||||
# toml: |+
|
||||
# [global_tags]
|
||||
# foo = "bar"
|
||||
# [agent]
|
||||
# interval = "10s"
|
||||
# [[inputs.mem]]
|
||||
# [[outputs.influxdb_v2]]
|
||||
# urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
|
||||
# bucket = "data"
|
||||
# organization = "OurCompany"
|
||||
# token = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
## Exposed telegraf configuration
|
||||
## ref: https://docs.influxdata.com/telegraf/v1.13/administration/configuration/
|
||||
config:
|
||||
# global_tags:
|
||||
# cluster: "mycluster"
|
||||
agent:
|
||||
interval: "10s"
|
||||
round_interval: true
|
||||
metric_batch_size: 1000
|
||||
metric_buffer_limit: 10000
|
||||
collection_jitter: "0s"
|
||||
flush_interval: "10s"
|
||||
flush_jitter: "0s"
|
||||
precision: ""
|
||||
debug: false
|
||||
quiet: false
|
||||
logfile: ""
|
||||
hostname: "$HOSTNAME"
|
||||
omit_hostname: false
|
||||
outputs:
|
||||
- influxdb_v2:
|
||||
urls:
|
||||
- "http://influxdb-influxdb2.monitoring:80"
|
||||
token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
|
||||
organization: "influxdata"
|
||||
bucket: "kluster"
|
||||
monitor_self: false
|
||||
docker_endpoint: "unix:///run/k3s/containerd/containerd.sock"
|
||||
|
@@ -1,15 +1,10 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: influxdb-nfs
|
||||
labels:
|
||||
directory: influxdb
|
||||
spec:
|
||||
# storageClassName: slow
|
||||
capacity:
|
||||
storage: "10Gi"
|
||||
# volumeMode: Filesystem
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
nfs:
|
||||
@@ -27,6 +22,4 @@ spec:
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
selector:
|
||||
matchLabels:
|
||||
directory: influxdb
|
||||
volumeName: influxdb-nfs
|
@@ -1,92 +1,3 @@
|
||||
image:
|
||||
repository: influxdb
|
||||
tag: 2.3.0-alpine
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## Annotations to be added to InfluxDB pods
|
||||
##
|
||||
podAnnotations: {}
|
||||
|
||||
## Labels to be added to InfluxDB pods
|
||||
##
|
||||
podLabels: {}
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
## Configure resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
##
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
## Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
##
|
||||
nodeSelector: {}
|
||||
|
||||
## Tolerations for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
|
||||
## Affinity for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
|
||||
securityContext: {}
|
||||
|
||||
## Customize liveness, readiness and startup probes
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
|
||||
##
|
||||
livenessProbe: {}
|
||||
# path: "/health"
|
||||
# scheme: "HTTP"
|
||||
# initialDelaySeconds: 0
|
||||
# periodSeconds: 10
|
||||
# timeoutSeconds: 1
|
||||
# failureThreshold: 3
|
||||
|
||||
readinessProbe: {}
|
||||
# path: "/health"
|
||||
# scheme: "HTTP"
|
||||
# initialDelaySeconds: 0
|
||||
# periodSeconds: 10
|
||||
# timeoutSeconds: 1
|
||||
# successThreshold: 1
|
||||
# failureThreshold: 3
|
||||
|
||||
startupProbe:
|
||||
enabled: false
|
||||
# path: "/health"
|
||||
# scheme: "HTTP"
|
||||
# initialDelaySeconds: 30
|
||||
# periodSeconds: 5
|
||||
# timeoutSeconds: 1
|
||||
# failureThreshold: 6
|
||||
|
||||
## Extra environment variables to configure influxdb
|
||||
## e.g.
|
||||
# env:
|
||||
# - name: FOO
|
||||
# value: BAR
|
||||
# - name: BAZ
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: my-secret
|
||||
# key: my-key
|
||||
env: {}
|
||||
|
||||
## Create default user through docker entrypoint
|
||||
## Defaults indicated below
|
||||
##
|
||||
@@ -100,10 +11,6 @@ adminUser:
|
||||
password: ""
|
||||
token: ""
|
||||
|
||||
## The password and token are obtained from an existing secret. The expected
|
||||
## keys are `admin-password` and `admin-token`.
|
||||
## If set, the password and token values above are ignored.
|
||||
# existingSecret: influxdb-auth
|
||||
|
||||
## Persist data to a persistent volume
|
||||
##
|
||||
@@ -113,83 +20,7 @@ persistence:
|
||||
useExisting: true
|
||||
## Name of existing PVC to be used in the influx deployment
|
||||
name: influxdb-nfs
|
||||
## influxdb data Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
# storageClass: "-"
|
||||
accessMode: ReadWriteOnce
|
||||
size: 10Gi
|
||||
mountPath: /var/lib/influxdb2
|
||||
subPath: ""
|
||||
|
||||
## Add custom volume and volumeMounts
|
||||
##
|
||||
# volumes:
|
||||
# - name: influxdb2-templates
|
||||
# hostPath:
|
||||
# path: /data/influxdb2-templates
|
||||
# type: Directory
|
||||
# mountPoints:
|
||||
# - name: influxdb2-templates
|
||||
# mountPath: /influxdb2-templates
|
||||
# readOnly: true
|
||||
|
||||
## Allow executing custom init scripts
|
||||
## If the container finds any files with the .sh extension inside of the
|
||||
## /docker-entrypoint-initdb.d folder, it will execute them.
|
||||
## When multiple scripts are present, they will be executed in lexical sort order by name.
|
||||
## For more details see Custom Initialization Scripts in https://hub.docker.com/_/influxdb
|
||||
initScripts:
|
||||
enabled: false
|
||||
scripts:
|
||||
init.sh: |+
|
||||
#!/bin/bash
|
||||
influx apply --force yes -u https://raw.githubusercontent.com/influxdata/community-templates/master/influxdb2_operational_monitoring/influxdb2_operational_monitoring.yml
|
||||
|
||||
## Specify a service type
|
||||
## ref: http://kubernetes.io/docs/user-guide/services/
|
||||
##
|
||||
service:
|
||||
type: LoadBalancer
|
||||
loadBalancerIP: 192.168.3.4
|
||||
port: 80
|
||||
targetPort: 8086
|
||||
annotations: {}
|
||||
labels: {}
|
||||
portName: http
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name:
|
||||
# Annotations for the ServiceAccount
|
||||
annotations: {}
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
|
||||
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
|
||||
# className: nginx
|
||||
tls: false
|
||||
# secretName: my-tls-cert # only needed if tls above is true or default certificate is not configured for Nginx
|
||||
hostname: influxdb.foobar.com
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: "nginx"
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
path: /
|
||||
|
||||
## Pod disruption budget configuration
|
||||
##
|
||||
pdb:
|
||||
## Specifies whether a Pod disruption budget should be created
|
||||
##
|
||||
create: true
|
||||
minAvailable: 1
|
||||
# maxUnavailable: 1
|
||||
|
||||
|
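The commented-out `existingSecret: influxdb-auth` above expects a plain Secret holding the `admin-password` and `admin-token` keys mentioned in the values. A minimal sketch of what such a Secret could look like before sealing (the name and values are placeholders, not something defined in this repository):

```
apiVersion: v1
kind: Secret
metadata:
  name: influxdb-auth              # referenced via adminUser.existingSecret
  namespace: monitoring
type: Opaque
stringData:
  admin-password: "change-me"      # placeholder
  admin-token: "change-me-too"     # placeholder
```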
34
apps/monitoring/kustomization.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
namespace: monitoring
|
||||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- grafana.pvc.yaml
|
||||
- influxdb.pvc.yaml
|
||||
- grafana.ingress.yaml
|
||||
# prometheus-operator crds
|
||||
- https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.70.0/bundle.yaml
|
||||
- prometheus.yaml
|
||||
- thanos-objstore-config.sealedsecret.yaml
|
||||
|
||||
|
||||
helmCharts:
|
||||
- releaseName: grafana
|
||||
name: grafana
|
||||
repo: https://grafana.github.io/helm-charts
|
||||
version: 7.3.0
|
||||
valuesFile: grafana.values.yaml
|
||||
|
||||
- releaseName: influxdb
|
||||
name: influxdb2
|
||||
repo: https://helm.influxdata.com/
|
||||
version: 2.1.2
|
||||
valuesFile: influxdb.values.yaml
|
||||
|
||||
- releaseName: telegraf-speedtest
|
||||
name: telegraf
|
||||
repo: https://helm.influxdata.com/
|
||||
version: 1.8.39
|
||||
valuesFile: telegraf-speedtest.values.yaml
|
4
apps/monitoring/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: placeholder
|
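The `name: placeholder` above is intentional: the kustomization sets `namespace: monitoring`, and the namespace transformer is expected to rewrite the Namespace object accordingly (an assumption about kustomize behaviour, worth confirming with `kustomize build`), so the rendered manifest should end up roughly as:

```
# sketch of the expected rendered output
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
```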
75
apps/monitoring/prometheus.yaml
Normal file
@@ -0,0 +1,75 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- nodes/metrics
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs: ["get", "list", "watch"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: monitoring # needs to be the same as in the kustomization.yaml
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: Prometheus
|
||||
metadata:
|
||||
name: prometheus
|
||||
spec:
|
||||
resources:
|
||||
requests:
|
||||
memory: 400Mi
|
||||
serviceAccountName: prometheus
|
||||
enableAdminAPI: false
|
||||
serviceMonitorNamespaceSelector: {}
|
||||
serviceMonitorSelector: {}
|
||||
thanos:
|
||||
version: v0.33.0
|
||||
objectStorageConfig:
|
||||
# loads the config from a secret named thanos-objstore-config in the same namespace
|
||||
key: thanos.yaml
|
||||
name: thanos-objstore-config
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: prometheus
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9090
|
||||
targetPort: 9090
|
||||
protocol: TCP
|
||||
selector:
|
||||
prometheus: prometheus
|
@@ -1,167 +0,0 @@
|
||||
## Default values.yaml for Telegraf
|
||||
## This is a YAML-formatted file.
|
||||
## ref: https://hub.docker.com/r/library/telegraf/tags/
|
||||
|
||||
replicaCount: 1
|
||||
image:
|
||||
repo: "telegraf"
|
||||
tag: "1.25"
|
||||
pullPolicy: IfNotPresent
|
||||
podAnnotations: {}
|
||||
podLabels: {}
|
||||
imagePullSecrets: []
|
||||
## Configure args passed to Telegraf containers
|
||||
args: []
|
||||
# The name of a secret in the same kubernetes namespace which contains values to
|
||||
# be added to the environment (must be manually created)
|
||||
# This can be useful for auth tokens, etc.
|
||||
|
||||
# envFromSecret: "telegraf-tokens"
|
||||
env:
|
||||
- name: HOSTNAME
|
||||
value: "telegraf-polling-service"
|
||||
# An older "volumeMounts" key was previously added which will likely
|
||||
# NOT WORK as you expect. Please use this newer configuration.
|
||||
|
||||
# volumes:
|
||||
# - name: telegraf-output-influxdb2
|
||||
# configMap:
|
||||
# name: "telegraf-output-influxdb2"
|
||||
# mountPoints:
|
||||
# - name: telegraf-output-influxdb2
|
||||
# mountPath: /etc/telegraf/conf.d
|
||||
# subPath: influxdb2.conf
|
||||
|
||||
## Configure resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
resources: {}
|
||||
# requests:
|
||||
# memory: 128Mi
|
||||
# cpu: 100m
|
||||
# limits:
|
||||
# memory: 128Mi
|
||||
# cpu: 100m
|
||||
|
||||
## Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
nodeSelector: {}
|
||||
## Affinity for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
## Tolerations for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
# - key: "key"
|
||||
# operator: "Equal|Exists"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
||||
|
||||
service:
|
||||
enabled: false
|
||||
type: ClusterIP
|
||||
annotations: {}
|
||||
rbac:
|
||||
# Specifies whether RBAC resources should be created
|
||||
create: true
|
||||
# Create only for the release namespace or cluster wide (Role vs ClusterRole)
|
||||
clusterWide: false
|
||||
# Rules for the created rule
|
||||
rules: []
|
||||
# When using the prometheus input to scrape all pods you need extra rules set to the ClusterRole to be
|
||||
# able to scan the pods for scraping labels. The following rules have been taken from:
|
||||
# https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46
|
||||
# - apiGroups:
|
||||
# - ""
|
||||
# resources:
|
||||
# - nodes
|
||||
# - nodes/proxy
|
||||
# - nodes/metrics
|
||||
# - services
|
||||
# - endpoints
|
||||
# - pods
|
||||
# - ingresses
|
||||
# - configmaps
|
||||
# verbs:
|
||||
# - get
|
||||
# - list
|
||||
# - watch
|
||||
# - apiGroups:
|
||||
# - "extensions"
|
||||
# resources:
|
||||
# - ingresses/status
|
||||
# - ingresses
|
||||
# verbs:
|
||||
# - get
|
||||
# - list
|
||||
# - watch
|
||||
# - nonResourceURLs:
|
||||
# - "/metrics"
|
||||
# verbs:
|
||||
# - get
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: false
|
||||
## Exposed telegraf configuration
|
||||
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
|
||||
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
|
||||
config:
|
||||
agent:
|
||||
interval: "2m"
|
||||
round_interval: true
|
||||
metric_batch_size: 1000
|
||||
metric_buffer_limit: 10000
|
||||
collection_jitter: "0s"
|
||||
flush_interval: "10s"
|
||||
flush_jitter: "0s"
|
||||
precision: ""
|
||||
debug: false
|
||||
quiet: false
|
||||
logfile: ""
|
||||
hostname: "$HOSTNAME"
|
||||
omit_hostname: false
|
||||
processors:
|
||||
- enum:
|
||||
mapping:
|
||||
field: "status"
|
||||
dest: "status_code"
|
||||
value_mappings:
|
||||
healthy: 1
|
||||
problem: 2
|
||||
critical: 3
|
||||
outputs:
|
||||
- influxdb_v2:
|
||||
urls:
|
||||
- "http://influxdb-influxdb2.monitoring:80"
|
||||
token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA==
|
||||
organization: "influxdata"
|
||||
bucket: "homeassistant"
|
||||
inputs:
|
||||
- http:
|
||||
urls:
|
||||
- "http://adguard-home.adguard:3000/control/stats"
|
||||
data_format: "json"
|
||||
metrics:
|
||||
health:
|
||||
enabled: false
|
||||
service_address: "http://:8888"
|
||||
threshold: 5000.0
|
||||
internal:
|
||||
enabled: true
|
||||
collect_memstats: false
|
||||
# Lifecycle hooks
|
||||
# hooks:
|
||||
# postStart: ["/bin/sh", "-c", "echo Telegraf started"]
|
||||
# preStop: ["/bin/sh", "-c", "sleep 60"]
|
||||
|
||||
## Pod disruption budget configuration
|
||||
##
|
||||
pdb:
|
||||
## Specifies whether a Pod disruption budget should be created
|
||||
##
|
||||
create: true
|
||||
minAvailable: 1
|
||||
# maxUnavailable: 1
|
||||
|
@@ -1,51 +1,7 @@
|
||||
## Default values.yaml for Telegraf
|
||||
## This is a YAML-formatted file.
|
||||
## ref: https://hub.docker.com/r/library/telegraf/tags/
|
||||
|
||||
replicaCount: 1
|
||||
image:
|
||||
repo: "telegraf"
|
||||
tag: "1.25"
|
||||
pullPolicy: IfNotPresent
|
||||
podAnnotations: {}
|
||||
podLabels: {}
|
||||
imagePullSecrets: []
|
||||
## Configure args passed to Telegraf containers
|
||||
args: []
|
||||
# The name of a secret in the same kubernetes namespace which contains values to
|
||||
# be added to the environment (must be manually created)
|
||||
# This can be useful for auth tokens, etc.
|
||||
|
||||
# envFromSecret: "telegraf-tokens"
|
||||
env:
|
||||
- name: HOSTNAME
|
||||
value: "telegraf-speedtest"
|
||||
|
||||
## Configure resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
resources: {}
|
||||
# requests:
|
||||
# memory: 128Mi
|
||||
# cpu: 100m
|
||||
# limits:
|
||||
# memory: 128Mi
|
||||
# cpu: 100m
|
||||
|
||||
## Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
nodeSelector: {}
|
||||
## Affinity for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
## Tolerations for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
# - key: "key"
|
||||
# operator: "Equal|Exists"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
||||
service:
|
||||
enabled: false
|
||||
rbac:
|
||||
@@ -94,17 +50,3 @@ config:
|
||||
inputs:
|
||||
- internet_speed:
|
||||
enable_file_download: false
|
||||
|
||||
# Lifecycle hooks
|
||||
# hooks:
|
||||
# postStart: ["/bin/sh", "-c", "echo Telegraf started"]
|
||||
# preStop: ["/bin/sh", "-c", "sleep 60"]
|
||||
|
||||
## Pod disruption budget configuration
|
||||
##
|
||||
pdb:
|
||||
## Specifies whether a Pod disruption budget should be created
|
||||
##
|
||||
create: true
|
||||
minAvailable: 1
|
||||
# maxUnavailable: 1
|
||||
|
16
apps/monitoring/thanos-objstore-config.sealedsecret.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
---
|
||||
apiVersion: bitnami.com/v1alpha1
|
||||
kind: SealedSecret
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: thanos-objstore-config
|
||||
namespace: monitoring
|
||||
spec:
|
||||
encryptedData:
|
||||
thanos.yaml: AgCXlr7NO2DoH1R0ngtDFi8rgJaDnW5WSmOMjvXF4GMcEjnn1kwQMLkF0Xz1BUB5GlQkTAg+ZjCWGMlfycBmUnZb+koZK3X1YLsk1BxBxtuSqhj35iQYxKQ7rAlsz7FxUQjK2oiJkFeQmo/rwcw6l6vZJ73+THYSebR9mLQ/H0pnmJM3ldLX4iWL2H8BZ7ftOYdXO7Xv0lk2k2L4O4LgnB1Uedpyk0HLVxAv3VdVU/RFpHm5Q7kudrCMm9ENcJG7qIWuii8GkysvEefbo2phgKn1Zr5XR6SyekuW2e6FyHe9us5Pv5HnJ6Z2+ZyewygaGgHiRqtxRMaLbahICewfSHwyGzeAD2kdgwVyJYXxVPV9qKQvZmj0ZDCDZ5K548mSUq7nNXSI9M9AJBTKUoqb2FXK3pqn4yh9M1l+7Pmno5Fs22blAyGsRqO32GxrYvEXPpdSeqHRjOMYTnbPuteGRKcvmSEUSuHzkeoTzU1Jh4Sg0ygtQUNIKtbwhJm1XpbJ0oaR5ukWMxPfpDv+B5FmrDsU/I+o62+NtCLQLkK6MoRBFiJ1kymtKkM3vQ1CVg4Vtc5Gc2D6mMu5K8kEuUODweBb8qPnYH7ULfTYORldj3d+Fb2mGF5mAU6xHMzbocsdgZpbAzUP/FfJmMMDWf4aW3LJ1mBjUD06KAwPsQvbTm6VInrdXh2QVb4UIp41kbyK8sanHrvh3bprHloxt8OnTZ2HQl+XN+kxYirkVkL34lIlk7KdYCWqO7QqH0ncd9WF0f9mpPGbxo3J
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: thanos-objstore-config
|
||||
namespace: monitoring
|
||||
type: Opaque
|
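For context, the sealed `thanos.yaml` key above carries a Thanos object-storage configuration. The actual payload stays encrypted, but the plaintext generally follows this shape (bucket, endpoint and credentials below are placeholders):

```
type: S3
config:
  bucket: thanos                   # placeholder
  endpoint: s3.example.com         # placeholder
  access_key: <redacted>
  secret_key: <redacted>
```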
5
apps/nextcloud/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
### Running `occ` commands:
|
||||
|
||||
```
|
||||
su -s /bin/bash www-data -c "php occ user:list"
|
||||
```
|
@@ -1,7 +1,6 @@
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
namespace: nextcloud
|
||||
name: nextcloud-ingressroute
|
||||
|
||||
spec:
|
||||
@@ -14,4 +13,4 @@ spec:
|
||||
- name: nextcloud
|
||||
port: 8080
|
||||
tls:
|
||||
certResolver: default-tls
|
||||
certResolver: default-tls
|
||||
|
16
apps/nextcloud/kustomization.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- ingress.yaml
|
||||
- pvc.yaml
|
||||
- postgres.sealedsecret.yaml
|
||||
|
||||
namespace: nextcloud
|
||||
|
||||
helmCharts:
|
||||
- name: nextcloud
|
||||
releaseName: nextcloud
|
||||
version: 4.5.5
|
||||
valuesFile: values.yaml
|
||||
repo: https://nextcloud.github.io/helm/
|
4
apps/nextcloud/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: placeholder
|
22
apps/nextcloud/postgres.sealedsecret.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"kind": "SealedSecret",
|
||||
"apiVersion": "bitnami.com/v1alpha1",
|
||||
"metadata": {
|
||||
"name": "postgres-password",
|
||||
"namespace": "nextcloud",
|
||||
"creationTimestamp": null
|
||||
},
|
||||
"spec": {
|
||||
"template": {
|
||||
"metadata": {
|
||||
"name": "postgres-password",
|
||||
"namespace": "nextcloud",
|
||||
"creationTimestamp": null
|
||||
}
|
||||
},
|
||||
"encryptedData": {
|
||||
"password": "AgCTmvBe9YFnyWOdz02rxr0hTXnWuVLeUt5dpieWMzl4cVMBj7WcyyODWtNd+eQOLARRssGNZAP4C9gH90iVRFAW1aU+NeA76oceXE5Kiiqoc8T30wE5FC6/UbTjQYRH520NF4wcCQKm//iH8o5uI2+NxZW4goeuShibXK9sijFVNXxUuTeXTmaSJjEPyB+pnmPwjzw+qjhkJJADefh9oryy5+t9ecCwXDiI/2ce2n1Vawm/Nq6/0rZMUSsF8XSiTFczKMunuGMhxGEyyx/I8NZd4XMXGSnBo0YZF7jR9+eRHIjuenPHq1kfEid2Ps4fhFSE8mEecnK7w5xE3r0XeTNHQcTId1yYneK/LQfcRkzInuRddytTwTAmsoSjROcjKjAvtyZSM81pFWJsMQ7bSVXOC0K2wvEz9khDT0RIoR/8tMh2G737F15raTe9Ggbgy3DHst4mYIpoWV/slHrOF0vR9j7X+MRN9R1cVtI1coof/tVSWQsLvv0AJfB4/6dUl+i/yNO/j+4c3WolGwqyXd+oxsZK1VrSwSCBZwBO17BmePJL2QsPVRdutq06TrlvGqP4wXySH9LRuHr3sWgr2VuDV00w+UvuU7ExI+16dWh7jrn/rvIBQSJlHDhl5+VpyM0WTMy5kSfO6nits73ZzT7BAoSU7AeQOMj3t+cUiEq9f9dk7em7QxWMuWg6QIJ+ZZ2+CCBms4rSE4x2glOxanNX/HktQg==",
|
||||
"username": "AgCxJKzhsF7yNJesK5oLJP62kjFnX4UUNQ2NrHl02Hv6MAzi/AUEV3uJSXXIi3H/uMJSMxRpJQjIDsrznYVI0YHOoz1M8/y1dx8xotFv/i0XByI9sMuGtesop7ncmQbEPMaJ3pqTJyaGkEwcsEMGmwwYiRfJHmEhhCYtzEc5IAnx+nmk//HYsrSWKpJGSWl0LvdMJsnsTxrWoJjaYTW3J0Of3VOOmgkuwIFKyXW9S2cUbAco8xVYchbyiHc8LXbS3izyAidRzg1OWyqvTGMIKJDQZ3ibIiXheon5ZeYjj0fkEkv3TrB7WoKdo0090OY1eHabqAPHT8aP+WG1g6TAzbJEtg+zFfYDKIw5Tp1WkRlsD2me4HycGuZbsaXgP5vWlxF5+rULUzUgxfmTRmYTl0H8kIlmUrusZwxR5ZXnSuBJ3n3AMEjmpmTTALakxEFEPDJJoVbgcViLtANwk72yu15FlOxczT22uyW8FMkj9kYzcq/+2a/EjaTo62SnUYJ3UTQXvgMKML1yJD+zym2+xscPNmwZFBPN5BQ/64ru/Z51nWB20fWFgW3Rw67jEQMajmVclmUcASWOjHzO87feEprHeilTH+224IHzpmC4aLz/JtIP9EEvqfDUr3fRrxcgtT1DgxV37vPj6Pqn47MHr39AA850CxjFmb1VcwfH6ygXABFlxnVByZDn7xCyBNswtKJqtw=="
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,13 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
namespace: nextcloud
|
||||
name: nextcloud-nfs
|
||||
spec:
|
||||
# storageClassName: fast
|
||||
capacity:
|
||||
storage: "150Gi"
|
||||
# volumeMode: Filesystem
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
nfs:
|
||||
@@ -17,7 +14,6 @@ spec:
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
namespace: nextcloud
|
||||
name: nextcloud-nfs
|
||||
spec:
|
||||
storageClassName: ""
|
||||
@@ -27,3 +23,29 @@ spec:
|
||||
requests:
|
||||
storage: "150Gi"
|
||||
volumeName: nextcloud-nfs
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: nextcloud-syncthing-shared
|
||||
spec:
|
||||
capacity:
|
||||
storage: "150Gi"
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
nfs:
|
||||
path: /kluster/syncthing
|
||||
server: 192.168.1.157
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nextcloud-syncthing-shared
|
||||
spec:
|
||||
storageClassName: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: "150Gi"
|
||||
volumeName: nextcloud-syncthing-shared
|
17
apps/nextcloud/readme.md
Normal file
@@ -0,0 +1,17 @@
|
||||
## Running occ commands
|
||||
|
||||
Sometimes you need to run a command on the Nextcloud container directly. You can do that by running commands as the user www-data via the kubectl exec command.
|
||||
```
|
||||
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
|
||||
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ myocccomand"
|
||||
```
|
||||
Here are some examples below.
|
||||
Putting Nextcloud into maintenance mode
|
||||
|
||||
Some admin actions (e.g. backups) require you to put your Nextcloud instance into maintenance mode:
|
||||
|
||||
|
||||
```
|
||||
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
|
||||
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ maintenance:mode --on"
|
||||
```
|
@@ -1,31 +1,10 @@
|
||||
## Official nextcloud image version
|
||||
## ref: https://hub.docker.com/r/library/nextcloud/tags/
|
||||
##
|
||||
image:
|
||||
repository: nextcloud
|
||||
tag: "27" # needs to be a string because of the template
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
podAnnotations: {}
|
||||
deploymentAnnotations: {}
|
||||
|
||||
# Number of replicas to be deployed
|
||||
replicaCount: 1
|
||||
|
||||
## Allowing use of ingress controllers
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
||||
##
|
||||
ingress:
|
||||
enabled: false
|
||||
|
||||
|
||||
# Allow configuration of lifecycle hooks
|
||||
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
|
||||
lifecycle: {}
|
||||
# postStartCommand: []
|
||||
# preStopCommand: []
|
||||
|
||||
nextcloud:
|
||||
host: nextcloud.kluster.moll.re
|
||||
@@ -67,6 +46,15 @@ nextcloud:
|
||||
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
|
||||
configs: {}
|
||||
|
||||
extraVolumes:
|
||||
- name: my-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: nextcloud-nfs
|
||||
|
||||
extraVolumeMounts:
|
||||
- name: my-volume
|
||||
mountPath: /var/www/html/my-volume
|
||||
|
||||
# For example, to use S3 as primary storage
|
||||
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
|
||||
#
|
||||
@@ -87,52 +75,12 @@ nextcloud:
|
||||
# )
|
||||
# );
|
||||
|
||||
## Strategy used to replace old pods
|
||||
## IMPORTANT: use with care; it is suggested to leave it as-is for upgrade purposes
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
|
||||
strategy:
|
||||
type: Recreate
|
||||
# type: RollingUpdate
|
||||
# rollingUpdate:
|
||||
# maxSurge: 1
|
||||
# maxUnavailable: 0
|
||||
|
||||
##
|
||||
## Extra environment variables
|
||||
extraEnv:
|
||||
# - name: SOME_SECRET_ENV
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: nextcloud
|
||||
# key: secret_key
|
||||
|
||||
# Extra mounts for the pods. Example shown is for connecting a legacy NFS volume
|
||||
# to NextCloud pods in Kubernetes. This can then be configured in External Storage
|
||||
extraVolumes:
|
||||
# - name: nfs
|
||||
# nfs:
|
||||
# server: "10.0.0.1"
|
||||
# path: "/nextcloud_data"
|
||||
# readOnly: false
|
||||
extraVolumeMounts:
|
||||
# - name: nfs
|
||||
# mountPath: "/legacy_data"
|
||||
|
||||
# Extra securityContext parameters. For example you may need to define the runAsNonRoot directive
|
||||
# extraSecurityContext:
|
||||
# runAsUser: "33"
|
||||
# runAsGroup: "33"
|
||||
# runAsNonRoot: true
|
||||
# readOnlyRootFilesystem: true
|
||||
|
||||
nginx:
|
||||
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
|
||||
enabled: false
|
||||
resources: {}
|
||||
|
||||
internalDatabase:
|
||||
enabled: true
|
||||
name: nextcloud
|
||||
enabled: false
|
||||
|
||||
##
|
||||
## External database configuration
|
||||
@@ -146,53 +94,33 @@ externalDatabase:
|
||||
## Database host
|
||||
host: postgres-postgresql.postgres
|
||||
|
||||
## Database user
|
||||
user: nextcloud
|
||||
|
||||
## Database password
|
||||
password: test
|
||||
|
||||
## Database name
|
||||
database: nextcloud
|
||||
|
||||
## Use an existing secret
|
||||
existingSecret:
|
||||
enabled: false
|
||||
# secretName: nameofsecret
|
||||
# usernameKey: username
|
||||
# passwordKey: password
|
||||
enabled: true
|
||||
secretName: postgres-password
|
||||
usernameKey: username
|
||||
passwordKey: password
|
||||
|
||||
##
|
||||
## MariaDB chart configuration
|
||||
##
|
||||
mariadb:
|
||||
## Whether to deploy a mariadb server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
|
||||
enabled: false
|
||||
|
||||
postgresql:
|
||||
enabled: false
|
||||
##
|
||||
## Redis chart configuration
|
||||
## for more options see https://github.com/bitnami/charts/tree/master/bitnami/redis
|
||||
##
|
||||
|
||||
redis:
|
||||
enabled: false
|
||||
auth:
|
||||
enabled: true
|
||||
password: 'changeme'
|
||||
|
||||
## Cronjob to execute Nextcloud background tasks
|
||||
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron
|
||||
##
|
||||
cronjob:
|
||||
enabled: false
|
||||
# Nexcl
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 8080
|
||||
loadBalancerIP: nil
|
||||
nodePort: nil
|
||||
|
||||
## Enable persistence using Persistent Volume Claims
|
||||
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
|
||||
@@ -201,33 +129,14 @@ persistence:
|
||||
# Nextcloud Data (/var/www/html)
|
||||
enabled: true
|
||||
annotations: {}
|
||||
## nextcloud data Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
# storageClass: "-"
|
||||
|
||||
## A manually managed Persistent Volume and Claim
|
||||
## Requires persistence.enabled: true
|
||||
## If defined, PVC must be created manually before volume will be bound
|
||||
existingClaim: nextcloud-nfs
|
||||
|
||||
accessMode: ReadWriteOnce
|
||||
size: 150Gi
|
||||
|
||||
## Use an additional pvc for the data directory rather than a subpath of the default PVC
|
||||
## Useful to store data on a different storageClass (e.g. on slower disks)
|
||||
nextcloudData:
|
||||
enabled: false
|
||||
subPath:
|
||||
annotations: {}
|
||||
# storageClass: "-"
|
||||
# existingClaim:
|
||||
accessMode: ReadWriteOnce
|
||||
size: 8Gi
|
||||
|
||||
resources:
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
@@ -241,31 +150,9 @@ resources:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
|
||||
## Liveness and readiness probe values
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||
##
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 250
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 250
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
startupProbe:
|
||||
enabled: false
|
||||
initialDelaySeconds: 250
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 30
|
||||
successThreshold: 1
|
||||
|
||||
# disable when upgrading from a previous chart version
|
||||
|
||||
## Enable pod autoscaling using HorizontalPodAutoscaler
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
|
||||
@@ -273,13 +160,6 @@ startupProbe:
|
||||
hpa:
|
||||
enabled: false
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
|
||||
## Prometheus Exporter / Metrics
|
||||
##
|
||||
metrics:
|
||||
@@ -288,7 +168,3 @@ metrics:
|
||||
|
||||
rbac:
|
||||
enabled: false
|
||||
serviceaccount:
|
||||
create: true
|
||||
name: nextcloud-serviceaccount
|
||||
|
||||
|
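The `externalDatabase.existingSecret` block above points at the `postgres-password` SealedSecret defined earlier. Once unsealed, only the two referenced keys matter; a sketch of the plain Secret it is expected to decrypt into (values here are placeholders, the real ones are encrypted above):

```
apiVersion: v1
kind: Secret
metadata:
  name: postgres-password
  namespace: nextcloud
type: Opaque
stringData:
  username: nextcloud              # placeholder
  password: "change-me"            # placeholder
```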
37
apps/recipes/deployment.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: mealie
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: mealie
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: mealie
|
||||
spec:
|
||||
containers:
|
||||
- name: mealie
|
||||
image: mealie
|
||||
resources:
|
||||
limits:
|
||||
memory: "500Mi"
|
||||
cpu: "500m"
|
||||
ports:
|
||||
- containerPort: 9000
|
||||
env:
|
||||
- name: ALLOW_SIGNUP
|
||||
value: "true"
|
||||
- name: TZ
|
||||
value: Europe/Paris
|
||||
- name: BASE_URL
|
||||
value: https://recipes.kluster.moll.re
|
||||
volumeMounts:
|
||||
- name: mealie-data
|
||||
mountPath: /app/data
|
||||
|
||||
volumes:
|
||||
- name: mealie-data
|
||||
persistentVolumeClaim:
|
||||
claimName: mealie-data
|
@@ -1,15 +1,16 @@
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: homarr-ingress
|
||||
name: mealie-ingressroute
|
||||
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`start.kluster.moll.re`)
|
||||
- match: Host(`recipes.kluster.moll.re`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: homarr
|
||||
port: 7575
|
||||
- name: mealie-web
|
||||
port: 9000
|
||||
tls:
|
||||
certResolver: default-tls
|
16
apps/recipes/kustomization.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
namespace: recipes
|
||||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- deployment.yaml
|
||||
- pvc.yaml
|
||||
- service.yaml
|
||||
- ingress.yaml
|
||||
|
||||
images:
|
||||
- name: mealie
|
||||
newTag: v1.2.0
|
||||
newName: ghcr.io/mealie-recipes/mealie
|
4
apps/recipes/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: placeholder
|
12
apps/recipes/pvc.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: mealie-data
|
||||
spec:
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
volumeMode: Filesystem
|
||||
storageClassName: nfs-client
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
10
apps/recipes/service.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mealie-web
|
||||
spec:
|
||||
selector:
|
||||
app: mealie
|
||||
ports:
|
||||
- port: 9000
|
||||
targetPort: 9000
|
@@ -14,4 +14,3 @@ spec:
|
||||
port: 80
|
||||
tls:
|
||||
certResolver: default-tls
|
||||
|
@@ -9,4 +9,3 @@ spec:
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
|
||||
|
35
apps/syncthing/deployment.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: syncthing
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: syncthing
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: syncthing
|
||||
spec:
|
||||
containers:
|
||||
- name: syncthing
|
||||
image: syncthing
|
||||
resources:
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "500m"
|
||||
ports:
|
||||
- containerPort: 8384
|
||||
protocol: TCP
|
||||
name: syncthing-web
|
||||
- containerPort: 22000
|
||||
protocol: TCP
|
||||
- containerPort: 22000
|
||||
protocol: UDP
|
||||
volumeMounts:
|
||||
- name: persistence
|
||||
mountPath: /var/syncthing
|
||||
volumes:
|
||||
- name: persistence
|
||||
persistentVolumeClaim:
|
||||
claimName: syncthing-claim
|
16
apps/syncthing/ingress.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: rss-ingressroute
|
||||
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`syncthing.kluster.moll.re`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: syncthing-web
|
||||
port: 8384
|
||||
tls:
|
||||
certResolver: default-tls
|
18
apps/syncthing/kustomization.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
namespace: syncthing
|
||||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- pvc.yaml
|
||||
- deployment.yaml
|
||||
- service.yaml
|
||||
- ingress.yaml
|
||||
- servicemonitor.yaml
|
||||
- syncthing-api.sealedsecret.yaml
|
||||
|
||||
images:
|
||||
- name: syncthing
|
||||
newName: syncthing/syncthing
|
||||
newTag: "1.27"
|
4
apps/syncthing/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: placeholder
|
apps/syncthing/pvc.yaml (Normal file, 11 lines)
@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: syncthing-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
apps/syncthing/service.yaml (Normal file, 46 lines)
@@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
  name: syncthing-web
  labels:
    app: syncthing
spec:
  selector:
    app: syncthing
  type: ClusterIP
  ports:
    - port: 8384
      targetPort: 8384
      name: syncthing-web
---
apiVersion: v1
kind: Service
metadata:
  name: syncthing-listen
  annotations:
    metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
  selector:
    app: syncthing
  type: LoadBalancer
  loadBalancerIP: 192.168.3.4
  ports:
    - port: 22000
      targetPort: 22000
      protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: syncthing-discover
  annotations:
    metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
  selector:
    app: syncthing
  type: LoadBalancer
  loadBalancerIP: 192.168.3.4
  ports:
    - port: 22000
      targetPort: 22000
      protocol: UDP
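The `metallb.universe.tf/allow-shared-ip` annotation lets the TCP and UDP sync services share the single external address 192.168.3.4, while the web UI stays cluster-internal behind Traefik. A quick way to confirm both LoadBalancer services received the same IP (a sketch, assuming MetalLB is already handing out addresses in the cluster):

```
# Both LoadBalancer services should report 192.168.3.4 as EXTERNAL-IP.
kubectl -n syncthing get svc syncthing-listen syncthing-discover -o wide
```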
apps/syncthing/servicemonitor.yaml (Normal file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: syncthing-servicemonitor
  labels:
    app: syncthing
spec:
  selector:
    matchLabels:
      app: syncthing
  endpoints:
    - port: syncthing-web
      path: /metrics
      bearerTokenSecret:
        name: syncthing-api
        key: token
  namespace: syncthing
apps/syncthing/syncthing-api.sealedsecret.yaml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: syncthing-api
  namespace: syncthing
spec:
  encryptedData:
    token: AgC1hG1aguLIWBgA1R19MGrXDq7BONAldMEXtCeGXLO9Xar08f7qFqprtRJAMOID4trUEBMAkF96m7rH7QHTpO0WzRLrJctLi7U6NgESUJBDxusqjij3RAANS69Xt27mu2oa+rhm605CfFJT6Gpx/2CxrFtUD3yCijilDnEVvw4WvTLHvVQMCd8cM8ZDlpBsSYbxvtCUN1+B02DCucLpMphspxV2SGPAdc04xQD7d0vUhNLekFi0xSgu0jiRGVDHOG5Egd9d/BGeNOBgiUVxJxqqdXc6EmkslcSUtMQJ5luSxjogf+p3jdOqt4aPpUeR8sSPb6OSEIZD/Cfs9X4akHdpUAqkycu+V24lDxeHWAtIviCMBPttrwNAEytgwqaiT0U4UmL5GqR97jpmy3Tx+jYKuXkt4Igb6VByreuL9aZacRrqRhCCgbg95Y/UrYlLAbZYOI/+KsFzB5akGpZXUDcW9h2IkTUmcT+QxWXqEoNpoTI5qAnKiu/9T5elDKghjMHYX+CnPj+rXlQIJzX7NkZ0Q6HpKQ4B2Vd1Ewkvadf963jBodUe7WiMt8UeYgzCa33F4U23JjExIrL8t3r8MQ/IIdtfUvyz6Da1vp5hjpBUnUCk8rca/6VC3GO1GP3DLdIXiZQY1OOTHJlyLG7+bIL35zVfkmLMzmlIdaFsfeYiL4P+hYRbLABPAJk8lY7MEdiczpvI9HlmFVatJaPrFJwx9jyhzqIOq5eGt0OIkFt+fw==
  template:
    metadata:
      creationTimestamp: null
      name: syncthing-api
      namespace: syncthing
    type: Opaque
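For context on how a file like this is produced, a SealedSecret is typically generated by piping a plain Secret through `kubeseal`. The sketch below is not taken from the repo: the literal API key is a placeholder, and depending on where the sealed-secrets controller runs, `--controller-namespace`/`--controller-name` may also be needed.

```
# Hypothetical regeneration of the syncthing-api SealedSecret.
kubectl create secret generic syncthing-api \
  --namespace syncthing \
  --from-literal=token=<syncthing-api-key> \
  --dry-run=client -o yaml \
  | kubeseal --format yaml > apps/syncthing/syncthing-api.sealedsecret.yaml
```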
@@ -3,11 +3,13 @@ kind: IngressRoute
metadata:
  namespace: whoami
  name: whoami-ingressroute
  annotations:

spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`whoami.kluster.moll.re`)
    - match: Host(`whoami.kluster.moll.re`) || Host(`homepage.kluster.moll.re`)
      kind: Rule
      services:
        - name: whoami
infrastructure/external-dns/cloudflare-api.sealedsecret.yaml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: cloudflare-api
  namespace: external-dns
spec:
  encryptedData:
    CLOUDFLARE_TOKEN: AgA+HAbpfu4MUK152g2XuWnCoLflCgp4C5gpUWy/IRCETyhcaP5SefAPGC/TGdGPTsZWoDO9qIDAdEiFw4Aw5idfBOm1Ql7vLunWeqNysirU9QJIbL21Fb8+UafrLnAGQySjzAT4MyK5yntn3T8l568cR22jPQi5a0CqL91jGXBeANkTQlokMFJCYkYsaqhCirSDlldvVrJGlWg+T1odyqyytIOO9OaYNt0jA1NisKpLWcLPwYcVkf0ntdCSQaloMX/LeoY16kECOVMYrVPIqGbOhCvAehpjyXxydFjyaYIV5p5hlKD0Sjlpc9zTCTFF7KUddNU9m2GhJqKT8bZm0d2g1yth4dNLgbUSp1nU31vpRalRJYXBVwPVei0lSGL7Jkb9LzCRHxL4J8hP/AeYrntpoAqMDxZsMZSpUnbTQklT2WyvIzyhpiNtEFrH8P6CYq61dWENXWkMwDqzKfM7Xlg9ifW6YzTQfsoo/OWhtWRmLDNVrXwhZqRWb7UjYr6xGPAzc/I4H2SJk5HLubylaXY3I2X2dWy+YTttiUuzQl0YfzrADAlu8ZWPiAfLqGmKnOR0STqeCvAT6ya8Ky09aY5GWLdTJfTayivGA0PvRJE2idf/VtpVDBERN5lCDHLBrRvU8o5wBlkTxU4B8zHmF+pu7zO3bA8IOUpyQMnlFzZTOI1s0Tl6XiRmy+WnA4tyxjrEse75BvS3WGCnaezQJW9gpF1/UC18vys46f9Jn5FHfm4lMpOscSIy
  template:
    metadata:
      creationTimestamp: null
      name: cloudflare-api
      namespace: external-dns
    type: Opaque
infrastructure/external-dns/cronjob.yaml (Normal file, 63 lines)
@@ -0,0 +1,63 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: octodns-cronjob
spec:
  schedule: "0 */6 * * *"
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      backoffLimit: 0

      template:
        spec:
          initContainers:
            - name: git
              image: git
              command: ["git"]
              args:
                - clone
                - https://git.kluster.moll.re/remoll/dns.git
                - /etc/octodns
              volumeMounts:
                - name: octodns-config
                  mountPath: /etc/octodns
          containers:
            - name: octodns
              image: octodns
              env:
                # - name: CLOUDFLARE_ACCOUNT_ID
                #   valueFrom:
                #     secretKeyRef:
                #       name: cloudflare-api
                #       key: CLOUDFLARE_ACCOUNT_ID
                - name: CLOUDFLARE_TOKEN
                  valueFrom:
                    secretKeyRef:
                      name: cloudflare-api
                      key: CLOUDFLARE_TOKEN
                # - name: CLOUDFLARE_EMAIL
                #   valueFrom:
                #     secretKeyRef:
                #       name: cloudflare-api
                #       key: CLOUDFLARE_EMAIL

              command: ["/bin/sh", "-c"]
              args:
                - >-
                  cd /etc/octodns
                  &&
                  pip install -r ./requirements.txt
                  &&
                  octodns-sync --config-file ./config.yaml --doit
                  &&
                  echo "done..."
              volumeMounts:
                - name: octodns-config
                  mountPath: /etc/octodns

          volumes:
            - name: octodns-config
              emptyDir: {}
          restartPolicy: Never
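The job flow is: the init container clones the DNS zone repository into a shared emptyDir, then the octodns container installs the repo's requirements and pushes the zone data to Cloudflare with `octodns-sync --doit`. To trigger a run outside the 6-hour schedule (a sketch, assuming the manifests are deployed as-is), a one-off Job can be created from the CronJob:

```
# Manually trigger one synchronization run and follow the octodns container logs.
kubectl -n external-dns create job --from=cronjob/octodns-cronjob octodns-manual
kubectl -n external-dns logs job/octodns-manual -c octodns -f
```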
infrastructure/external-dns/kustomization.yaml (Normal file, 18 lines)
@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: external-dns

resources:
  - namespace.yaml
  - cloudflare-api.sealedsecret.yaml
  - cronjob.yaml

images:
  - name: octodns
    newName: octodns/octodns # has all plugins
    newTag: "2024.02"

  - name: git
    newName: alpine/git
    newTag: "2.43.0"
infrastructure/external-dns/namespace.yaml (Normal file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: placeholder
infrastructure/external-dns/octodns (Submodule, 1 line)
Submodule infrastructure/external-dns/octodns added at ba74e801c1
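Since the zone data lives in a separate repository referenced as a submodule, a fresh checkout needs the submodule initialized for local work (a sketch; the CronJob above clones the repository itself, so this only matters when editing the zone files locally):

```
# Pull the pinned octodns submodule after cloning this repository.
git submodule update --init infrastructure/external-dns/octodns
```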
@@ -22,6 +22,8 @@ spec:
          value: ":80"
        - name: DRONE_GITEA_SERVER
          value: https://git.kluster.moll.re
        - name: DRONE_USER_CREATE
          value: username:remoll,admin:true
        - name: DRONE_GITEA_CLIENT_ID
          valueFrom:
            secretKeyRef:
Some files were not shown because too many files have changed in this diff.