Compare commits

..

1 Commit

Author SHA1 Message Date
3aa95f93e1 add headscale 2025-04-24 22:52:22 +02:00
44 changed files with 654 additions and 258 deletions

View File

@ -10,7 +10,7 @@ resources:
images:
- name: adguard/adguardhome
newName: adguard/adguardhome
newTag: v0.107.62
newTag: v0.107.61
namespace: adguard

View File

@ -12,4 +12,4 @@ namespace: audiobookshelf
images:
- name: audiobookshelf
newName: ghcr.io/advplyr/audiobookshelf
newTag: "2.24.0"
newTag: "2.20.0"

View File

@ -12,4 +12,4 @@ namespace: code-server
images:
- name: code-server
newName: ghcr.io/coder/code-server
newTag: 4.100.3-fedora
newTag: 4.99.3-fedora

View File

@ -13,4 +13,4 @@ namespace: files
images:
- name: ocis
newName: owncloud/ocis
newTag: "7.1.3"
newTag: "7.1.2"

View File

@ -13,4 +13,4 @@ resources:
images:
- name: actualbudget
newName: actualbudget/actual-server
newTag: 25.5.0
newTag: 25.4.0

View File

@ -17,5 +17,5 @@ helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 9.2.2
version: 8.12.1
valuesFile: grafana.values.yaml

View File

@ -15,4 +15,4 @@ resources:
images:
- name: homeassistant
newName: homeassistant/home-assistant
newTag: "2025.5"
newTag: "2025.4"

View File

@ -15,16 +15,16 @@ namespace: immich
helmCharts:
- name: immich
releaseName: immich
version: 0.9.3
version: 0.9.2
valuesFile: values.yaml
repo: https://immich-app.github.io/immich-charts
images:
- name: ghcr.io/immich-app/immich-machine-learning
newTag: v1.132.3
newTag: v1.130.3
- name: ghcr.io/immich-app/immich-server
newTag: v1.132.3
newTag: v1.130.3
patches:

View File

@ -1,10 +0,0 @@
{
"packageRules": [
{
"matchDatasources": ["docker"],
"matchPackagePrefixes": ["ghcr.io/immich-app/"],
"groupName": "Immich containers",
"groupSlug": "immich-app-images"
}
]
}

View File

@ -14,4 +14,4 @@ namespace: kitchenowl
images:
- name: kitchenowl
newName: tombursch/kitchenowl
newTag: v0.6.15
newTag: v0.6.11

View File

@ -13,4 +13,4 @@ namespace: linkding
images:
- name: linkding
newName: sissbruecker/linkding
newTag: "1.40.0"
newTag: "1.39.1"

View File

@ -1,11 +1,3 @@
## Setup
Because Minecraft is quite sensitive to I/O performance, we want the data to be stored on a local disk. But hostPath is not well supported on Talos (and is not persistent), so we use an ephemeral volume instead: an emptyDir volume mounted into the pod.
An initContainer copies the data from the persistent volume to this local storage. Afterwards, copying from the local storage back to the persistent storage is handled by a preStop lifecycle event.
This way we get the best of both worlds: fast local storage and persistent storage.
## Sending a command
```
kubectl exec -it -n minecraft deploy/minecraft-server -- /bin/bash
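The removed "Sending a command" snippet above opens an interactive shell in the server pod. With the itzg/minecraft-server image referenced in the kustomization, a command can also be sent non-interactively through the bundled rcon-cli; a minimal sketch, assuming RCON is left at the image's defaults:

```
# Send a single console command via the bundled rcon-cli (assumes default RCON settings)
kubectl exec -n minecraft deploy/minecraft-server -- rcon-cli "say hello from kubectl"
# Gracefully stop the server
kubectl exec -n minecraft deploy/minecraft-server -- rcon-cli stop
```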

View File

@ -9,16 +9,6 @@ spec:
app: minecraft-server
spec:
restartPolicy: OnFailure
initContainers:
- name: copy-data-to-local
image: alpine
command: ["/bin/sh"]
args: ["-c", "cp -r /data/* /local-data/"]
volumeMounts:
- name: local-data
mountPath: /local-data
- name: minecraft-data
mountPath: /data
containers:
- name: minecraft-server
image: minecraft
@ -59,34 +49,12 @@ spec:
value: "false"
- name: ENABLE_AUTOSTOP
value: "true"
- name: AUTOSTOP_TIMEOUT_EST
value: "1800" # stop 30 min after last disconnect
volumeMounts:
- name: local-data
mountPath: /data
- name: copy-data-to-persistent
image: rsync
command: ["/bin/sh"]
# args: ["-c", "sleep infinity"]
args: ["/run-rsync.sh"]
volumeMounts:
- name: local-data
mountPath: /local-data
- name: minecraft-data
mountPath: /persistent-data
- name: rsync-config
mountPath: /run-rsync.sh
subPath: run-rsync.sh
mountPath: /data
volumes:
- name: minecraft-data
persistentVolumeClaim:
claimName: minecraft-data
- name: local-data
emptyDir: {}
- name: rsync-config
configMap:
name: rsync-config
defaultMode: 0777

View File

@ -8,7 +8,6 @@ resources:
- pvc.yaml
- job.yaml
- service.yaml
- rsync.configmap.yaml
- curseforge.sealedsecret.yaml
@ -16,9 +15,3 @@ images:
- name: minecraft
newName: itzg/minecraft-server
newTag: java21
- name: alpine
newName: alpine
newTag: "3.21"
- name: rsync
newName: eeacms/rsync
newTag: "2.6"

View File

@ -1,42 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: rsync-config
data:
run-rsync.sh: |-
#!/bin/sh
set -eu
echo "Starting rsync..."
no_change_count=0
while [ "$no_change_count" -lt 3 ]; do
# use the i flag to get per line output of each change
rsync_output=$(rsync -avzi --delete /local-data/ /persistent-data/)
# echo "$rsync_output"
# in this format rsync outputs at least 4 lines:
# ---
# sending incremental file list
#
# sent 145,483 bytes received 717 bytes 26,581.82 bytes/sec
# total size is 708,682,765 speedup is 4,847.35
# ---
# even though a non-zero number of bytes is sent, no changes were made
line_count=$(echo "$rsync_output" | wc -l)
if [ "$line_count" -eq 4 ]; then
echo "Rsync output was: $rsync_output"
no_change_count=$((no_change_count + 1))
echo "No changes detected. Incrementing no_change_count to $no_change_count."
else
no_change_count=0
echo "Changes detected. Resetting no_change_count to 0."
fi
echo "Rsync completed. Sleeping for 10 minutes..."
sleep 600
done
echo "No changes detected for 3 consecutive runs. Exiting."

View File

@ -14,14 +14,14 @@ namespace: paperless
images:
- name: paperless
newName: ghcr.io/paperless-ngx/paperless-ngx
newTag: "2.16.2"
newTag: "2.15.3"
helmCharts:
- name: redis
releaseName: redis
repo: https://charts.bitnami.com/bitnami
version: 21.1.11
version: 20.13.0
valuesInline:
auth:
enabled: false

View File

@ -1,48 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stump
spec:
replicas: 1
selector:
matchLabels:
app: stump
template:
metadata:
labels:
app: stump
spec:
containers:
- name: stump
image: stump
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 10801
envFrom:
- configMapRef:
name: stump-config
volumeMounts:
- name: stump-data
mountPath: /data
- name: stump-config
mountPath: /config
volumes:
- name: stump-config
persistentVolumeClaim:
claimName: stump-config
- name: stump-data
persistentVolumeClaim:
claimName: stump-data

View File

@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- stump-config.configmap.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
namespace: stump
images:
- name: stump
newName: aaronleopold/stump
newTag: "0.0.10"

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

View File

@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: stump-web
spec:
selector:
app: stump
ports:
- port: 10801
targetPort: 10801

View File

@ -1,8 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: stump-config
data:
STUMP_ENABLE_UPLOAD: "true"
STUMP_CONFIG_DIR: /config
ENABLE_KOREADER_SYNC: "true"

View File

@ -7,5 +7,4 @@ data:
# switch to annotation based resource tracking as per
# https://argo-cd.readthedocs.io/en/stable/user-guide/resource_tracking/
application.resourceTrackingMethod: annotation+label
# disable admin user - use oidc
admin.enabled: "false"

View File

@ -4,12 +4,14 @@ kind: Kustomization
namespace: argocd
resources:
- namespace.yaml
- https://github.com/argoproj/argo-cd//manifests/cluster-install?timeout=120&ref=v2.13.3
- https://raw.githubusercontent.com/argoproj/argo-cd/v2.13.3/manifests/install.yaml
- ingress.yaml
- argo-apps.application.yaml
- bootstrap-repo.sealedsecret.yaml
- argocd-oauth.sealedsecret.yaml
- servicemonitor.yaml
# DID NOT FIX RELOAD LOOPS
# - github.com/argoproj/argo-cd/examples/k8s-rbac/argocd-server-applications?ref=master
patches:

View File

@ -27,6 +27,6 @@ images:
helmCharts:
- name: authelia
releaseName: authelia
version: 0.10.10
version: 0.10.4
repo: https://charts.authelia.com
valuesFile: authelia.values.yaml

View File

@ -11,7 +11,7 @@ resources:
images:
- name: octodns
newName: octodns/octodns # has all plugins
newTag: "2025.05"
newTag: "2025.04"
- name: git
newName: alpine/git

View File

@ -170,7 +170,5 @@ postgresql:
enabled: false
postgresql-ha:
enabled: false
valkey:
enabled: false
valkey-cluster:
redis-cluster:
enabled: false

View File

@ -23,6 +23,6 @@ helmCharts:
- name: gitea
namespace: gitea # needs to be set explicitly for svc to be referenced correctly
releaseName: gitea
version: 12.0.0
version: 11.0.1
valuesFile: gitea.values.yaml
repo: https://dl.gitea.io/charts/

View File

@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: headscale
labels:
app: headscale
spec:
selector:
matchLabels:
app: headscale
replicas: 1
template:
metadata:
labels:
app: headscale
spec:
shareProcessNamespace: true
serviceAccountName: default
containers:
- name: headplane
image: headplane
env:
# Set these if the pod name for Headscale is not static
# We will use the downward API to get the pod name instead
- name: HEADPLANE_LOAD_ENV_OVERRIDES
value: 'true'
- name: 'HEADPLANE_INTEGRATION__KUBERNETES__POD_NAME'
valueFrom:
fieldRef:
fieldPath: metadata.name
ports:
- containerPort: 3000
volumeMounts:
- name: headscale-config
mountPath: /etc/headscale/config.yaml
subPath: config.yaml
- name: headplane-config
mountPath: /etc/headplane/config.yaml
subPath: config.yaml
- name: headplane-data
mountPath: /var/lib/headplane
- name: headscale
image: headscale
args: ["serve"]
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 100m
memory: 100Mi
# env:
ports:
- containerPort: 8080
volumeMounts:
- name: headscale-config
mountPath: /etc/headscale/config.yaml
subPath: config.yaml
- mountPath: /persistence
name: headscale-data
terminationGracePeriodSeconds: 30
volumes:
- name: headscale-config
configMap:
name: headscale-config
- name: headscale-data
persistentVolumeClaim:
claimName: headscale-data
- name: headplane-config
configMap:
name: headplane-config
- name: headplane-data
persistentVolumeClaim:
claimName: headplane-data
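Day-to-day administration of headscale can go through the headscale container of this Deployment, since the CLI talks to the server over its local unix socket (configured in headscale-config further down). A hedged sketch, assuming the headscale namespace from the kustomization; the user name alice is only an example:

```
# Create a user and list registered nodes via the running headscale container
kubectl exec -n headscale deploy/headscale -c headscale -- headscale users create alice
kubectl exec -n headscale deploy/headscale -c headscale -- headscale nodes list
```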

View File

@ -0,0 +1,99 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: headplane-config
data:
config.yaml: |
# Configuration for the Headplane server and web application
server:
host: "0.0.0.0"
port: 3000
# The secret used to encode and decode web sessions
# Ensure that this is exactly 32 characters long
cookie_secret: "<change_me_to_something_secure!>"
# Should the cookies only work over HTTPS?
# Set to false if running via HTTP without a proxy
# (I recommend this is true in production)
cookie_secure: true
# Headscale specific settings to allow Headplane to talk
# to Headscale and access deep integration features
headscale:
# The URL to your Headscale instance
# (All API requests are routed through this URL)
# (THIS IS NOT the gRPC endpoint, but the HTTP endpoint)
#
# IMPORTANT: If you are using TLS this MUST be set to `https://`
url: "http://0.0.0.0:8080"
# If you use the TLS configuration in Headscale, and you are not using
# Let's Encrypt for your certificate, pass in the path to the certificate.
# (This has no effect if `url` does not start with `https://`)
# tls_cert_path: "/var/lib/headplane/tls.crt"
# Optional, public URL if they differ
# This affects certain parts of the web UI
# public_url: "https://headscale.example.com"
# Path to the Headscale configuration file
# This is optional, but HIGHLY recommended for the best experience
# If this is read only, Headplane will show your configuration settings
# in the Web UI, but they cannot be changed.
config_path: "/etc/headscale/config.yaml"
# Headplane internally validates the Headscale configuration
# to ensure that it changes the configuration in a safe way.
# If you want to disable this validation, set this to false.
config_strict: true
# Integration configurations for Headplane to interact with Headscale
# Only one of these should be enabled at a time or you will get errors
integration:
kubernetes:
enabled: true
# Validates the manifest for the Pod to ensure all of the criteria
# are set correctly. Turn this off if you are having issues with
# shareProcessNamespace not being validated correctly.
validate_manifest: true
# This should be the name of the Pod running Headscale and Headplane.
# If this isn't static you should be using the Kubernetes Downward API
# to set this value (refer to docs/Integrated-Mode.md for more info).
pod_name: "headscale"
# # OIDC Configuration for simpler authentication
# # (This is optional, but recommended for the best experience)
# oidc:
# issuer: "https://accounts.google.com"
# client_id: "your-client-id"
# # The client secret for the OIDC client
# # Either this or `client_secret_path` must be set for OIDC to work
# client_secret: "<your-client-secret>"
# # You can alternatively set `client_secret_path` to read the secret from disk.
# # The path specified can resolve environment variables, making integration
# # with systemd's `LoadCredential` straightforward:
# # client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# disable_api_key_login: false
# token_endpoint_auth_method: "client_secret_post"
# # If you are using OIDC, you need to generate an API key
# # that can be used to authenticate other sessions when signing in.
# #
# # This can be done with `headscale apikeys create --expiration 999d`
# headscale_api_key: "<your-headscale-api-key>"
# # Optional, but highly recommended otherwise Headplane
# # will attempt to automatically guess this from the issuer
# #
# # This should point to your publicly accessible URL
# # for your Headplane instance with /admin/oidc/callback
# redirect_uri: "http://localhost:3000/admin/oidc/callback"
# # Stores the users and their permissions for Headplane
# # This is a path to a JSON file, default is specified below.
# user_storage_file: "/var/lib/headplane/users.json"
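The cookie_secret in this ConfigMap is still the upstream placeholder and must be replaced with a value of exactly 32 characters. One way to generate such a value (illustrative only, not part of the manifests):

```
# 16 random bytes, hex-encoded: exactly 32 characters
openssl rand -hex 16
```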

View File

@ -0,0 +1,376 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: headscale-config
data:
config.yaml: |
server_url: http://127.0.0.1:8080
# Address to listen to / bind to on the server
#
# For production:
listen_addr: 0.0.0.0:8080
# Address to listen to /metrics and /debug, you may want
# to keep this endpoint private to your internal network
metrics_listen_addr: 127.0.0.1:9090
# Address to listen for gRPC.
# gRPC is used for controlling a headscale server
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
# be unencrypted. Only enable if you know what you
# are doing.
grpc_allow_insecure: false
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the traffic between headscale and
# Tailscale clients when using the new Noise-based protocol. A missing key
# will be automatically generated.
private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
# See below:
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
# Any other range is NOT supported, and it will cause unexpected issues.
prefixes:
v4: 100.64.0.0/10
v6: fd7a:115c:a1e0::/48
# Strategy used for allocation of IPs to nodes, available options:
# - sequential (default): assigns the next free IP from the previous given IP.
# - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
allocation: sequential
# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
#
# headscale needs a list of DERP servers that can be presented
# to the clients.
derp:
server:
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
# The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
enabled: false
# Region ID to use for the embedded DERP server.
# The local DERP prevails if the region ID collides with other region ID coming from
# the regular DERP config.
region_id: 999
# Region code and name are displayed in the Tailscale UI to identify a DERP region
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP and
# Tailscale clients. A missing key will be automatically generated.
private_key_path: /var/lib/headscale/derp_server_private.key
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
# it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths
# If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths
automatically_add_embedded_derp_region: true
# For better connection stability (especially when using an Exit-Node and DNS is not working),
# it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
ipv4: 1.2.3.4
ipv6: 2001:db8::1
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
# Locally available DERP map files encoded in YAML
#
# This option is mostly interesting for people hosting
# their own DERP servers:
# https://tailscale.com/kb/1118/custom-derp-servers/
#
# paths:
# - /etc/headscale/derp-example.yaml
paths: []
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap.
auto_update_enabled: true
# How often should we check for DERP updates?
update_frequency: 24h
# Disables the automatic check for headscale updates on startup
disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m
database:
# Database type. Available options: sqlite, postgres
# Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
# All new development, testing and optimisations are done with SQLite in mind.
type: sqlite
# Enable debug mode. This setting requires the log.level to be set to "debug" or "trace".
debug: false
# GORM configuration settings.
gorm:
# Enable prepared statements.
prepare_stmt: true
# Enable parameterized queries.
parameterized_queries: true
# Skip logging "record not found" errors.
skip_err_record_not_found: true
# Threshold for slow queries in milliseconds.
slow_threshold: 1000
# SQLite config
sqlite:
path: /persistence/db.sqlite
# Enable WAL mode for SQLite. This is recommended for production environments.
# https://www.sqlite.org/wal.html
write_ahead_log: true
# Maximum number of WAL file frames before the WAL file is automatically checkpointed.
# https://www.sqlite.org/c3ref/wal_autocheckpoint.html
# Set to 0 to disable automatic checkpointing.
wal_autocheckpoint: 1000
### TLS configuration
#
## Let's encrypt / ACME
#
# headscale supports automatically requesting and setting up
# TLS for a domain with Let's Encrypt.
#
# URL to ACME directory
acme_url: https://acme-v02.api.letsencrypt.org/directory
# Email to register with ACME provider
acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See: docs/ref/tls.md for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# :http = port 80
tls_letsencrypt_listen: ":http"
## Use already defined certificates:
tls_cert_path: ""
tls_key_path: ""
log:
# Output formatting for logs: text or json
format: text
level: info
## Policy
# headscale supports Tailscale's ACL policies.
# Please have a look to their KB to better
# understand the concepts: https://tailscale.com/kb/1018/acls/
policy:
# The mode can be "file" or "database" that defines
# where the ACL policies are stored and read from.
mode: file
# If the mode is set to "file", the path to a
# HuJSON file containing ACL policies.
path: ""
## DNS
#
# headscale supports Tailscale's DNS configuration and MagicDNS.
# Please have a look to their KB to better understand the concepts:
#
# - https://tailscale.com/kb/1054/dns/
# - https://tailscale.com/kb/1081/magicdns/
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
# Please note that for the DNS configuration to have any effect,
# clients must have the `--accept-dns=true` option enabled. This is the
# default for the Tailscale client.
#
# Setting _any_ of the configuration and `--accept-dns=true` on the
# clients will integrate with the DNS manager on the client or
# overwrite /etc/resolv.conf.
# https://tailscale.com/kb/1235/resolv-conf
#
# If you want to stop Headscale from managing the DNS configuration,
# all the fields under `dns` should be set to empty values.
dns:
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
magic_dns: true
# Defines the base domain to create the hostnames for MagicDNS.
# This domain _must_ be different from the server_url domain.
# `base_domain` must be a FQDN, without the trailing dot.
# The FQDN of the hosts will be
# `hostname.base_domain` (e.g., _myhost.example.com_).
base_domain: example.com
# List of DNS servers to expose to clients.
nameservers:
global:
- 1.1.1.1
- 1.0.0.1
- 2606:4700:4700::1111
- 2606:4700:4700::1001
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
# "abc123" is example NextDNS ID, replace with yours.
# - https://dns.nextdns.io/abc123
# Split DNS (see https://tailscale.com/kb/1054/dns/),
# a map of domains and which DNS server to use for each.
split:
{}
# foo.bar.com:
# - 1.1.1.1
# darp.headscale.net:
# - 1.1.1.1
# - 8.8.8.8
# Set custom DNS search domains. With MagicDNS enabled,
# your tailnet base_domain is always the first search domain.
search_domains: []
# Extra DNS records
# so far only A and AAAA records are supported (on the tailscale side)
# See: docs/ref/dns.md
extra_records: []
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
#
# Alternatively, extra DNS records can be loaded from a JSON file.
# Headscale processes this file on each change.
# extra_records_path: /var/lib/headscale/extra-records.json
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale has experimental OpenID Connect support;
# it is still being tested and might have some bugs, please
# help us test it.
# OpenID Connect
# oidc:
# only_start_if_oidc_is_available: true
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from the file.
# # It resolves environment variables, making integration to systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# # The amount of time from when a node is authenticated with OpenID until it
# # expires and needs to reauthenticate.
# # Setting the value to "0" will mean no expiry.
# expiry: 180d
#
# # Use the expiry from the token received from OpenID when the user logged
# # in, this will typically lead to frequent need to reauthenticate and should
# # only be enabled if you know what you are doing.
# # Note: enabling this will cause `oidc.expiry` to be ignored.
# use_expiry_from_token: false
#
# # Customize the scopes used in the OIDC flow (defaults to "openid", "profile" and "email") and add custom query
# # parameters to the Authorize Endpoint request.
#
# scope: ["openid", "profile", "email", "custom"]
# extra_params:
# domain_hint: example.com
#
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# # authentication request will be rejected.
#
# allowed_domains:
# - example.com
# # Note: Groups from keycloak have a leading '/'
# allowed_groups:
# - /headscale
# allowed_users:
# - alice@example.com
#
# # Optional: PKCE (Proof Key for Code Exchange) configuration
# # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow
# # by preventing authorization code interception attacks
# # See https://datatracker.ietf.org/doc/html/rfc7636
# pkce:
# # Enable or disable PKCE support (default: false)
# enabled: false
# # PKCE method to use:
# # - plain: Use plain code verifier
# # - S256: Use SHA256 hashed code verifier (default, recommended)
# method: S256
#
# # Map legacy users from pre-0.24.0 versions of headscale to the new OIDC users
# # by taking the username from the legacy user and matching it with the username
# # provided by the OIDC. This is useful when migrating from legacy users to OIDC
# # to force them using the unique identifier from the OIDC and to give them a
# # proper display name and picture if available.
# # Note that this will only work if the username from the legacy user is the same
# # and there is a possibility for account takeover should a username have changed
# # with the provider.
# # When this feature is disabled, it will cause all new logins to be created as new users.
# # Note this option will be removed in the future and should be set to false
# # on all new installations, or when all users have logged in with OIDC once.
# map_legacy_users: false
# Logtail configuration
# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
# Enable logtail for this headscale's clients.
# As there is currently no support for overriding the log server in headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
# default static port 41641. This option is intended as a workaround for some buggy
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
randomize_client_port: false

View File

@ -1,17 +1,17 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: stump-ingressroute
name: headscale-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`stump.kluster.moll.re`)
- match: Host(`headscale.kluster.moll.re`)
kind: Rule
services:
- name: stump-web
port: 10801
- name: headscale-web
port: 8080
tls:
certResolver: default-tls
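With the IngressRoute now pointing at the headscale-web service, the control server can be smoke-tested through the ingress. A hedged check, assuming headscale's built-in /health endpoint:

```
# Expect a 2xx response once headscale is reachable through Traefik
curl -fsS https://headscale.kluster.moll.re/health
```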

View File

@ -0,0 +1,22 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: headscale
resources:
- namespace.yaml
- headscale-config.configmap.yaml
- headplane-config.configmap.yaml
- pvc.yaml
- deployment.yaml
- serviceaccount.yaml
- service.yaml
- ingress.yaml
images:
- name: headscale
newName: headscale/headscale # has all plugins
newTag: v0.25.1
- name: headplane
newName: ghcr.io/tale/headplane
newTag: "0.5.10"

View File

@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged

View File

@ -1,23 +1,23 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: stump-data
name: headscale-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: stump-config
name: headplane-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storage: 1Gi

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: headscale-web
spec:
selector:
app: headscale
ports:
- port: 8080
targetPort: 8080

View File

@ -0,0 +1,26 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: headplane-agent
# namespace: default # Adjust namespace as needed
rules:
- apiGroups: ['']
resources: ['pods']
verbs: ['get', 'list']
- apiGroups: ['apps']
resources: ['deployments']
verbs: ['get', 'list']
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: headplane-agent
# namespace: default # Adjust namespace as needed
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: headplane-agent
subjects:
- kind: ServiceAccount
name: default # If you use a different service account, change this
# namespace: default # Adjust namespace as needed
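The Role and RoleBinding above grant the default ServiceAccount (which the Deployment uses) read access to pods and deployments so that Headplane's Kubernetes integration can resolve the pod name. Whether the grant is effective can be checked with kubectl impersonation; a sketch assuming everything lands in the headscale namespace:

```
# Both should print "yes" once the Role and RoleBinding are applied
kubectl auth can-i list pods -n headscale --as=system:serviceaccount:headscale:default
kubectl auth can-i list deployments -n headscale --as=system:serviceaccount:headscale:default
```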

View File

@ -6,7 +6,7 @@ namespace: monitoring
resources:
- namespace.yaml
# prometheus-operator crds
- https://github.com/prometheus-operator/prometheus-operator?ref=v0.82.2
- https://github.com/prometheus-operator/prometheus-operator?ref=v0.82.0
# single prometheus instance with a thanos sidecar
- prometheus.yaml
- thanos-store.statefulset.yaml
@ -24,10 +24,10 @@ helmCharts:
- name: loki
releaseName: loki
repo: https://grafana.github.io/helm-charts
version: 6.30.1
version: 6.29.0
valuesFile: loki.values.yaml
- name: prometheus-node-exporter
releaseName: prometheus-node-exporter
repo: https://prometheus-community.github.io/helm-charts
version: 4.46.1
version: 4.45.2
valuesFile: prometheus-node-exporter.values.yaml

View File

@ -9,6 +9,6 @@ namespace: pg-ha
helmCharts:
- name: cloudnative-pg
releaseName: pg-controller
version: 0.24.0
version: 0.23.2
valuesFile: values.yaml
repo: https://cloudnative-pg.io/charts/

View File

@ -11,4 +11,4 @@ resources:
images:
- name: renovate/renovate
newName: renovate/renovate
newTag: "40"
newTag: "39"

View File

@ -13,6 +13,6 @@ namespace: traefik-system
helmCharts:
- name: traefik
releaseName: traefik
version: 35.4.0
version: 35.0.1
valuesFile: values.yaml
repo: https://traefik.github.io/charts

View File

@ -41,6 +41,5 @@ resources:
- paperless/
- recipes/
- rss/
- stump/
- todos/
- whoami/

View File

@ -1,18 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: stump-application
spec:
project: apps
destination:
server: https://kubernetes.default.svc
namespace: stump
syncPolicy:
automated:
prune: true
selfHeal: true
sources:
- repoURL: ssh://git@git.kluster.moll.re:2222/remoll/k3s-infra.git
targetRevision: main
path: apps/stump

View File

@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- application.yaml

View File

@ -1,14 +1,4 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"dependencyDashboard": true,
"extends": [
"local>remoll/k3s-infra//apps/immich/renovate.json"
],
"packageRules": [
{
"matchUpdateTypes": ["patch"],
"automerge": true,
"ignoreTests": true
}
]
"dependencyDashboard": true
}