initial migration
		
							
								
								
									
apps/monitoring/grafana.ingress.yaml (Normal file, 17 lines)
									
								
							| @@ -0,0 +1,17 @@ | ||||
| kind: IngressRoute | ||||
| apiVersion: traefik.containo.us/v1alpha1 | ||||
| metadata: | ||||
|   name: grafana-ingress | ||||
| spec: | ||||
|   entryPoints: | ||||
|     - websecure | ||||
|   routes: | ||||
|     - match: Host(`grafana.kluster.moll.re`) | ||||
|       kind: Rule | ||||
|       services: | ||||
|         - name: grafana | ||||
|           port: 80 | ||||
|   tls: | ||||
|     certResolver: default-tls | ||||
|  | ||||
|  | ||||
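The IngressRoute above sends grafana.kluster.moll.re to a Service named grafana on port 80. That Service is not part of this commit; the Grafana Helm chart renders it from the service: block in grafana.values.yaml below (ClusterIP, port 80, targetPort 3000). A minimal sketch of the equivalent Service, assuming the release is named grafana and runs in the monitoring namespace:

apiVersion: v1
kind: Service
metadata:
  name: grafana                # assumed release/service name referenced by the IngressRoute
  namespace: monitoring        # assumed namespace
spec:
  type: ClusterIP
  ports:
    - name: service            # portName from grafana.values.yaml
      port: 80                 # port the IngressRoute targets
      targetPort: 3000         # Grafana container port
  selector:
    app.kubernetes.io/name: grafana   # the real selector is chart-generated; placeholder here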
							
								
								
									
apps/monitoring/grafana.pvc.yaml (Normal file, 35 lines)
									
								
							| @@ -0,0 +1,35 @@ | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: PersistentVolume | ||||
| metadata: | ||||
|   name: grafana-nfs | ||||
|   labels: | ||||
|     directory: grafana | ||||
| spec: | ||||
|   storageClassName: slow | ||||
|   capacity: | ||||
|     storage: "1Gi" | ||||
|   volumeMode: Filesystem | ||||
|   accessModes: | ||||
|     - ReadWriteOnce | ||||
|   nfs: | ||||
|     path: /export/kluster/grafana | ||||
|     server: 192.168.1.157 | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: PersistentVolumeClaim | ||||
| metadata: | ||||
|   name: grafana-nfs | ||||
| spec: | ||||
|   storageClassName: slow | ||||
|   accessModes: | ||||
|     - ReadWriteOnce | ||||
|   resources: | ||||
|     requests: | ||||
|       storage: "1Gi" | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       directory: grafana | ||||
|  | ||||
|  | ||||
|  | ||||
							
								
								
									
apps/monitoring/grafana.values.yaml (Normal file, 873 lines)
									
								
							| @@ -0,0 +1,873 @@ | ||||
| rbac: | ||||
|   create: true | ||||
|   ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) | ||||
|   # useExistingRole: name-of-some-(cluster)role | ||||
|   pspEnabled: true | ||||
|   pspUseAppArmor: true | ||||
|   namespaced: false | ||||
|   extraRoleRules: [] | ||||
|   # - apiGroups: [] | ||||
|   #   resources: [] | ||||
|   #   verbs: [] | ||||
|   extraClusterRoleRules: [] | ||||
|   # - apiGroups: [] | ||||
|   #   resources: [] | ||||
|   #   verbs: [] | ||||
| serviceAccount: | ||||
|   create: true | ||||
|   name: | ||||
|   nameTest: | ||||
| ## Service account annotations. Can be templated. | ||||
| #  annotations: | ||||
| #    eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here | ||||
|   autoMount: true | ||||
|  | ||||
| replicas: 1 | ||||
|  | ||||
| ## Create a headless service for the deployment | ||||
| headlessService: false | ||||
|  | ||||
| ## Create HorizontalPodAutoscaler object for deployment type | ||||
| # | ||||
| autoscaling: | ||||
|   enabled: false | ||||
| #   minReplicas: 1 | ||||
| #   maxReplicas: 10 | ||||
| #   metrics: | ||||
| #   - type: Resource | ||||
| #     resource: | ||||
| #       name: cpu | ||||
| #       targetAverageUtilization: 60 | ||||
| #   - type: Resource | ||||
| #     resource: | ||||
| #       name: memory | ||||
| #       targetAverageUtilization: 60 | ||||
|  | ||||
| ## See `kubectl explain poddisruptionbudget.spec` for more | ||||
| ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ | ||||
| podDisruptionBudget: {} | ||||
| #  minAvailable: 1 | ||||
| #  maxUnavailable: 1 | ||||
|  | ||||
| ## See `kubectl explain deployment.spec.strategy` for more | ||||
| ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy | ||||
| deploymentStrategy: | ||||
|   type: RollingUpdate | ||||
|  | ||||
| readinessProbe: | ||||
|   httpGet: | ||||
|     path: /api/health | ||||
|     port: 3000 | ||||
|  | ||||
| livenessProbe: | ||||
|   httpGet: | ||||
|     path: /api/health | ||||
|     port: 3000 | ||||
|   initialDelaySeconds: 60 | ||||
|   timeoutSeconds: 30 | ||||
|   failureThreshold: 10 | ||||
|  | ||||
| ## Use an alternate scheduler, e.g. "stork". | ||||
| ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ | ||||
| ## | ||||
| # schedulerName: "default-scheduler" | ||||
|  | ||||
| image: | ||||
|   repository: grafana/grafana | ||||
|   tag: 9.0.2 | ||||
|   sha: "" | ||||
|   pullPolicy: IfNotPresent | ||||
|  | ||||
|   ## Optionally specify an array of imagePullSecrets. | ||||
|   ## Secrets must be manually created in the namespace. | ||||
|   ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | ||||
|   ## Can be templated. | ||||
|   ## | ||||
|   # pullSecrets: | ||||
|   #   - myRegistryKeySecretName | ||||
|  | ||||
| testFramework: | ||||
|   enabled: true | ||||
|   image: "bats/bats" | ||||
|   tag: "v1.4.1" | ||||
|   imagePullPolicy: IfNotPresent | ||||
|   securityContext: {} | ||||
|  | ||||
| securityContext: | ||||
|   runAsUser: 472 | ||||
|   runAsGroup: 472 | ||||
|   fsGroup: 472 | ||||
|  | ||||
| containerSecurityContext: | ||||
|   {} | ||||
|  | ||||
| # Extra configmaps to mount in grafana pods | ||||
| # Values are templated. | ||||
| extraConfigmapMounts: [] | ||||
|   # - name: certs-configmap | ||||
|   #   mountPath: /etc/grafana/ssl/ | ||||
|   #   subPath: certificates.crt # (optional) | ||||
|   #   configMap: certs-configmap | ||||
|   #   readOnly: true | ||||
|  | ||||
|  | ||||
| extraEmptyDirMounts: [] | ||||
|   # - name: provisioning-notifiers | ||||
|   #   mountPath: /etc/grafana/provisioning/notifiers | ||||
|  | ||||
|  | ||||
| # Apply extra labels to common labels. | ||||
| extraLabels: {} | ||||
|  | ||||
| ## Assign a PriorityClassName to pods if set | ||||
| # priorityClassName: | ||||
|  | ||||
| downloadDashboardsImage: | ||||
|   repository: curlimages/curl | ||||
|   tag: 7.73.0 | ||||
|   sha: "" | ||||
|   pullPolicy: IfNotPresent | ||||
|  | ||||
| downloadDashboards: | ||||
|   env: {} | ||||
|   envFromSecret: "" | ||||
|   resources: {} | ||||
|  | ||||
| ## Pod Annotations | ||||
| # podAnnotations: {} | ||||
|  | ||||
| ## Pod Labels | ||||
| # podLabels: {} | ||||
|  | ||||
| podPortName: grafana | ||||
|  | ||||
| ## Deployment annotations | ||||
| # annotations: {} | ||||
|  | ||||
| ## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service) | ||||
| ## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. | ||||
| ## ref: http://kubernetes.io/docs/user-guide/services/ | ||||
| ## | ||||
| service: | ||||
|   enabled: true | ||||
|   type: ClusterIP | ||||
|   port: 80 | ||||
|   targetPort: 3000 | ||||
|     # targetPort: 4181 To be used with a proxy extraContainer | ||||
|   annotations: {} | ||||
|   labels: {} | ||||
|   portName: service | ||||
|  | ||||
| serviceMonitor: | ||||
|   ## If true, a ServiceMonitor CRD is created for a prometheus operator | ||||
|   ## https://github.com/coreos/prometheus-operator | ||||
|   ## | ||||
|   enabled: false | ||||
|   path: /metrics | ||||
|   #  namespace: monitoring  (defaults to use the namespace this chart is deployed to) | ||||
|   labels: {} | ||||
|   interval: 1m | ||||
|   scheme: http | ||||
|   tlsConfig: {} | ||||
|   scrapeTimeout: 30s | ||||
|   relabelings: [] | ||||
|  | ||||
| extraExposePorts: [] | ||||
|  # - name: keycloak | ||||
|  #   port: 8080 | ||||
|  #   targetPort: 8080 | ||||
|  #   type: ClusterIP | ||||
|  | ||||
| # overrides pod.spec.hostAliases in the grafana deployment's pods | ||||
| hostAliases: [] | ||||
|   # - ip: "1.2.3.4" | ||||
|   #   hostnames: | ||||
|   #     - "my.host.com" | ||||
|  | ||||
| ingress: | ||||
|   enabled: true | ||||
|   # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName | ||||
|   # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress | ||||
|   # ingressClassName: nginx | ||||
|   # Values can be templated | ||||
|   annotations: { | ||||
|     kubernetes.io/ingress.class: nginx, | ||||
|     cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod | ||||
|   } | ||||
|     # kubernetes.io/ingress.class: nginx | ||||
|     # kubernetes.io/tls-acme: "true" | ||||
|   labels: {} | ||||
|   path: / | ||||
|  | ||||
|   # pathType is only for k8s >= 1.18 | ||||
|   pathType: Prefix | ||||
|  | ||||
|   hosts: | ||||
|     - grafana.kluster.moll.re | ||||
|   ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. | ||||
|   extraPaths: [] | ||||
|   # - path: /* | ||||
|   #   backend: | ||||
|   #     serviceName: ssl-redirect | ||||
|   #     servicePort: use-annotation | ||||
|   ## Or for k8s > 1.19 | ||||
|   # - path: /* | ||||
|   #   pathType: Prefix | ||||
|   #   backend: | ||||
|   #     service: | ||||
|   #       name: ssl-redirect | ||||
|   #       port: | ||||
|   #         name: use-annotation | ||||
|  | ||||
|  | ||||
|   tls:  | ||||
|     - hosts: | ||||
|       - grafana.kluster.moll.re | ||||
|       secretName: cloudflare-letsencrypt-issuer-account-key | ||||
|     #  - secretName: chart-example-tls | ||||
|   #    hosts: | ||||
|   #      - chart-example.local | ||||
|  | ||||
| resources: {} | ||||
| #  limits: | ||||
| #    cpu: 100m | ||||
| #    memory: 128Mi | ||||
| #  requests: | ||||
| #    cpu: 100m | ||||
| #    memory: 128Mi | ||||
|  | ||||
| ## Node labels for pod assignment | ||||
| ## ref: https://kubernetes.io/docs/user-guide/node-selection/ | ||||
| # | ||||
| nodeSelector: {} | ||||
|  | ||||
| ## Tolerations for pod assignment | ||||
| ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ||||
| ## | ||||
| tolerations: [] | ||||
|  | ||||
| ## Affinity for pod assignment (evaluated as template) | ||||
| ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | ||||
| ## | ||||
| affinity: {} | ||||
|  | ||||
| ## Additional init containers (evaluated as template) | ||||
| ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ | ||||
| ## | ||||
| extraInitContainers: [] | ||||
|  | ||||
| ## Enable and specify containers in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod | ||||
| extraContainers: "" | ||||
| # extraContainers: | | ||||
| # - name: proxy | ||||
| #   image: quay.io/gambol99/keycloak-proxy:latest | ||||
| #   args: | ||||
| #   - -provider=github | ||||
| #   - -client-id= | ||||
| #   - -client-secret= | ||||
| #   - -github-org=<ORG_NAME> | ||||
| #   - -email-domain=* | ||||
| #   - -cookie-secret= | ||||
| #   - -http-address=http://0.0.0.0:4181 | ||||
| #   - -upstream-url=http://127.0.0.1:3000 | ||||
| #   ports: | ||||
| #     - name: proxy-web | ||||
| #       containerPort: 4181 | ||||
|  | ||||
| ## Volumes that can be used in init containers that will not be mounted to deployment pods | ||||
| extraContainerVolumes: [] | ||||
| #  - name: volume-from-secret | ||||
| #    secret: | ||||
| #      secretName: secret-to-mount | ||||
| #  - name: empty-dir-volume | ||||
| #    emptyDir: {} | ||||
|  | ||||
| ## Enable persistence using Persistent Volume Claims | ||||
| ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | ||||
| ## | ||||
| persistence: | ||||
|   type: pvc | ||||
|   enabled: true | ||||
|   # storageClassName: default | ||||
|   accessModes: | ||||
|     - ReadWriteOnce | ||||
|   size: 10Gi | ||||
|   # annotations: {} | ||||
|   finalizers: | ||||
|     - kubernetes.io/pvc-protection | ||||
|   # selectorLabels: {} | ||||
|   ## Sub-directory of the PV to mount. Can be templated. | ||||
|   # subPath: "" | ||||
|   ## Name of an existing PVC. Can be templated. | ||||
|   existingClaim: grafana-nfs | ||||
|  | ||||
|   ## If persistence is not enabled, this allows mounting the | ||||
|   ## local storage in-memory to improve performance | ||||
|   ## | ||||
|   inMemory: | ||||
|     enabled: false | ||||
|     ## The maximum usage on memory medium EmptyDir would be | ||||
|     ## the minimum value between the SizeLimit specified | ||||
|     ## here and the sum of memory limits of all containers in a pod | ||||
|     ## | ||||
|     # sizeLimit: 300Mi | ||||
|  | ||||
| initChownData: | ||||
|   ## If false, data ownership will not be reset at startup | ||||
|   ## This allows Grafana to be run with an arbitrary user | ||||
|   ## | ||||
|   enabled: true | ||||
|  | ||||
|   ## initChownData container image | ||||
|   ## | ||||
|   image: | ||||
|     repository: busybox | ||||
|     tag: "1.31.1" | ||||
|     sha: "" | ||||
|     pullPolicy: IfNotPresent | ||||
|  | ||||
|   ## initChownData resource requests and limits | ||||
|   ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ | ||||
|   ## | ||||
|   resources: {} | ||||
|   #  limits: | ||||
|   #    cpu: 100m | ||||
|   #    memory: 128Mi | ||||
|   #  requests: | ||||
|   #    cpu: 100m | ||||
|   #    memory: 128Mi | ||||
|  | ||||
|  | ||||
| # Administrator credentials when not using an existing secret (see below) | ||||
| adminUser: admin | ||||
| # adminPassword: strongpassword | ||||
|  | ||||
| # Use an existing secret for the admin user. | ||||
| admin: | ||||
|   ## Name of the secret. Can be templated. | ||||
|   existingSecret: "" | ||||
|   userKey: admin-user | ||||
|   passwordKey: admin-password | ||||
|  | ||||
| ## Define command to be executed at startup by grafana container | ||||
| ## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) | ||||
| ## Default is "run.sh" as defined in grafana's Dockerfile | ||||
| # command: | ||||
| # - "sh" | ||||
| # - "/run.sh" | ||||
|  | ||||
| ## Use an alternate scheduler, e.g. "stork". | ||||
| ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ | ||||
| ## | ||||
| # schedulerName: | ||||
|  | ||||
| ## Extra environment variables that will be passed onto deployment pods | ||||
| ## | ||||
| ## to provide grafana with access to CloudWatch on AWS EKS: | ||||
| ## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) | ||||
| ## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the | ||||
| ## same oidc eks provider as noted before (same as the existing line) | ||||
| ## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name | ||||
| ## | ||||
| ##  "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", | ||||
| ## | ||||
| ## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess | ||||
| ## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) | ||||
| ## | ||||
| ## env: | ||||
| ##   AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here | ||||
| ##   AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token | ||||
| ##   AWS_REGION: us-east-1 | ||||
| ## | ||||
| ## 5. uncomment the EKS section in extraSecretMounts: below | ||||
| ## 6. uncomment the annotation section in the serviceAccount: above | ||||
| ## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn | ||||
|  | ||||
| env: {} | ||||
|  | ||||
| ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. | ||||
| ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core | ||||
| ## Renders in container spec as: | ||||
| ##   env: | ||||
| ##     ... | ||||
| ##     - name: <key> | ||||
| ##       valueFrom: | ||||
| ##         <value rendered as YAML> | ||||
| envValueFrom: {} | ||||
|   #  ENV_NAME: | ||||
|   #    configMapKeyRef: | ||||
|   #      name: configmap-name | ||||
|   #      key: value_key | ||||
|  | ||||
| ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment | ||||
| ## This can be useful for auth tokens, etc. Value is templated. | ||||
| envFromSecret: "" | ||||
|  | ||||
| ## Sensitive environment variables that will be rendered as a new secret object | ||||
| ## This can be useful for auth tokens, etc | ||||
| envRenderSecret: {} | ||||
|  | ||||
| ## The names of secrets in the same kubernetes namespace which contain values to be added to the environment | ||||
| ## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. | ||||
| ## Name is templated. | ||||
| envFromSecrets: [] | ||||
| ## - name: secret-name | ||||
| ##   optional: true | ||||
|  | ||||
| ## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment | ||||
| ## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. | ||||
| ## Name is templated. | ||||
| ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core | ||||
| envFromConfigMaps: [] | ||||
| ## - name: configmap-name | ||||
| ##   optional: true | ||||
|  | ||||
| # Inject Kubernetes services as environment variables. | ||||
| # See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables | ||||
| enableServiceLinks: true | ||||
|  | ||||
| ## Additional grafana server secret mounts | ||||
| # Defines additional mounts with secrets. Secrets must be manually created in the namespace. | ||||
| extraSecretMounts: [] | ||||
|   # - name: secret-files | ||||
|   #   mountPath: /etc/secrets | ||||
|   #   secretName: grafana-secret-files | ||||
|   #   readOnly: true | ||||
|   #   subPath: "" | ||||
|   # | ||||
|   # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) | ||||
|   # - name: aws-iam-token | ||||
|   #   mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount | ||||
|   #   readOnly: true | ||||
|   #   projected: | ||||
|   #     defaultMode: 420 | ||||
|   #     sources: | ||||
|   #       - serviceAccountToken: | ||||
|   #           audience: sts.amazonaws.com | ||||
|   #           expirationSeconds: 86400 | ||||
|   #           path: token | ||||
|   # | ||||
|   # for CSI e.g. Azure Key Vault use the following | ||||
|   # - name: secrets-store-inline | ||||
|   #  mountPath: /run/secrets | ||||
|   #  readOnly: true | ||||
|   #  csi: | ||||
|   #    driver: secrets-store.csi.k8s.io | ||||
|   #    readOnly: true | ||||
|   #    volumeAttributes: | ||||
|   #      secretProviderClass: "akv-grafana-spc" | ||||
|   #    nodePublishSecretRef:                       # Only required when using service principal mode | ||||
|   #       name: grafana-akv-creds                  # Only required when using service principal mode | ||||
|  | ||||
| ## Additional grafana server volume mounts | ||||
| # Defines additional volume mounts. | ||||
| extraVolumeMounts: [] | ||||
|   # - name: extra-volume-0 | ||||
|   #   mountPath: /mnt/volume0 | ||||
|   #   readOnly: true | ||||
|   #   existingClaim: volume-claim | ||||
|   # - name: extra-volume-1 | ||||
|   #   mountPath: /mnt/volume1 | ||||
|   #   readOnly: true | ||||
|   #   hostPath: /usr/shared/ | ||||
|  | ||||
| ## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request | ||||
| lifecycleHooks: {} | ||||
|   # postStart: | ||||
|   #   exec: | ||||
|   #     command: [] | ||||
|  | ||||
| ## Pass the plugins you want installed as a list. | ||||
| ## | ||||
| plugins: [] | ||||
|   # - digrich-bubblechart-panel | ||||
|   # - grafana-clock-panel | ||||
|  | ||||
| ## Configure grafana datasources | ||||
| ## ref: http://docs.grafana.org/administration/provisioning/#datasources | ||||
| ## | ||||
| datasources: {} | ||||
| #  datasources.yaml: | ||||
| #    apiVersion: 1 | ||||
| #    datasources: | ||||
| #    - name: Prometheus | ||||
| #      type: prometheus | ||||
| #      url: http://prometheus-prometheus-server | ||||
| #      access: proxy | ||||
| #      isDefault: true | ||||
| #    - name: CloudWatch | ||||
| #      type: cloudwatch | ||||
| #      access: proxy | ||||
| #      uid: cloudwatch | ||||
| #      editable: false | ||||
| #      jsonData: | ||||
| #        authType: default | ||||
| #        defaultRegion: us-east-1 | ||||
|  | ||||
| ## Configure notifiers | ||||
| ## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels | ||||
| ## | ||||
| notifiers: {} | ||||
| #  notifiers.yaml: | ||||
| #    notifiers: | ||||
| #    - name: email-notifier | ||||
| #      type: email | ||||
| #      uid: email1 | ||||
| #      # either: | ||||
| #      org_id: 1 | ||||
| #      # or | ||||
| #      org_name: Main Org. | ||||
| #      is_default: true | ||||
| #      settings: | ||||
| #        addresses: an_email_address@example.com | ||||
| #    delete_notifiers: | ||||
|  | ||||
| ## Configure grafana dashboard providers | ||||
| ## ref: http://docs.grafana.org/administration/provisioning/#dashboards | ||||
| ## | ||||
| ## `path` must be /var/lib/grafana/dashboards/<provider_name> | ||||
| ## | ||||
| dashboardProviders: {} | ||||
| #  dashboardproviders.yaml: | ||||
| #    apiVersion: 1 | ||||
| #    providers: | ||||
| #    - name: 'default' | ||||
| #      orgId: 1 | ||||
| #      folder: '' | ||||
| #      type: file | ||||
| #      disableDeletion: false | ||||
| #      editable: true | ||||
| #      options: | ||||
| #        path: /var/lib/grafana/dashboards/default | ||||
|  | ||||
| ## Configure grafana dashboard to import | ||||
| ## NOTE: To use dashboards you must also enable/configure dashboardProviders | ||||
| ## ref: https://grafana.com/dashboards | ||||
| ## | ||||
| ## dashboards per provider, use provider name as key. | ||||
| ## | ||||
| dashboards: {} | ||||
|   # default: | ||||
|   #   some-dashboard: | ||||
|   #     json: | | ||||
|   #       $RAW_JSON | ||||
|   #   custom-dashboard: | ||||
|   #     file: dashboards/custom-dashboard.json | ||||
|   #   prometheus-stats: | ||||
|   #     gnetId: 2 | ||||
|   #     revision: 2 | ||||
|   #     datasource: Prometheus | ||||
|   #   local-dashboard: | ||||
|   #     url: https://example.com/repository/test.json | ||||
|   #     token: '' | ||||
|   #   local-dashboard-base64: | ||||
|   #     url: https://example.com/repository/test-b64.json | ||||
|   #     token: '' | ||||
|   #     b64content: true | ||||
|  | ||||
| ## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. | ||||
| ## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, not both. | ||||
| ## ConfigMap data example: | ||||
| ## | ||||
| ## data: | ||||
| ##   example-dashboard.json: | | ||||
| ##     RAW_JSON | ||||
| ## | ||||
| dashboardsConfigMaps: {} | ||||
| #  default: "" | ||||
|  | ||||
| ## Grafana's primary configuration | ||||
| ## NOTE: values in map will be converted to ini format | ||||
| ## ref: http://docs.grafana.org/installation/configuration/ | ||||
| ## | ||||
| grafana.ini: | ||||
|   paths: | ||||
|     data: /var/lib/grafana/ | ||||
|     logs: /var/log/grafana | ||||
|     plugins: /var/lib/grafana/plugins | ||||
|     provisioning: /etc/grafana/provisioning | ||||
|   analytics: | ||||
|     check_for_updates: true | ||||
|   log: | ||||
|     mode: console | ||||
|   grafana_net: | ||||
|     url: https://grafana.net | ||||
| ## grafana Authentication can be enabled with the following values on grafana.ini | ||||
|  # server: | ||||
|       # The full public facing url you use in browser, used for redirects and emails | ||||
|  #    root_url: | ||||
|  # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana | ||||
|  # auth.github: | ||||
|  #    enabled: false | ||||
|  #    allow_sign_up: false | ||||
|  #    scopes: user:email,read:org | ||||
|  #    auth_url: https://github.com/login/oauth/authorize | ||||
|  #    token_url: https://github.com/login/oauth/access_token | ||||
|  #    api_url: https://api.github.com/user | ||||
|  #    team_ids: | ||||
|  #    allowed_organizations: | ||||
|  #    client_id: | ||||
|  #    client_secret: | ||||
| ## LDAP Authentication can be enabled with the following values on grafana.ini | ||||
| ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid | ||||
|   # auth.ldap: | ||||
|   #   enabled: true | ||||
|   #   allow_sign_up: true | ||||
|   #   config_file: /etc/grafana/ldap.toml | ||||
|  | ||||
| ## Grafana's LDAP configuration | ||||
| ## Templated by the template in _helpers.tpl | ||||
| ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled | ||||
| ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap | ||||
| ## ref: http://docs.grafana.org/installation/ldap/#configuration | ||||
| ldap: | ||||
|   enabled: false | ||||
|   # `existingSecret` is a reference to an existing secret containing the ldap configuration | ||||
|   # for Grafana in a key `ldap-toml`. | ||||
|   existingSecret: "" | ||||
|   # `config` is the content of `ldap.toml` that will be stored in the created secret | ||||
|   config: "" | ||||
|   # config: |- | ||||
|   #   verbose_logging = true | ||||
|  | ||||
|   #   [[servers]] | ||||
|   #   host = "my-ldap-server" | ||||
|   #   port = 636 | ||||
|   #   use_ssl = true | ||||
|   #   start_tls = false | ||||
|   #   ssl_skip_verify = false | ||||
|   #   bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" | ||||
|  | ||||
| ## Grafana's SMTP configuration | ||||
| ## NOTE: To enable, grafana.ini must be configured with smtp.enabled | ||||
| ## ref: http://docs.grafana.org/installation/configuration/#smtp | ||||
| smtp: | ||||
|   # `existingSecret` is a reference to an existing secret containing the smtp configuration | ||||
|   # for Grafana. | ||||
|   existingSecret: "" | ||||
|   userKey: "user" | ||||
|   passwordKey: "password" | ||||
|  | ||||
| ## Sidecars that collect the configmaps with the specified label and store the included files in the respective folders | ||||
| ## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards | ||||
| sidecar: | ||||
|   image: | ||||
|     repository: quay.io/kiwigrid/k8s-sidecar | ||||
|     tag: 1.15.6 | ||||
|     sha: "" | ||||
|   imagePullPolicy: IfNotPresent | ||||
|   resources: {} | ||||
| #   limits: | ||||
| #     cpu: 100m | ||||
| #     memory: 100Mi | ||||
| #   requests: | ||||
| #     cpu: 50m | ||||
| #     memory: 50Mi | ||||
|   securityContext: {} | ||||
|   # skipTlsVerify Set to true to skip tls verification for kube api calls | ||||
|   # skipTlsVerify: true | ||||
|   enableUniqueFilenames: false | ||||
|   readinessProbe: {} | ||||
|   livenessProbe: {} | ||||
|   dashboards: | ||||
|     enabled: false | ||||
|     SCProvider: true | ||||
|     # label that the configmaps with dashboards are marked with | ||||
|     label: grafana_dashboard | ||||
|     # value of label that the configmaps with dashboards are set to | ||||
|     labelValue: null | ||||
|     # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) | ||||
|     folder: /tmp/dashboards | ||||
|     # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead | ||||
|     defaultFolderName: null | ||||
|     # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. | ||||
|     # Otherwise the namespace in which the sidecar is running will be used. | ||||
|     # It's also possible to specify ALL to search in all namespaces. | ||||
|     searchNamespace: null | ||||
|     # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | ||||
|     watchMethod: WATCH | ||||
|     # search in configmap, secret or both | ||||
|     resource: both | ||||
|     # If specified, the sidecar will look for an annotation with this name to create a folder and put the dashboard there. | ||||
|     # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create a folder structure. | ||||
|     folderAnnotation: null | ||||
|     # Absolute path to shell script to execute after a configmap got reloaded | ||||
|     script: null | ||||
|     # watchServerTimeout: timeout passed to the server in the watch request, asking it to cleanly close the connection after that time. | ||||
|     # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S | ||||
|     # watchServerTimeout: 3600 | ||||
|     # | ||||
|     # watchClientTimeout: is a client-side timeout, configuring your local socket. | ||||
|     # If you have a network outage dropping all packets with no RST/FIN, | ||||
|     # this is how long your client waits before realizing & dropping the connection. | ||||
|     # defaults to 66sec (sic!) | ||||
|     # watchClientTimeout: 60 | ||||
|     # | ||||
|     # provider configuration that lets grafana manage the dashboards | ||||
|     provider: | ||||
|       # name of the provider, should be unique | ||||
|       name: sidecarProvider | ||||
|       # orgid as configured in grafana | ||||
|       orgid: 1 | ||||
|       # folder in which the dashboards should be imported in grafana | ||||
|       folder: '' | ||||
|       # type of the provider | ||||
|       type: file | ||||
|       # disableDelete to activate an import-only behaviour | ||||
|       disableDelete: false | ||||
|       # allow updating provisioned dashboards from the UI | ||||
|       allowUiUpdates: false | ||||
|       # allow Grafana to replicate dashboard structure from filesystem | ||||
|       foldersFromFilesStructure: false | ||||
|     # Additional dashboard sidecar volume mounts | ||||
|     extraMounts: [] | ||||
|     # Sets the size limit of the dashboard sidecar emptyDir volume | ||||
|     sizeLimit: {} | ||||
|   datasources: | ||||
|     enabled: false | ||||
|     # label that the configmaps with datasources are marked with | ||||
|     label: grafana_datasource | ||||
|     # value of label that the configmaps with datasources are set to | ||||
|     labelValue: null | ||||
|     # If specified, the sidecar will search for datasource config-maps inside this namespace. | ||||
|     # Otherwise the namespace in which the sidecar is running will be used. | ||||
|     # It's also possible to specify ALL to search in all namespaces | ||||
|     searchNamespace: null | ||||
|     # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | ||||
|     watchMethod: WATCH | ||||
|     # search in configmap, secret or both | ||||
|     resource: both | ||||
|     # Endpoint to send request to reload datasources | ||||
|     reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" | ||||
|     skipReload: false | ||||
|     # Deploy the datasource sidecar as an initContainer in addition to a container. | ||||
|     # This is needed if skipReload is true, to load any datasources defined at startup time. | ||||
|     initDatasources: false | ||||
|     # Sets the size limit of the datasource sidecar emptyDir volume | ||||
|     sizeLimit: {} | ||||
|   plugins: | ||||
|     enabled: false | ||||
|     # label that the configmaps with plugins are marked with | ||||
|     label: grafana_plugin | ||||
|     # value of label that the configmaps with plugins are set to | ||||
|     labelValue: null | ||||
|     # If specified, the sidecar will search for plugin config-maps inside this namespace. | ||||
|     # Otherwise the namespace in which the sidecar is running will be used. | ||||
|     # It's also possible to specify ALL to search in all namespaces | ||||
|     searchNamespace: null | ||||
|     # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | ||||
|     watchMethod: WATCH | ||||
|     # search in configmap, secret or both | ||||
|     resource: both | ||||
|     # Endpoint to send request to reload plugins | ||||
|     reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" | ||||
|     skipReload: false | ||||
|     # Deploy the datasource sidecar as an initContainer in addition to a container. | ||||
|     # This is needed if skipReload is true, to load any plugins defined at startup time. | ||||
|     initPlugins: false | ||||
|     # Sets the size limit of the plugin sidecar emptyDir volume | ||||
|     sizeLimit: {} | ||||
|   notifiers: | ||||
|     enabled: false | ||||
|     # label that the configmaps with notifiers are marked with | ||||
|     label: grafana_notifier | ||||
|     # If specified, the sidecar will search for notifier config-maps inside this namespace. | ||||
|     # Otherwise the namespace in which the sidecar is running will be used. | ||||
|     # It's also possible to specify ALL to search in all namespaces | ||||
|     searchNamespace: null | ||||
|     # search in configmap, secret or both | ||||
|     resource: both | ||||
|     # Sets the size limit of the notifier sidecar emptyDir volume | ||||
|     sizeLimit: {} | ||||
|  | ||||
| ## Override the deployment namespace | ||||
| ## | ||||
| namespaceOverride: "" | ||||
|  | ||||
| ## Number of old ReplicaSets to retain | ||||
| ## | ||||
| revisionHistoryLimit: 10 | ||||
|  | ||||
| ## Add a separate remote image renderer deployment/service | ||||
| imageRenderer: | ||||
|   # Enable the image-renderer deployment & service | ||||
|   enabled: false | ||||
|   replicas: 1 | ||||
|   image: | ||||
|     # image-renderer Image repository | ||||
|     repository: grafana/grafana-image-renderer | ||||
|     # image-renderer Image tag | ||||
|     tag: latest | ||||
|     # image-renderer Image sha (optional) | ||||
|     sha: "" | ||||
|     # image-renderer ImagePullPolicy | ||||
|     pullPolicy: Always | ||||
|   # extra environment variables | ||||
|   env: | ||||
|     HTTP_HOST: "0.0.0.0" | ||||
|     # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 | ||||
|     # RENDERING_MODE: clustered | ||||
|     # IGNORE_HTTPS_ERRORS: true | ||||
|   # image-renderer deployment serviceAccount | ||||
|   serviceAccountName: "" | ||||
|   # image-renderer deployment securityContext | ||||
|   securityContext: {} | ||||
|   # image-renderer deployment Host Aliases | ||||
|   hostAliases: [] | ||||
|   # image-renderer deployment priority class | ||||
|   priorityClassName: '' | ||||
|   service: | ||||
|     # Enable the image-renderer service | ||||
|     enabled: true | ||||
|     # image-renderer service port name | ||||
|     portName: 'http' | ||||
|     # image-renderer service port used by both service and deployment | ||||
|     port: 8081 | ||||
|     targetPort: 8081 | ||||
|   # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana | ||||
|   grafanaProtocol: http | ||||
|   # In case a sub_path is used this needs to be added to the image renderer callback | ||||
|   grafanaSubPath: "" | ||||
|   # name of the image-renderer port on the pod | ||||
|   podPortName: http | ||||
|   # number of image-renderer replica sets to keep | ||||
|   revisionHistoryLimit: 10 | ||||
|   networkPolicy: | ||||
|     # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods | ||||
|     limitIngress: true | ||||
|     # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | ||||
|     limitEgress: false | ||||
|   resources: {} | ||||
| #   limits: | ||||
| #     cpu: 100m | ||||
| #     memory: 100Mi | ||||
| #   requests: | ||||
| #     cpu: 50m | ||||
| #     memory: 50Mi | ||||
|   ## Node labels for pod assignment | ||||
|   ## ref: https://kubernetes.io/docs/user-guide/node-selection/ | ||||
|   # | ||||
|   nodeSelector: {} | ||||
|  | ||||
|   ## Tolerations for pod assignment | ||||
|   ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ||||
|   ## | ||||
|   tolerations: [] | ||||
|  | ||||
|   ## Affinity for pod assignment (evaluated as template) | ||||
|   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | ||||
|   ## | ||||
|   affinity: {} | ||||
|  | ||||
| # Create dynamic manifests via values: | ||||
| extraObjects: [] | ||||
|   # - apiVersion: "kubernetes-client.io/v1" | ||||
|   #   kind: ExternalSecret | ||||
|   #   metadata: | ||||
|   #     name: grafana-secrets | ||||
|   #   spec: | ||||
|   #     backendType: gcpSecretsManager | ||||
|   #     data: | ||||
|   #       - key: grafana-admin-password | ||||
|   #         name: adminPassword | ||||
|  | ||||
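The datasources: block above is left empty ({}). If the InfluxDB instance added later in this commit is meant to feed these dashboards, it could be provisioned roughly as sketched below. This sketch is not part of the commit: the service DNS name, organization, and bucket are taken from the Telegraf outputs and InfluxDB values further down, and the token is a placeholder.

datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      - name: InfluxDB
        type: influxdb
        access: proxy
        url: http://influxdb-influxdb2.monitoring:80   # same Service the Telegraf outputs point at
        jsonData:
          version: Flux
          organization: influxdata       # matches adminUser.organization in influxdb.values.yaml
          defaultBucket: kluster         # bucket written to by the node Telegraf DaemonSet
        secureJsonData:
          token: "<influxdb-api-token>"  # placeholder; do not commit a real token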
							
								
								
									
apps/monitoring/influxdb-telegraf.values.yaml (Normal file, 157 lines)
									
								
							| @@ -0,0 +1,157 @@ | ||||
| ## Default values.yaml for Telegraf | ||||
| ## This is a YAML-formatted file. | ||||
| ## ref: https://hub.docker.com/r/library/telegraf/tags/ | ||||
|  | ||||
| image: | ||||
|   repo: "telegraf" | ||||
|   tag: "1.22" | ||||
|   pullPolicy: IfNotPresent | ||||
|  | ||||
| ## Configure resource requests and limits | ||||
| ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ | ||||
| resources: | ||||
|   requests: | ||||
|     memory: 256Mi | ||||
|     cpu: 0.1 | ||||
|   limits: | ||||
|     memory: 1Gi | ||||
|     cpu: 1 | ||||
|  | ||||
| ## Pod annotations | ||||
| podAnnotations: {} | ||||
|  | ||||
| ## Pod labels | ||||
| podLabels: {} | ||||
|  | ||||
| ## Configure args passed to Telegraf containers | ||||
| args: [] | ||||
|  | ||||
| ## The name of a secret in the same kubernetes namespace which contains values to | ||||
| ## be added to the environment (must be manually created) | ||||
| ## This can be useful for auth tokens, etc. | ||||
| # envFromSecret: "telegraf-tokens" | ||||
|  | ||||
| ## Environment | ||||
| env: | ||||
|   # This pulls HOSTNAME from the node, not the pod. | ||||
|   - name: HOSTNAME | ||||
|     valueFrom: | ||||
|       fieldRef: | ||||
|         fieldPath: spec.nodeName | ||||
|   # In test clusters where hostnames are resolved in /etc/hosts on each node, | ||||
|   # the HOSTNAME is not resolvable from inside containers | ||||
|   # So inject the host IP as well | ||||
|   - name: HOSTIP | ||||
|     valueFrom: | ||||
|       fieldRef: | ||||
|         fieldPath: status.hostIP | ||||
|   # Mount the host filesystem and set the appropriate env variables. | ||||
|   # ref: https://github.com/influxdata/telegraf/blob/master/docs/FAQ.md | ||||
|   # HOST_PROC is required by the cpu, disk, diskio, kernel and processes input plugins | ||||
|   - name: "HOST_PROC" | ||||
|     value: "/hostfs/proc" | ||||
|   # HOST_SYS is required by the diskio plugin | ||||
|   - name: "HOST_SYS" | ||||
|     value: "/hostfs/sys" | ||||
|   - name: "HOST_MOUNT_PREFIX" | ||||
|     value: "/hostfs" | ||||
|  | ||||
| ## Add custom volumes and mounts | ||||
| # volumes: | ||||
| # - name: telegraf-output-influxdb2 | ||||
| #   configMap: | ||||
| #     name: "telegraf-output-influxdb2" | ||||
| # mountPoints: | ||||
| # - name: telegraf-output-influxdb2 | ||||
| #   mountPath: /etc/telegraf/conf.d | ||||
| #   subPath: influxdb2.conf | ||||
|  | ||||
| ## Tolerations for pod assignment | ||||
| ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ||||
| ## | ||||
| tolerations: [] | ||||
|  | ||||
| ## If the DaemonSet should run on the host's network namespace | ||||
| ## hostNetwork: true | ||||
|  | ||||
| ## If using hostNetwork=true, set dnsPolicy to ClusterFirstWithHostNet | ||||
| ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/# | ||||
| ## dnsPolicy: ClusterFirstWithHostNet | ||||
|  | ||||
| ## If using dnsPolicy=None, set dnsConfig | ||||
| ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config | ||||
| ## dnsConfig: | ||||
| ##   nameservers: | ||||
| ##     - 1.2.3.4 | ||||
| ##   searches: | ||||
| ##     - ns1.svc.cluster-domain.example | ||||
| ##     - my.dns.search.suffix | ||||
| ##   options: | ||||
| ##     - name: ndots | ||||
| ##       value: "2" | ||||
| ##     - name: edns0 | ||||
|  | ||||
| rbac: | ||||
|   # Specifies whether RBAC resources should be created | ||||
|   create: true | ||||
|  | ||||
| serviceAccount: | ||||
|   # Specifies whether a ServiceAccount should be created | ||||
|   create: true | ||||
|   # The name of the ServiceAccount to use. | ||||
|   # If not set and create is true, a name is generated using the fullname template | ||||
|   # name: | ||||
|   # Annotations for the ServiceAccount | ||||
|   annotations: {} | ||||
|  | ||||
| ## Specify priorityClassName | ||||
| ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ | ||||
| # priorityClassName: system-node-critical | ||||
|  | ||||
| # Specify the pod's SecurityContext, including the OS user and group to run the pod | ||||
| podSecurityContext: {} | ||||
|  | ||||
| override_config: | ||||
|   toml: ~ | ||||
|   # Provide a literal TOML config | ||||
|   # toml: |+ | ||||
|   #   [global_tags] | ||||
|   #     foo = "bar" | ||||
|   #   [agent] | ||||
|   #     interval = "10s" | ||||
|   #   [[inputs.mem]] | ||||
|   #   [[outputs.influxdb_v2]] | ||||
|   #     urls           = ["https://us-west-2-1.aws.cloud2.influxdata.com"] | ||||
|   #     bucket         = "data" | ||||
|   #     organization   = "OurCompany" | ||||
|   #     token          = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" | ||||
|  | ||||
| ## Exposed telegraf configuration | ||||
| ## ref: https://docs.influxdata.com/telegraf/v1.13/administration/configuration/ | ||||
| config: | ||||
|   # global_tags: | ||||
|   #   cluster: "mycluster" | ||||
|   agent: | ||||
|     interval: "10s" | ||||
|     round_interval: true | ||||
|     metric_batch_size: 1000 | ||||
|     metric_buffer_limit: 10000 | ||||
|     collection_jitter: "0s" | ||||
|     flush_interval: "10s" | ||||
|     flush_jitter: "0s" | ||||
|     precision: "" | ||||
|     debug: false | ||||
|     quiet: false | ||||
|     logfile: "" | ||||
|     hostname: "$HOSTNAME" | ||||
|     omit_hostname: false | ||||
|   outputs: | ||||
|     - influxdb_v2: | ||||
|         urls: | ||||
|           - "http://influxdb-influxdb2.monitoring:80" | ||||
|         token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ== | ||||
|         organization: "influxdata" | ||||
|         bucket: "kluster" | ||||
|   monitor_self: false | ||||
|   docker_endpoint: "unix:///run/k3s/containerd/containerd.sock" | ||||
|  | ||||
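The influxdb_v2 output above carries the API token in plaintext. The commented envFromSecret hint near the top of the same file points at an alternative: store the token in a manually created Secret and let Telegraf pick it up through environment-variable substitution. A sketch, assuming the Secret name telegraf-tokens and the monitoring namespace:

apiVersion: v1
kind: Secret
metadata:
  name: telegraf-tokens        # name referenced by the envFromSecret example in this values file
  namespace: monitoring        # assumed namespace of the Telegraf DaemonSet
stringData:
  INFLUX_TOKEN: "<influxdb-api-token>"   # placeholder; never commit the real token

With envFromSecret: "telegraf-tokens" enabled, the output could then use token: "$INFLUX_TOKEN", since Telegraf expands environment variables in its rendered configuration.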
							
								
								
									
apps/monitoring/influxdb.pvc.yaml (Normal file, 35 lines)
									
								
							| @@ -0,0 +1,35 @@ | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: PersistentVolume | ||||
| metadata: | ||||
|   name: influxdb-nfs | ||||
|   labels: | ||||
|     directory: influxdb | ||||
| spec: | ||||
|   storageClassName: slow | ||||
|   capacity: | ||||
|     storage: "10Gi" | ||||
|   volumeMode: Filesystem | ||||
|   accessModes: | ||||
|     - ReadWriteOnce | ||||
|   nfs: | ||||
|     path: /export/kluster/influxdb | ||||
|     server: 192.168.1.157 | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: PersistentVolumeClaim | ||||
| metadata: | ||||
|   name: influxdb-nfs | ||||
| spec: | ||||
|   storageClassName: slow | ||||
|   accessModes: | ||||
|     - ReadWriteOnce | ||||
|   resources: | ||||
|     requests: | ||||
|       storage: "10Gi" | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       directory: influxdb | ||||
|  | ||||
|  | ||||
|  | ||||
							
								
								
									
apps/monitoring/influxdb.values.yaml (Normal file, 195 lines)
									
								
							| @@ -0,0 +1,195 @@ | ||||
| image: | ||||
|   repository: influxdb | ||||
|   tag: 2.3.0-alpine | ||||
|   pullPolicy: IfNotPresent | ||||
|  | ||||
| ## Annotations to be added to InfluxDB pods | ||||
| ## | ||||
| podAnnotations: {} | ||||
|  | ||||
| ## Labels to be added to InfluxDB pods | ||||
| ## | ||||
| podLabels: {} | ||||
|  | ||||
| nameOverride: "" | ||||
| fullnameOverride: "" | ||||
|  | ||||
| ## Configure resource requests and limits | ||||
| ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ | ||||
| ## | ||||
| resources: {} | ||||
|   # We usually recommend not to specify default resources and to leave this as a conscious | ||||
|   # choice for the user. This also increases chances charts run on environments with little | ||||
|   # resources, such as Minikube. If you do want to specify resources, uncomment the following | ||||
|   # lines, adjust them as necessary, and remove the curly braces after 'resources:'. | ||||
|   # limits: | ||||
|   #  cpu: 100m | ||||
|   #  memory: 128Mi | ||||
|   # requests: | ||||
|   #  cpu: 100m | ||||
|   #  memory: 128Mi | ||||
|  | ||||
| ## Node labels for pod assignment | ||||
| ## ref: https://kubernetes.io/docs/user-guide/node-selection/ | ||||
| ## | ||||
| nodeSelector: {} | ||||
|  | ||||
| ## Tolerations for pod assignment | ||||
| ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ||||
| ## | ||||
| tolerations: [] | ||||
|  | ||||
| ## Affinity for pod assignment | ||||
| ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | ||||
| ## | ||||
| affinity: {} | ||||
|  | ||||
| securityContext: {} | ||||
|  | ||||
| ## Customize liveness, readiness and startup probes | ||||
| ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ | ||||
| ## | ||||
| livenessProbe: {} | ||||
|   # path: "/health" | ||||
|   # scheme: "HTTP" | ||||
|   # initialDelaySeconds: 0 | ||||
|   # periodSeconds: 10 | ||||
|   # timeoutSeconds: 1 | ||||
|   # failureThreshold: 3 | ||||
|  | ||||
| readinessProbe: {} | ||||
|   # path: "/health" | ||||
|   # scheme: "HTTP" | ||||
|   # initialDelaySeconds: 0 | ||||
|   # periodSeconds: 10 | ||||
|   # timeoutSeconds: 1 | ||||
|   # successThreshold: 1 | ||||
|   # failureThreshold: 3 | ||||
|  | ||||
| startupProbe: | ||||
|   enabled: false | ||||
|   # path: "/health" | ||||
|   # scheme: "HTTP" | ||||
|   # initialDelaySeconds: 30 | ||||
|   # periodSeconds: 5 | ||||
|   # timeoutSeconds: 1 | ||||
|   # failureThreshold: 6 | ||||
|  | ||||
| ## Extra environment variables to configure influxdb | ||||
| ## e.g. | ||||
| # env: | ||||
| #   - name: FOO | ||||
| #     value: BAR | ||||
| #   - name: BAZ | ||||
| #     valueFrom: | ||||
| #       secretKeyRef: | ||||
| #         name: my-secret | ||||
| #         key: my-key | ||||
| env: {} | ||||
|  | ||||
| ## Create default user through docker entrypoint | ||||
| ## Defaults indicated below | ||||
| ## | ||||
| adminUser: | ||||
|   organization: "influxdata" | ||||
|   bucket: "default" | ||||
|   user: "admin" | ||||
|   retention_policy: "0s" | ||||
|   ## Leave empty to generate a random password and token. | ||||
|   ## Or fill any of these values to use fixed values. | ||||
|   password: "" | ||||
|   token: "" | ||||
|  | ||||
|   ## The password and token are obtained from an existing secret. The expected | ||||
|   ## keys are `admin-password` and `admin-token`. | ||||
|   ## If set, the password and token values above are ignored. | ||||
|   # existingSecret: influxdb-auth | ||||
|  | ||||
| ## Persist data to a persistent volume | ||||
| ## | ||||
| persistence: | ||||
|   enabled: true | ||||
|   ## If true will use an existing PVC instead of creating one | ||||
|   useExisting: true | ||||
|   ## Name of existing PVC to be used in the influx deployment | ||||
|   name: influxdb-nfs | ||||
|   ## influxdb data Persistent Volume Storage Class | ||||
|   ## If defined, storageClassName: <storageClass> | ||||
|   ## If set to "-", storageClassName: "", which disables dynamic provisioning | ||||
|   ## If undefined (the default) or set to null, no storageClassName spec is | ||||
|   ##   set, choosing the default provisioner.  (gp2 on AWS, standard on | ||||
|   ##   GKE, AWS & OpenStack) | ||||
|   ## | ||||
|   # storageClass: "-" | ||||
|   accessMode: ReadWriteOnce | ||||
|   size: 10Gi | ||||
|   mountPath: /var/lib/influxdb2 | ||||
|   subPath: "" | ||||
|  | ||||
| ## Add custom volume and volumeMounts | ||||
| ## | ||||
| # volumes: | ||||
| #   - name: influxdb2-templates | ||||
| #     hostPath: | ||||
| #       path: /data/influxdb2-templates | ||||
| #       type: Directory | ||||
| # mountPoints: | ||||
| #   - name: influxdb2-templates | ||||
| #     mountPath: /influxdb2-templates | ||||
| #     readOnly: true | ||||
|  | ||||
| ## Allow executing custom init scripts | ||||
| ## If the container finds any files with the .sh extension inside of the | ||||
| ## /docker-entrypoint-initdb.d folder, it will execute them. | ||||
| ## When multiple scripts are present, they will be executed in lexical sort order by name. | ||||
| ## For more details see Custom Initialization Scripts in https://hub.docker.com/_/influxdb | ||||
| initScripts: | ||||
|   enabled: false | ||||
|   scripts: | ||||
|     init.sh: |+ | ||||
|       #!/bin/bash | ||||
|       influx apply --force yes -u https://raw.githubusercontent.com/influxdata/community-templates/master/influxdb2_operational_monitoring/influxdb2_operational_monitoring.yml | ||||
|  | ||||
| ## Specify a service type | ||||
| ## ref: http://kubernetes.io/docs/user-guide/services/ | ||||
| ## | ||||
| service: | ||||
|   type: LoadBalancer | ||||
|   loadBalancerIP: 192.168.3.4 | ||||
|   port: 80 | ||||
|   targetPort: 8086 | ||||
|   annotations: {} | ||||
|   labels: {} | ||||
|   portName: http | ||||
|  | ||||
| serviceAccount: | ||||
|   # Specifies whether a ServiceAccount should be created | ||||
|   create: true | ||||
|   # The name of the ServiceAccount to use. | ||||
|   # If not set and create is true, a name is generated using the fullname template | ||||
|   name: | ||||
|   # Annotations for the ServiceAccount | ||||
|   annotations: {} | ||||
|  | ||||
| ingress: | ||||
|   enabled: false | ||||
|   # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName | ||||
|   # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress | ||||
|   # className: nginx | ||||
|   tls: false | ||||
|   # secretName: my-tls-cert # only needed if tls above is true or default certificate is not configured for Nginx | ||||
|   hostname: influxdb.foobar.com | ||||
|   annotations: {} | ||||
|     # kubernetes.io/ingress.class: "nginx" | ||||
|     # kubernetes.io/tls-acme: "true" | ||||
|   path: / | ||||
|  | ||||
| ## Pod disruption budget configuration | ||||
| ## | ||||
| pdb: | ||||
|   ## Specifies whether a Pod disruption budget should be created | ||||
|   ## | ||||
|   create: true | ||||
|   minAvailable: 1 | ||||
|   # maxUnavailable: 1 | ||||
|  | ||||
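The adminUser block above notes that the password and token can come from an existing Secret with the keys admin-password and admin-token (the commented existingSecret: influxdb-auth). A sketch of such a Secret, with placeholder values and an assumed monitoring namespace:

apiVersion: v1
kind: Secret
metadata:
  name: influxdb-auth          # name to reference via adminUser.existingSecret
  namespace: monitoring        # assumed namespace of the InfluxDB release
stringData:
  admin-password: "<admin-password>"   # placeholder; generate out of band
  admin-token: "<admin-api-token>"     # placeholder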
							
								
								
									
apps/monitoring/telegraf-adguard.values.yaml (Normal file, 167 lines)
									
								
							| @@ -0,0 +1,167 @@ | ||||
| ## Default values.yaml for Telegraf | ||||
| ## This is a YAML-formatted file. | ||||
| ## ref: https://hub.docker.com/r/library/telegraf/tags/ | ||||
|  | ||||
| replicaCount: 1 | ||||
| image: | ||||
|   repo: "telegraf" | ||||
|   tag: "1.25" | ||||
|   pullPolicy: IfNotPresent | ||||
| podAnnotations: {} | ||||
| podLabels: {} | ||||
| imagePullSecrets: [] | ||||
| ## Configure args passed to Telegraf containers | ||||
| args: [] | ||||
| # The name of a secret in the same kubernetes namespace which contains values to | ||||
| # be added to the environment (must be manually created) | ||||
| # This can be useful for auth tokens, etc. | ||||
|  | ||||
| # envFromSecret: "telegraf-tokens" | ||||
| env: | ||||
|   - name: HOSTNAME | ||||
|     value: "telegraf-polling-service" | ||||
| # An older "volumeMounts" key was previously added which will likely | ||||
| # NOT WORK as you expect. Please use this newer configuration. | ||||
|  | ||||
| # volumes: | ||||
| # - name: telegraf-output-influxdb2 | ||||
| #   configMap: | ||||
| #     name: "telegraf-output-influxdb2" | ||||
| # mountPoints: | ||||
| # - name: telegraf-output-influxdb2 | ||||
| #   mountPath: /etc/telegraf/conf.d | ||||
| #   subPath: influxdb2.conf | ||||
|  | ||||
| ## Configure resource requests and limits | ||||
| ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ | ||||
| resources: {} | ||||
| # requests: | ||||
| #   memory: 128Mi | ||||
| #   cpu: 100m | ||||
| # limits: | ||||
| #   memory: 128Mi | ||||
| #   cpu: 100m | ||||
|  | ||||
| ## Node labels for pod assignment | ||||
| ## ref: https://kubernetes.io/docs/user-guide/node-selection/ | ||||
| nodeSelector: {} | ||||
| ## Affinity for pod assignment | ||||
| ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | ||||
| ## | ||||
| affinity: {} | ||||
| ## Tolerations for pod assignment | ||||
| ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ||||
| ## | ||||
| tolerations: [] | ||||
| # - key: "key" | ||||
| #   operator: "Equal|Exists" | ||||
| #   value: "value" | ||||
| #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" | ||||
|  | ||||
| service: | ||||
|   enabled: false | ||||
|   type: ClusterIP | ||||
|   annotations: {} | ||||
| rbac: | ||||
|   # Specifies whether RBAC resources should be created | ||||
|   create: true | ||||
|   # Create only for the release namespace or cluster wide (Role vs ClusterRole) | ||||
|   clusterWide: false | ||||
|   # Rules for the created role | ||||
|   rules: [] | ||||
| # When using the prometheus input to scrape all pods you need extra rules set to the ClusterRole to be | ||||
| # able to scan the pods for scraping labels. The following rules have been taken from: | ||||
| # https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46 | ||||
| #    - apiGroups: | ||||
| #        - "" | ||||
| #      resources: | ||||
| #        - nodes | ||||
| #        - nodes/proxy | ||||
| #        - nodes/metrics | ||||
| #        - services | ||||
| #        - endpoints | ||||
| #        - pods | ||||
| #        - ingresses | ||||
| #        - configmaps | ||||
| #      verbs: | ||||
| #        - get | ||||
| #        - list | ||||
| #        - watch | ||||
| #    - apiGroups: | ||||
| #        - "extensions" | ||||
| #      resources: | ||||
| #        - ingresses/status | ||||
| #        - ingresses | ||||
| #      verbs: | ||||
| #        - get | ||||
| #        - list | ||||
| #        - watch | ||||
| #    - nonResourceURLs: | ||||
| #        - "/metrics" | ||||
| #      verbs: | ||||
| #        - get | ||||
|  | ||||
| serviceAccount: | ||||
|   # Specifies whether a ServiceAccount should be created | ||||
|   create: false | ||||
| ## Exposed telegraf configuration | ||||
| ## For a full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml` | ||||
| ## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/ | ||||
| config: | ||||
|   agent: | ||||
|     interval: "2m" | ||||
|     round_interval: true | ||||
|     metric_batch_size: 1000 | ||||
|     metric_buffer_limit: 10000 | ||||
|     collection_jitter: "0s" | ||||
|     flush_interval: "10s" | ||||
|     flush_jitter: "0s" | ||||
|     precision: "" | ||||
|     debug: false | ||||
|     quiet: false | ||||
|     logfile: "" | ||||
|     hostname: "$HOSTNAME" | ||||
|     omit_hostname: false | ||||
|   processors: | ||||
|     - enum: | ||||
|         mapping: | ||||
|           field: "status" | ||||
|           dest: "status_code" | ||||
|           value_mappings: | ||||
|             healthy: 1 | ||||
|             problem: 2 | ||||
|             critical: 3 | ||||
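|     # For illustration: with this enum processor, a metric whose string field | ||||
|     # reads status="problem" keeps that field and gains an additional numeric | ||||
|     # field status_code=2, which is easier to graph and alert on. | ||||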
|   outputs: | ||||
|     - influxdb_v2: | ||||
|         urls: | ||||
|           - "http://influxdb-influxdb2.monitoring:80" | ||||
|         token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA== | ||||
|         organization: "influxdata" | ||||
|         bucket: "homeassistant" | ||||
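|     # Hedged alternative, assuming envFromSecret above were enabled: Telegraf | ||||
|     # expands environment variables in its configuration, so the token could | ||||
|     # instead be written as | ||||
|     #   token: "$INFLUX_TOKEN" | ||||
|     # which keeps the real value out of this committed file. | ||||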
|   inputs: | ||||
|     - http: | ||||
|         urls: | ||||
|           - "http://adguard-home.adguard:3000/control/stats" | ||||
|         data_format: "json" | ||||
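|     # Rough sketch of what this input yields (exact field names depend on the | ||||
|     # AdGuard Home API and are not guaranteed here): /control/stats returns a | ||||
|     # JSON body with numeric counters such as num_dns_queries and | ||||
|     # num_blocked_filtering; with data_format "json", each numeric value becomes | ||||
|     # a field on an "http" measurement tagged with the source url and is flushed | ||||
|     # to the bucket configured above every flush_interval. | ||||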
| metrics: | ||||
|   health: | ||||
|     enabled: false | ||||
|     service_address: "http://:8888" | ||||
|     threshold: 5000.0 | ||||
|   internal: | ||||
|     enabled: true | ||||
|     collect_memstats: false | ||||
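|   # (The chart-managed "internal" input reports Telegraf's own runtime metrics, | ||||
|   # e.g. how many metrics were gathered and written; Go memstats collection is | ||||
|   # switched off here.) | ||||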
| # Lifecycle hooks | ||||
| # hooks: | ||||
| #   postStart: ["/bin/sh", "-c", "echo Telegraf started"] | ||||
| #   preStop: ["/bin/sh", "-c", "sleep 60"] | ||||
|  | ||||
| ## Pod disruption budget configuration | ||||
| ## | ||||
| pdb: | ||||
|   ## Specifies whether a Pod disruption budget should be created | ||||
|   ## | ||||
|   create: true | ||||
|   minAvailable: 1 | ||||
|   # maxUnavailable: 1 | ||||
|  | ||||
							
								
								
									
110  apps/monitoring/telegraf-speedtest.values.yaml  Normal file
							| @@ -0,0 +1,110 @@ | ||||
| ## Default values.yaml for Telegraf | ||||
| ## This is a YAML-formatted file. | ||||
| ## ref: https://hub.docker.com/r/library/telegraf/tags/ | ||||
|  | ||||
| replicaCount: 1 | ||||
| image: | ||||
|   repo: "telegraf" | ||||
|   tag: "1.25" | ||||
|   pullPolicy: IfNotPresent | ||||
| podAnnotations: {} | ||||
| podLabels: {} | ||||
| imagePullSecrets: [] | ||||
| ## Configure args passed to Telegraf containers | ||||
| args: [] | ||||
| # The name of a Secret in the same Kubernetes namespace whose key/value pairs are | ||||
| # added to the container environment (the Secret must be created manually). | ||||
| # This can be useful for auth tokens, etc. | ||||
|  | ||||
| # envFromSecret: "telegraf-tokens" | ||||
| env: | ||||
|   - name: HOSTNAME | ||||
|     value: "telegraf-speedtest" | ||||
|  | ||||
| ## Configure resource requests and limits | ||||
| ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ | ||||
| resources: {} | ||||
| # requests: | ||||
| #   memory: 128Mi | ||||
| #   cpu: 100m | ||||
| # limits: | ||||
| #   memory: 128Mi | ||||
| #   cpu: 100m | ||||
|  | ||||
| ## Node labels for pod assignment | ||||
| ## ref: https://kubernetes.io/docs/user-guide/node-selection/ | ||||
| nodeSelector: {} | ||||
| ## Affinity for pod assignment | ||||
| ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | ||||
| ## | ||||
| affinity: {} | ||||
| ## Tolerations for pod assignment | ||||
| ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ||||
| ## | ||||
| tolerations: [] | ||||
| # - key: "key" | ||||
| #   operator: "Equal|Exists" | ||||
| #   value: "value" | ||||
| #   effect: "NoSchedule|PreferNoSchedule|NoExecute (1.6 only)" | ||||
| service: | ||||
|   enabled: false | ||||
| rbac: | ||||
|   # Specifies whether RBAC resources should be created | ||||
|   create: false | ||||
|  | ||||
| serviceAccount: | ||||
|   # Specifies whether a ServiceAccount should be created | ||||
|   create: false | ||||
|  | ||||
|  | ||||
| ## Exposed telegraf configuration | ||||
| ## For a full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml` | ||||
| ## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/ | ||||
| config: | ||||
|   agent: | ||||
|     interval: "2h" | ||||
|     round_interval: true | ||||
|     metric_batch_size: 1000 | ||||
|     metric_buffer_limit: 10000 | ||||
|     collection_jitter: "0s" | ||||
|     flush_interval: "10s" | ||||
|     flush_jitter: "0s" | ||||
|     precision: "" | ||||
|     debug: false | ||||
|     quiet: false | ||||
|     logfile: "" | ||||
|     hostname: "$HOSTNAME" | ||||
|     omit_hostname: false | ||||
|   processors: | ||||
|     - enum: | ||||
|         mapping: | ||||
|           field: "status" | ||||
|           dest: "status_code" | ||||
|           value_mappings: | ||||
|             healthy: 1 | ||||
|             problem: 2 | ||||
|             critical: 3 | ||||
|   outputs: | ||||
|     - influxdb_v2: | ||||
|         urls: | ||||
|           - "http://influxdb-influxdb2.monitoring:80" | ||||
|         token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA== | ||||
|         organization: "influxdata" | ||||
|         bucket: "homeassistant" | ||||
|   inputs: | ||||
|     - internet_speed: | ||||
|         enable_file_download: false | ||||
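|     # For context (a sketch, not verified against this exact plugin version): | ||||
|     # internet_speed runs a speed test once per agent interval (2h above) and | ||||
|     # emits an "internet_speed" measurement with fields such as download, upload | ||||
|     # and latency, which is then written to the InfluxDB bucket configured above. | ||||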
|  | ||||
| # Lifecycle hooks | ||||
| # hooks: | ||||
| #   postStart: ["/bin/sh", "-c", "echo Telegraf started"] | ||||
| #   preStop: ["/bin/sh", "-c", "sleep 60"] | ||||
|  | ||||
| ## Pod disruption budget configuration | ||||
| ## | ||||
| pdb: | ||||
|   ## Specifies whether a Pod disruption budget should be created | ||||
|   ## | ||||
|   create: true | ||||
|   minAvailable: 1 | ||||
|   # maxUnavailable: 1 | ||||