## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/

replicaCount: 1

image:
  repo: "telegraf"
  tag: "1.24"
  pullPolicy: IfNotPresent

podAnnotations: {}

podLabels: {}

imagePullSecrets: []

## Configure args passed to Telegraf containers
args: []

# The name of a secret in the same Kubernetes namespace which contains values to
# be added to the environment (must be created manually).
# This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
# (see the sketch at the end of this file for one way to create such a secret)

env:
  - name: HOSTNAME
    value: "telegraf-polling-service"

# An older "volumeMounts" key was previously added which will likely
# NOT WORK as you expect. Please use this newer configuration.
volumes:
  - name: traefik-logs
    persistentVolumeClaim:
      claimName: traefik-logs

mountPoints:
  - name: traefik-logs
    mountPath: /traefik_logs

## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}

## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity:
  # To read the Traefik logs, the pod must run on the same node as Traefik.
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
            # matches labels: app.kubernetes.io/name=traefik
            - key: app.kubernetes.io/name
              operator: In
              values:
                - traefik
        topologyKey: "kubernetes.io/hostname"

## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal|Exists"
#   value: "value"
#   effect: "NoSchedule|PreferNoSchedule|NoExecute"

service:
  enabled: false
  type: ClusterIP
  annotations: {}

rbac:
  # Specifies whether RBAC resources should be created
  create: true
  # Create only for the release namespace or cluster wide (Role vs ClusterRole)
  clusterWide: false
  # Rules for the created Role/ClusterRole
  rules: []
  # When using the prometheus input to scrape all pods, extra rules need to be added
  # to the ClusterRole so it can scan the pods for scraping labels. The following
  # rules have been taken from:
  # https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46
  # - apiGroups:
  #     - ""
  #   resources:
  #     - nodes
  #     - nodes/proxy
  #     - nodes/metrics
  #     - services
  #     - endpoints
  #     - pods
  #     - ingresses
  #     - configmaps
  #   verbs:
  #     - get
  #     - list
  #     - watch
  # - apiGroups:
  #     - "extensions"
  #   resources:
  #     - ingresses/status
  #     - ingresses
  #   verbs:
  #     - get
  #     - list
  #     - watch
  # - nonResourceURLs:
  #     - "/metrics"
  #   verbs:
  #     - get

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Annotations for the ServiceAccount
  annotations: {}

## Exposed telegraf configuration
## For a full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
  agent:
    interval: "10s"
    round_interval: true
    metric_batch_size: 1000
    metric_buffer_limit: 10000
    collection_jitter: "0s"
    flush_interval: "10s"
    flush_jitter: "0s"
    precision: ""
    debug: false
    quiet: false
    logfile: ""
    hostname: "$HOSTNAME"
    omit_hostname: true
  # processors:
  #   - enum:
  #       mapping:
  #         field: "status"
  #         dest: "status_code"
  #         value_mappings:
  #           healthy: 1
  #           problem: 2
  #           critical: 3
  outputs:
    - influxdb_v2:
        urls:
          - "http://influxdb-influxdb2.monitoring:80"
        token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
        organization: "influxdata"
        bucket: "kluster"
        # retention_policy: "2w"
  inputs:
    - docker_log:
        endpoint: "unix:///var/run/docker.sock"
        from_beginning: false
        container_name_include: ["traefik"]
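
## ---------------------------------------------------------------------------
## Example usage (a sketch, not part of the chart itself). The "envFromSecret"
## key commented out above expects a Secret that has to be created by hand; one
## way to do that, with a hypothetical INFLUX_TOKEN key and the "telegraf-tokens"
## name used in the comment above, would be:
##
##   kubectl create secret generic telegraf-tokens \
##     --namespace monitoring \
##     --from-literal=INFLUX_TOKEN=<your-token>
##
## Assuming the upstream influxdata/telegraf chart, a release using this file
## could then be installed with:
##
##   helm repo add influxdata https://helm.influxdata.com/
##   helm upgrade --install telegraf influxdata/telegraf -n monitoring -f values.yaml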