diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 6c07e19..9bd3b11 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -32,6 +32,26 @@ jobs:
echo "changed=true" >> $GITHUB_OUTPUT
fi
+ - name: Install helm-docs
+ run: |
+ GOBIN=/usr/local/bin/ go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.14.2
+
+ - name: Add helm repo for otel
+ run: |
+ helm repo add opentelemetry-collector https://open-telemetry.github.io/opentelemetry-helm-charts
+
+ - name: Build chart dependencies
+ run: |
+ helm dependency build charts/netdata
+
+ - name: Check documentation is up-to-date
+ run: |
+ ./generate-documentation.sh
+ if ! git diff --exit-code charts/netdata/README.md; then
+ echo "::error::README.md is out of date. Please run ./generate-documentation.sh and commit the changes."
+ exit 1
+ fi
+
- name: Run chart-testing (lint)
run: ct lint --check-version-increment=false --validate-maintainers=false --target-branch ${{ github.event.repository.default_branch }}
@@ -41,3 +61,6 @@ jobs:
- name: Run chart-testing (install)
run: ct install --target-branch ${{ github.event.repository.default_branch }}
+
+ - name: Run chart-testing (install with OpenTelemetry)
+ run: ct install --target-branch ${{ github.event.repository.default_branch }} --helm-extra-set-args "--set netdataOpentelemetry.enabled=true --set otel-collector.enabled=true"
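For contributors tripped up by the new documentation check, the same steps can be replayed locally. This is a sketch mirroring the workflow above; it assumes Go and Helm are already on your PATH:

```sh
# Install the same helm-docs version the CI job uses
GOBIN=/usr/local/bin/ go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.14.2

# Resolve the new chart dependency before generating docs
helm repo add opentelemetry-collector https://open-telemetry.github.io/opentelemetry-helm-charts
helm dependency build charts/netdata

# Regenerate the README and verify nothing is left uncommitted
./generate-documentation.sh
git diff --exit-code charts/netdata/README.md
```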
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index a6b53db..482465c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -29,6 +29,19 @@ jobs:
git config --global user.name Netdatabot
git config --global user.email bot@netdata.cloud
+ - name: Set up Helm
+ uses: azure/setup-helm@v4
+ with:
+ version: v3.12.0
+
+ - name: Add helm repo for dependencies
+ run: |
+ helm repo add opentelemetry-collector https://open-telemetry.github.io/opentelemetry-helm-charts
+
+ - name: Build chart dependencies
+ run: |
+ helm dependency build charts/netdata
+
- name: Get current version
id: get_current_var
run: echo "current_version=$(.github/scripts/update_versions.py get_chart_version)" >> $GITHUB_OUTPUT
diff --git a/charts/netdata/Chart.lock b/charts/netdata/Chart.lock
new file mode 100644
index 0000000..aa7d63d
--- /dev/null
+++ b/charts/netdata/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: opentelemetry-collector
+ repository: https://open-telemetry.github.io/opentelemetry-helm-charts
+ version: 0.144.0
+digest: sha256:4386d6f39c3aacd5eeb07d40dcc23404d33001d1c6a90e8843d5fa9bd1b34f24
+generated: "2026-01-29T13:37:43.621411516+01:00"
diff --git a/charts/netdata/Chart.yaml b/charts/netdata/Chart.yaml
index 24d6a59..2217cba 100644
--- a/charts/netdata/Chart.yaml
+++ b/charts/netdata/Chart.yaml
@@ -16,3 +16,10 @@ maintainers:
email: cloud-sre@netdata.cloud
icon: https://netdata.github.io/helmchart/logo.png
appVersion: v2.8.5
+
+dependencies:
+ - name: opentelemetry-collector
+ alias: otel-collector
+ version: "0.144.0"
+ repository: "https://open-telemetry.github.io/opentelemetry-helm-charts"
+ condition: otel-collector.enabled
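Because the dependency is aliased to `otel-collector` and gated on the `otel-collector.enabled` condition, the subchart's resources are only rendered when that flag is set — exactly the path the new `ct install` variant exercises. A sketch of enabling it at install time (the release name is illustrative):

```sh
helm install netdata ./charts/netdata \
  --set netdataOpentelemetry.enabled=true \
  --set otel-collector.enabled=true
```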
diff --git a/charts/netdata/README.md b/charts/netdata/README.md
index db3a71b..b92df66 100644
--- a/charts/netdata/README.md
+++ b/charts/netdata/README.md
@@ -2,9 +2,9 @@
-
+
-
+
_Based on the work of varyumin (https://github.com/varyumin/netdata)_.
@@ -67,19 +67,19 @@ Netdata is a comprehensive monitoring solution that requires specific access to
### Required Kubernetes RBAC Resources
-| Resource | Verbs | Components & Descriptions |
+| Resource | Verbs | Components & Descriptions |
|:-------------------|:-----------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| pods | get, list, watch | • **service discovery**: Used for discovering services.<br/>• **go.d/k8s_state**: Kubernetes state monitoring.<br/>• **netdata**: Used by cgroup-name.sh and get-kubernetes-labels.sh scripts. |
-| services | get, list, watch | • **service discovery**: Used for discovering services. |
-| configmaps | get, list, watch | • **service discovery**: Used for discovering services. |
-| secrets | get, list, watch | • **service discovery**: Used for discovering services. |
-| nodes | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
-| nodes/metrics | get, list, watch | • **go.d/k8s_kubelet**: Used when querying Kubelet HTTPS endpoint. |
-| nodes/proxy | get, list, watch | • **netdata**: Used by cgroup-name.sh when querying Kubelet /pods endpoint. |
-| deployments (apps) | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
-| cronjobs (batch) | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
-| jobs (batch) | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
-| namespaces | get | • **go.d/k8s_state**: Kubernetes state monitoring.<br/>• **netdata**: Used by cgroup-name.sh and get-kubernetes-labels.sh scripts. |
+| services | get, list, watch | • **service discovery**: Used for discovering services. |
+| configmaps | get, list, watch | • **service discovery**: Used for discovering services. |
+| secrets | get, list, watch | • **service discovery**: Used for discovering services. |
+| nodes | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
+| nodes/metrics | get, list, watch | • **go.d/k8s_kubelet**: Used when querying Kubelet HTTPS endpoint. |
+| nodes/proxy | get, list, watch | • **netdata**: Used by cgroup-name.sh when querying Kubelet /pods endpoint. |
+| deployments (apps) | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
+| cronjobs (batch) | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
+| jobs (batch) | get, list, watch | • **go.d/k8s_state**: Kubernetes state monitoring. |
+| namespaces | get | • **go.d/k8s_state**: Kubernetes state monitoring.<br/>• **netdata**: Used by cgroup-name.sh and get-kubernetes-labels.sh scripts. |
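To confirm a deployed release actually holds these permissions, `kubectl auth can-i` can impersonate the chart's service account. A sketch assuming the default service account name (`netdata`) and the `default` namespace:

```sh
kubectl auth can-i list pods --as=system:serviceaccount:default:netdata
kubectl auth can-i get nodes/metrics --as=system:serviceaccount:default:netdata
```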
@@ -134,175 +134,2045 @@ The command removes all the Kubernetes components associated with the chart and
The following table lists the configurable parameters of the netdata chart and their default values.
-| Parameter | Description | Default |
-|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|
-| `kubeVersion` | Kubernetes version | Autodetected |
-| `replicaCount` | Number of `replicas` for the parent netdata `Deployment` | `1` |
-| `imagePullSecrets` | An optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
-| `image.repository` | Container image repo | `netdata/netdata` |
-| `image.tag` | Container image tag | Latest stable netdata release |
-| `image.pullPolicy` | Container image pull policy | `Always` |
-| `service.type` | Parent service type | `ClusterIP` |
-| `service.port` | Parent service port | `19999` |
-| `service.loadBalancerIP` | Static LoadBalancer IP, only to be used with service type=LoadBalancer | `""` |
-| `service.loadBalancerSourceRanges` | List of allowed IPs for LoadBalancer | `[]` |
-| `service.externalTrafficPolicy` | Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints | `Cluster` |
-| `service.healthCheckNodePort` | Specifies the health check node port | Allocated a port from your cluster's NodePort range |
-| `service.clusterIP` | Specific cluster IP when service type is cluster IP. Use `None` for headless service | Allocated an IP from your cluster's service IP range |
-| `service.annotations` | Additional annotations to add to the service | `{}` |
-| `ingress.enabled` | Create Ingress to access the netdata web UI | `true` |
-| `ingress.apiVersion` | apiVersion for the Ingress | Depends on Kubernetes version |
-| `ingress.annotations` | Associate annotations to the Ingress | `kubernetes.io/ingress.class: nginx` and `kubernetes.io/tls-acme: "true"` |
-| `ingress.path` | URL path for the ingress. If changed, a proxy server needs to be configured in front of netdata to translate path from a custom one to a `/` | `/` |
-| `ingress.pathType` | pathType for your ingress contrller. Default value is correct for nginx. If you use yor own ingress controller, check the correct value | `Prefix` |
-| `ingress.hosts` | URL hostnames for the ingress (they need to resolve to the external IP of the ingress controller) | `netdata.k8s.local` |
-| `ingress.spec` | Spec section for ingress object. Everything there will be included into the object on deplyoment | `{}` |
-| `ingress.spec.ingressClassName` | Ingress class declaration for Kubernetes version 1.19+. Annotation ingress.class should be removed if this type of declaration is used | `nginx` |
-| `rbac.create` | if true, create & use RBAC resources | `true` |
-| `rbac.pspEnabled` | Specifies whether a PodSecurityPolicy should be created. | `true` |
-| `serviceAccount.create` | if true, create a service account | `true` |
-| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `netdata` |
-| `clusterrole.name` | Name of the cluster role linked with the service account | `netdata` |
-| `APIKEY` | The key shared between the parent and the child netdata for streaming | `11111111-2222-3333-4444-555555555555` |
-| `restarter.enabled` | Install CronJob to update Netdata Pods | `false` |
-| `restarter.schedule` | The schedule in Cron format | `00 06 * * *` |
-| `restarter.image.repository` | Container image repo | `rancher/kubectl` |
-| `restarter.image.tag` | Container image tag if `.auto`, the image tag version of `rancher/kubectl` will reflect the Kubernetes cluster version | `.auto` |
-| `restarter.image.pullPolicy` | Container image pull policy | `Always` |
-| `restarter.image.restartPolicy` | Container restart policy | `Never` |
-| `restarter.image.resources` | Container resources | `{}` |
-| `restarter.concurrencyPolicy` | Specifies how to treat concurrent executions of a job | `Forbid` |
-| `restarter.startingDeadlineSeconds` | Optional deadline in seconds for starting the job if it misses scheduled time for any reason | `60` |
-| `restarter.successfulJobsHistoryLimit` | The number of successful finished jobs to retain | `3` |
-| `restarter.failedJobsHistoryLimit` | The number of failed finished jobs to retain | `3` |
-| `parent.hostname` | Parent node hostname | `netdata-parent` |
-| `parent.enabled` | Install parent Deployment to receive metrics from children nodes | `true` |
-| `parent.port` | Parent's listen port | `19999` |
-| `parent.resources` | Resources for the parent deployment | `{}` |
-| `parent.livenessProbe.initialDelaySeconds` | Number of seconds after the container has started before liveness probes are initiated | `0` |
-| `parent.livenessProbe.failureThreshold` | When a liveness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the liveness probe means restarting the container | `3` |
-| `parent.livenessProbe.periodSeconds` | How often (in seconds) to perform the liveness probe | `30` |
-| `parent.livenessProbe.successThreshold` | Minimum consecutive successes for the liveness probe to be considered successful after having failed | `1` |
-| `parent.livenessProbe.timeoutSeconds` | Number of seconds after which the liveness probe times out | `1` |
-| `parent.readinessProbe.initialDelaySeconds` | Number of seconds after the container has started before readiness probes are initiated | `0` |
-| `parent.readinessProbe.failureThreshold` | When a readiness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the readiness probe means marking the Pod Unready | `3` |
-| `parent.readinessProbe.periodSeconds` | How often (in seconds) to perform the readiness probe | `30` |
-| `parent.readinessProbe.successThreshold` | Minimum consecutive successes for the readiness probe to be considered successful after having failed | `1` |
-| `parent.readinessProbe.timeoutSeconds` | Number of seconds after which the readiness probe times out | `1` |
-| `parent.terminationGracePeriodSeconds` | Duration in seconds the pod needs to terminate gracefully | `300` |
-| `parent.nodeSelector` | Node selector for the parent deployment | `{}` |
-| `parent.tolerations` | Tolerations settings for the parent deployment | `[]` |
-| `parent.affinity` | Affinity settings for the parent deployment | `{}` |
-| `parent.priorityClassName` | Pod priority class name for the parent deployment | `""` |
-| `parent.database.persistence` | Whether the parent should use a persistent volume for the DB | `true` |
-| `parent.database.storageclass` | The storage class for the persistent volume claim of the parent's database store, mounted to `/var/cache/netdata` | the default storage class |
-| `parent.database.volumesize` | The storage space for the PVC of the parent database | `5Gi` |
-| `parent.alarms.persistence` | Whether the parent should use a persistent volume for the alarms log | `true` |
-| `parent.alarms.storageclass` | The storage class for the persistent volume claim of the parent's alarm log, mounted to `/var/lib/netdata` | the default storage class |
-| `parent.alarms.volumesize` | The storage space for the PVC of the parent alarm log | `1Gi` |
-| `parent.env` | Set environment parameters for the parent deployment | `{}` |
-| `parent.envFrom` | Set environment parameters for the parent deployment from ConfigMap and/or Secrets | `[]` |
-| `parent.podLabels` | Additional labels to add to the parent pods | `{}` |
-| `parent.podAnnotations` | Additional annotations to add to the parent pods | `{}` |
-| `parent.dnsPolicy` | DNS policy for pod | `Default` |
-| `parent.configs` | Manage custom parent's configs | See [Configuration files](#configuration-files). |
-| `parent.claiming.enabled` | Enable parent claiming for netdata cloud | `false` |
-| `parent.claiming.token` | Claim token | `""` |
-| `parent.claiming.room` | Comma separated list of claim rooms IDs | `""` |
-| `parent.securityContext.runAsUser` | The UID to run the container process | `201` |
-| `parent.securityContext.runAsGroup` | The GID to run the container process | `201` |
-| `parent.securityContext.fsGroup` | The supplementary group for setting permissions on volumes | `201` |
-| `parent.extraInitContainers` | Additional init containers to add to the parent pods | `[]` |
-| `parent.extraVolumeMounts` | Additional volumeMounts to add to the parent pods | `[]` |
-| `parent.extraVolumes` | Additional volumes to add to the parent pods | `[]` |
-| `k8sState.hostname` | K8s state node hostname | `netdata-k8s-state` |
-| `k8sState.enabled` | Install this Deployment to gather data from K8s cluster | `true` |
-| `k8sState.port` | Listen port | `service.port` (Same as parent's listen port) |
-| `k8sState.resources` | Compute resources required by this Deployment | `{}` |
-| `k8sState.livenessProbe.initialDelaySeconds` | Number of seconds after the container has started before liveness probes are initiated | `0` |
-| `k8sState.livenessProbe.failureThreshold` | When a liveness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the liveness probe means restarting the container | `3` |
-| `k8sState.livenessProbe.periodSeconds` | How often (in seconds) to perform the liveness probe | `30` |
-| `k8sState.livenessProbe.successThreshold` | Minimum consecutive successes for the liveness probe to be considered successful after having failed | `1` |
-| `k8sState.livenessProbe.timeoutSeconds` | Number of seconds after which the liveness probe times out | `1` |
-| `k8sState.readinessProbe.initialDelaySeconds` | Number of seconds after the container has started before readiness probes are initiated | `0` |
-| `k8sState.readinessProbe.failureThreshold` | When a readiness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the readiness probe means marking the Pod Unready | `3` |
-| `k8sState.readinessProbe.periodSeconds` | How often (in seconds) to perform the readiness probe | `30` |
-| `k8sState.readinessProbe.successThreshold` | Minimum consecutive successes for the readiness probe to be considered successful after having failed | `1` |
-| `k8sState.readinessProbe.timeoutSeconds` | Number of seconds after which the readiness probe times out | `1` |
-| `k8sState.terminationGracePeriodSeconds` | Duration in seconds the pod needs to terminate gracefully | `30` |
-| `k8sState.terminationGracePeriodSeconds` | Duration in seconds the pod needs to terminate gracefully | `300` |
-| `k8sState.nodeSelector` | Node selector | `{}` |
-| `k8sState.tolerations` | Tolerations settings | `[]` |
-| `k8sState.affinity` | Affinity settings | `{}` |
-| `k8sState.priorityClassName` | Pod priority class name | `""` |
-| `k8sState.podLabels` | Additional labels | `{}` |
-| `k8sState.podAnnotations` | Additional annotations | `{}` |
-| `k8sState.podAnnotationAppArmor.enabled` | Whether or not to include the AppArmor security annotation | `true` |
-| `k8sState.dnsPolicy` | DNS policy for pod | `ClusterFirstWithHostNet` |
-| `k8sState.persistence.enabled` | Whether should use a persistent volume for `/var/lib/netdata` | `true` |
-| `k8sState.persistence.storageclass` | The storage class for the persistent volume claim of `/var/lib/netdata` | the default storage class |
-| `k8sState.persistence.volumesize` | The storage space for the PVC of `/var/lib/netdata` | `1Gi` |
-| `k8sState.env` | Set environment parameters | `{}` |
-| `k8sState.envFrom` | Set environment parameters from ConfigMap and/or Secrets | `[]` |
-| `k8sState.configs` | Manage custom configs | See [Configuration files](#configuration-files). |
-| `k8sState.claiming.enabled` | Enable claiming for netdata cloud | `false` |
-| `k8sState.claiming.token` | Claim token | `""` |
-| `k8sState.claiming.room` | Comma separated list of claim rooms IDs | `""` |
-| `k8sState.extraVolumeMounts` | Additional volumeMounts to add to the k8sState pods | `[]` |
-| `k8sState.extraVolumes` | Additional volumes to add to the k8sState pods | `[]` |
-| `child.enabled` | Install child DaemonSet to gather data from nodes | `true` |
-| `child.port` | Children's listen port | `service.port` (Same as parent's listen port) |
-| `child.updateStrategy` | An update strategy to replace existing DaemonSet pods with new pods | `{}` |
-| `child.resources` | Resources for the child DaemonSet | `{}` |
-| `child.livenessProbe.initialDelaySeconds` | Number of seconds after the container has started before liveness probes are initiated | `0` |
-| `child.livenessProbe.failureThreshold` | When a liveness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the liveness probe means restarting the container | `3` |
-| `child.livenessProbe.periodSeconds` | How often (in seconds) to perform the liveness probe | `30` |
-| `child.livenessProbe.successThreshold` | Minimum consecutive successes for the liveness probe to be considered successful after having failed | `1` |
-| `child.livenessProbe.timeoutSeconds` | Number of seconds after which the liveness probe times out | `1` |
-| `child.readinessProbe.initialDelaySeconds` | Number of seconds after the container has started before readiness probes are initiated | `0` |
-| `child.readinessProbe.failureThreshold` | When a readiness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the readiness probe means marking the Pod Unready | `3` |
-| `child.readinessProbe.periodSeconds` | How often (in seconds) to perform the readiness probe | `30` |
-| `child.readinessProbe.successThreshold` | Minimum consecutive successes for the readiness probe to be considered successful after having failed | `1` |
-| `child.readinessProbe.timeoutSeconds` | Number of seconds after which the readiness probe times out | `1` |
-| `child.terminationGracePeriodSeconds` | Duration in seconds the pod needs to terminate gracefully | `30` |
-| `child.nodeSelector` | Node selector for the child daemonsets | `{}` |
-| `child.tolerations` | Tolerations settings for the child daemonsets | `- operator: Exists` with `effect: NoSchedule` |
-| `child.affinity` | Affinity settings for the child daemonsets | `{}` |
-| `child.priorityClassName` | Pod priority class name for the child daemonsets | `""` |
-| `child.env` | Set environment parameters for the child daemonset | `{}` |
-| `child.envFrom` | Set environment parameters for the child daemonset from ConfigMap and/or Secrets | `[]` |
-| `child.podLabels` | Additional labels to add to the child pods | `{}` |
-| `child.podAnnotations` | Additional annotations to add to the child pods | `{}` |
-| `child.hostNetwork` | Usage of host networking and ports | `true` |
-| `child.dnsPolicy` | DNS policy for pod. Should be `ClusterFirstWithHostNet` if `child.hostNetwork = true` | `ClusterFirstWithHostNet` |
-| `child.podAnnotationAppArmor.enabled` | Whether or not to include the AppArmor security annotation | `true` |
-| `child.persistence.hostPath` | Host node directory for storing child instance data | `/var/lib/netdata-k8s-child` |
-| `child.persistence.enabled` | Whether or not to persist `/var/lib/netdata` in the `child.persistence.hostPath`. | `true` |
-| `child.podsMetadata.useKubelet` | Send requests to the Kubelet /pods endpoint instead of Kubernetes API server to get pod metadata | `false` |
-| `child.podsMetadata.kubeletUrl` | Kubelet URL | `https://localhost:10250` |
-| `child.configs` | Manage custom child's configs | See [Configuration files](#configuration-files). |
-| `child.claiming.enabled` | Enable child claiming for netdata cloud | `false` |
-| `child.claiming.token` | Claim token | `""` |
-| `child.claiming.room` | Comma separated list of claim rooms IDs | `""` |
-| `child.extraVolumeMounts` | Additional volumeMounts to add to the child pods | `[]` |
-| `child.extraVolumes` | Additional volumes to add to the child pods | `[]` |
-| `notifications.slack.webhook_url` | Slack webhook URL | `""` |
-| `notifications.slack.recipient` | Slack recipient list | `""` |
-| `initContainersImage.repository` | Init containers' image repository | `alpine` |
-| `initContainersImage.tag` | Init containers' image tag | `latest` |
-| `initContainersImage.pullPolicy` | Init containers' image pull policy | `Always` |
-| `sysctlInitContainer.enabled` | Enable an init container to modify Kernel settings | `false` |
-| `sysctlInitContainer.command` | sysctl init container command to execute | [] |
-| `sysctlInitContainer.resources` | sysctl Init container CPU/Memory resource requests/limits | {} |
-| `sd.image.repository` | Service-discovery image repo | `netdata/agent-sd` |
-| `sd.image.tag` | Service-discovery image tag | Latest stable release (e.g. `v0.2.2`) |
-| `sd.image.pullPolicy` | Service-discovery image pull policy | `Always` |
-| `sd.child.enabled` | Add service-discovery sidecar container to the netdata child pod definition | `true` |
-| `sd.child.resources` | Child service-discovery container CPU/Memory resource requests/limits | `{resources: {limits: {cpu: 50m, memory: 150Mi}, requests: {cpu: 50m, memory: 100Mi}}}` |
-| `sd.child.configmap.name` | Child service-discovery ConfigMap name | `netdata-child-sd-config-map` |
-| `sd.child.configmap.key` | Child service-discovery ConfigMap key | `config.yml` |
-| `sd.child.configmap.from.file` | File to use for child service-discovery configuration generation | `sdconfig/sd-child.yml` |
-| `sd.child.configmap.from.value` | Value to use for child service-discovery configuration generation | `{}` |
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| replicaCount | int | `1` | Number of `replicas` for the parent netdata `Deployment` |
+| deploymentStrategy.type | string | `"Recreate"` | Deployment strategy for pod deployments. Recreate is the safest value. |
+| imagePullSecrets | list | `[]` | An optional list of references to secrets in the same namespace to use for pulling any of the images |
+| image.repository | string | `"netdata/netdata"` | Container image repository |
+| image.tag | string | `"{{ .Chart.AppVersion }}"` | Container image tag |
+| image.pullPolicy | string | `"Always"` | Container image pull policy |
+| initContainersImage.repository | string | `"alpine"` | Init containers' image repository |
+| initContainersImage.tag | string | `"latest"` | Init containers' image tag |
+| initContainersImage.pullPolicy | string | `"Always"` | Init containers' image pull policy |
+| sysctlInitContainer.enabled | bool | `false` | Enable an init container to modify Kernel settings |
+| sysctlInitContainer.command | list | `[]` | sysctl init container command to execute |
+| sysctlInitContainer.resources | object | `{}` | sysctl init container CPU/Memory resource requests/limits |
+| service.type | string | `"ClusterIP"` | Parent service type |
+| service.port | int | `19999` | Parent service port |
+| service.annotations | object | `{}` | Additional annotations to add to the service |
+| service.loadBalancerIP | string | `""` | Static LoadBalancer IP, only to be used with service type=LoadBalancer |
+| service.loadBalancerSourceRanges | list | `[]` | List of allowed IPs for LoadBalancer |
+| service.externalTrafficPolicy | string | `""` | Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints |
+| service.healthCheckNodePort | string | `null` | Specifies the health check node port (only to be used with type LoadBalancer and external traffic policy Local) |
+| service.clusterIP | string | `""` | Specific cluster IP when service type is cluster IP. Use `None` for headless service |
+| ingress.enabled | bool | `true` | Create Ingress to access the netdata web UI |
+| ingress.annotations | object | See values.yaml for defaults | Associate annotations to the Ingress |
+| ingress.path | string | `"/"` | URL path for the ingress. If changed, a proxy server needs to be configured in front of netdata to translate path from a custom one to a `/` |
+| ingress.pathType | string | `"Prefix"` | pathType for your ingress controller. Default value is correct for nginx. If you use your own ingress controller, check the correct value |
+| ingress.hosts[0] | string | `"netdata.k8s.local"` | URL hostnames for the ingress (they need to resolve to the external IP of the ingress controller) |
+| rbac.create | bool | `true` | if true, create & use RBAC resources |
+| rbac.pspEnabled | bool | `true` | Specifies whether a PodSecurityPolicy should be created |
+| serviceAccount.create | bool | `true` | if true, create a service account |
+| serviceAccount.name | string | `"netdata"` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+| restarter.enabled | bool | `false` | Install CronJob to update Netdata Pods |
+| restarter.schedule | string | `"00 06 * * *"` | The schedule in Cron format |
+| restarter.image.repository | string | `"rancher/kubectl"` | Container image repo |
+| restarter.image.tag | string | `".auto"` | Container image tag. If `.auto`, the rancher/kubectl image tag will reflect the Kubernetes cluster version |
+| restarter.image.pullPolicy | string | `"Always"` | Container image pull policy |
+| restarter.restartPolicy | string | `"Never"` | Container restart policy |
+| restarter.resources | object | `{}` | Container resources |
+| restarter.concurrencyPolicy | string | `"Forbid"` | Specifies how to treat concurrent executions of a job |
+| restarter.startingDeadlineSeconds | int | `60` | Optional deadline in seconds for starting the job if it misses scheduled time for any reason |
+| restarter.successfulJobsHistoryLimit | int | `3` | The number of successful finished jobs to retain |
+| restarter.failedJobsHistoryLimit | int | `3` | The number of failed finished jobs to retain |
+| notifications.slack.webhook_url | string | `""` | Slack webhook URL |
+| notifications.slack.recipient | string | `""` | Slack recipient list |
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| sd.image.repository | string | `"netdata/agent-sd"` | Container image repository |
+| sd.image.tag | string | `"v0.2.10"` | Container image tag |
+| sd.image.pullPolicy | string | `"Always"` | Container image pull policy |
+| sd.child.enabled | bool | `true` | Add service-discovery sidecar container to the netdata child pod definition |
+| sd.child.configmap.name | string | `"netdata-child-sd-config-map"` | Child service-discovery ConfigMap name |
+| sd.child.configmap.key | string | `"config.yml"` | Child service-discovery ConfigMap key |
+| sd.child.configmap.from.file | string | `""` | File to use for child service-discovery configuration generation |
+| sd.child.configmap.from.value | object | `{}` | Value to use for child service-discovery configuration generation |
+| sd.child.resources | object | See values.yaml for defaults | Child service-discovery container CPU/Memory resource requests/limits |
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| parent.hostname | string | `"netdata-parent"` | Parent node hostname |
+| parent.enabled | bool | `true` | Install parent Deployment to receive metrics from children nodes |
+| parent.port | int | `19999` | Parent's listen port |
+| parent.resources | object | `{}` | Resources for the parent deployment |
+| parent.livenessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before liveness probes are initiated |
+| parent.livenessProbe.failureThreshold | int | `3` | When a liveness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the liveness probe means restarting the container |
+| parent.livenessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the liveness probe |
+| parent.livenessProbe.successThreshold | int | `1` | Minimum consecutive successes for the liveness probe to be considered successful after having failed |
+| parent.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the liveness probe times out |
+| parent.readinessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before readiness probes are initiated |
+| parent.readinessProbe.failureThreshold | int | `3` | When a readiness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the readiness probe means marking the Pod Unready |
+| parent.readinessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the readiness probe |
+| parent.readinessProbe.successThreshold | int | `1` | Minimum consecutive successes for the readiness probe to be considered successful after having failed |
+| parent.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the readiness probe times out |
+| parent.securityContext.runAsUser | int | `201` | The UID to run the container process |
+| parent.securityContext.runAsGroup | int | `201` | The GID to run the container process |
+| parent.securityContext.fsGroup | int | `201` | The supplementary group for setting permissions on volumes |
+| parent.terminationGracePeriodSeconds | int | `300` | Duration in seconds the pod needs to terminate gracefully |
+| parent.nodeSelector | object | `{}` | Node selector for the parent deployment |
+| parent.tolerations | list | `[]` | Tolerations settings for the parent deployment |
+| parent.affinity | object | `{}` | Affinity settings for the parent deployment |
+| parent.priorityClassName | string | `""` | Pod priority class name for the parent deployment |
+| parent.env | object | `{}` | Set environment parameters for the parent deployment |
+| parent.envFrom | list | `[]` | Set environment parameters for the parent deployment from ConfigMap and/or Secrets |
+| parent.podLabels | object | `{}` | Additional labels to add to the parent pods |
+| parent.podAnnotations | object | `{}` | Additional annotations to add to the parent pods |
+| parent.dnsPolicy | string | `"Default"` | DNS policy for pod |
+| parent.database.persistence | bool | `true` | Whether the parent should use a persistent volume for the DB |
+| parent.database.storageclass | string | `"-"` | The storage class for the persistent volume claim of the parent's database store, mounted to `/var/cache/netdata` |
+| parent.database.volumesize | string | `"5Gi"` | The storage space for the PVC of the parent database |
+| parent.alarms.persistence | bool | `true` | Whether the parent should use a persistent volume for the alarms log |
+| parent.alarms.storageclass | string | `"-"` | The storage class for the persistent volume claim of the parent's alarm log, mounted to `/var/lib/netdata` |
+| parent.alarms.volumesize | string | `"1Gi"` | The storage space for the PVC of the parent alarm log |
+| parent.configs | object | See values.yaml for defaults | Manage custom parent's configs |
+| parent.claiming.enabled | bool | `false` | Enable parent claiming for netdata cloud |
+| parent.claiming.token | string | `""` | Claim token |
+| parent.claiming.rooms | string | `""` | Comma separated list of claim rooms IDs. Empty value = All nodes room only |
+| parent.extraVolumeMounts | list | `[]` | Additional volumeMounts to add to the parent pods |
+| parent.extraVolumes | list | `[]` | Additional volumes to add to the parent pods |
+| parent.extraInitContainers | list | `[]` | Additional init containers to add to the parent pods |
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| child.enabled | bool | `true` | Install child DaemonSet to gather data from nodes |
+| child.port | string | `"{{ .Values.parent.port }}"` | Children's listen port |
+| child.updateStrategy | object | `{}` | An update strategy to replace existing DaemonSet pods with new pods |
+| child.resources | object | `{}` | Resources for the child DaemonSet |
+| child.livenessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before liveness probes are initiated |
+| child.livenessProbe.failureThreshold | int | `3` | When a liveness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the liveness probe means restarting the container |
+| child.livenessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the liveness probe |
+| child.livenessProbe.successThreshold | int | `1` | Minimum consecutive successes for the liveness probe to be considered successful after having failed |
+| child.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the liveness probe times out |
+| child.readinessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before readiness probes are initiated |
+| child.readinessProbe.failureThreshold | int | `3` | When a readiness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the readiness probe means marking the Pod Unready |
+| child.readinessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the readiness probe |
+| child.readinessProbe.successThreshold | int | `1` | Minimum consecutive successes for the readiness probe to be considered successful after having failed |
+| child.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the readiness probe times out |
+| child.terminationGracePeriodSeconds | int | `30` | Duration in seconds the pod needs to terminate gracefully |
+| child.nodeSelector | object | `{}` | Node selector for the child daemonsets |
+| child.tolerations | list | See values.yaml for defaults | Tolerations settings for the child daemonsets |
+| child.affinity | object | `{}` | Affinity settings for the child daemonsets |
+| child.priorityClassName | string | `""` | Pod priority class name for the child daemonsets |
+| child.podLabels | object | `{}` | Additional labels to add to the child pods |
+| child.podAnnotationAppArmor.enabled | bool | `true` | Whether or not to include the AppArmor security annotation |
+| child.podAnnotations | object | `{}` | Additional annotations to add to the child pods |
+| child.hostNetwork | bool | `true` | Usage of host networking and ports |
+| child.dnsPolicy | string | `"ClusterFirstWithHostNet"` | DNS policy for pod. Should be `ClusterFirstWithHostNet` if `child.hostNetwork = true` |
+| child.persistence.enabled | bool | `true` | Whether or not to persist `/var/lib/netdata` in the `child.persistence.hostPath` |
+| child.persistence.hostPath | string | `"/var/lib/netdata-k8s-child"` | Host node directory for storing child instance data |
+| child.podsMetadata.useKubelet | bool | `false` | Send requests to the Kubelet /pods endpoint instead of Kubernetes API server to get pod metadata |
+| child.podsMetadata.kubeletUrl | string | `"https://localhost:10250"` | Kubelet URL |
+| child.configs | object | See values.yaml for defaults | Manage custom child's configs |
+| child.env | object | `{}` | Set environment parameters for the child daemonset |
+| child.envFrom | list | `[]` | Set environment parameters for the child daemonset from ConfigMap and/or Secrets |
+| child.claiming.enabled | bool | `false` | Enable child claiming for netdata cloud |
+| child.claiming.token | string | `""` | Claim token |
+| child.claiming.rooms | string | `""` | Comma separated list of claim rooms IDs. Empty value = All nodes room only |
+| child.extraVolumeMounts | list | `[]` | Additional volumeMounts to add to the child pods |
+| child.extraVolumes | list | `[]` | Additional volumes to add to the child pods |
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| k8sState.hostname | string | `"netdata-k8s-state"` | K8s state node hostname |
+| k8sState.enabled | bool | `true` | Install this Deployment to gather data from K8s cluster |
+| k8sState.port | string | `"{{ .Values.parent.port }}"` | Listen port |
+| k8sState.resources | object | `{}` | Compute resources required by this Deployment |
+| k8sState.livenessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before liveness probes are initiated |
+| k8sState.livenessProbe.failureThreshold | int | `3` | When a liveness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the liveness probe means restarting the container |
+| k8sState.livenessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the liveness probe |
+| k8sState.livenessProbe.successThreshold | int | `1` | Minimum consecutive successes for the liveness probe to be considered successful after having failed |
+| k8sState.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the liveness probe times out |
+| k8sState.readinessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before readiness probes are initiated |
+| k8sState.readinessProbe.failureThreshold | int | `3` | When a readiness probe fails, Kubernetes will try failureThreshold times before giving up. Giving up the readiness probe means marking the Pod Unready |
+| k8sState.readinessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the readiness probe |
+| k8sState.readinessProbe.successThreshold | int | `1` | Minimum consecutive successes for the readiness probe to be considered successful after having failed |
+| k8sState.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the readiness probe times out |
+| k8sState.terminationGracePeriodSeconds | int | `30` | Duration in seconds the pod needs to terminate gracefully |
+| k8sState.nodeSelector | object | `{}` | Node selector |
+| k8sState.tolerations | list | `[]` | Tolerations settings |
+| k8sState.affinity | object | `{}` | Affinity settings |
+| k8sState.priorityClassName | string | `""` | Pod priority class name |
+| k8sState.podLabels | object | `{}` | Additional labels |
+| k8sState.podAnnotationAppArmor.enabled | bool | `true` | Whether or not to include the AppArmor security annotation |
+| k8sState.podAnnotations | object | `{}` | Additional annotations |
+| k8sState.dnsPolicy | string | `"ClusterFirstWithHostNet"` | DNS policy for pod |
+| k8sState.persistence.enabled | bool | `true` | Whether to use a persistent volume for `/var/lib/netdata` |
+| k8sState.persistence.storageclass | string | `"-"` | The storage class for the persistent volume claim of `/var/lib/netdata` |
+| k8sState.persistence.volumesize | string | `"1Gi"` | The storage space for the PVC of `/var/lib/netdata` |
+| k8sState.env | object | `{}` | Set environment parameters |
+| k8sState.envFrom | list | `[]` | Set environment parameters from ConfigMap and/or Secrets |
+| k8sState.configs | object | See values.yaml for defaults | Manage custom configs |
+| k8sState.claiming.enabled | bool | `false` | Enable claiming for netdata cloud |
+| k8sState.claiming.token | string | `""` | Claim token |
+| k8sState.claiming.rooms | string | `""` | Comma separated list of claim rooms IDs. Empty value = All nodes room only |
+| k8sState.extraVolumeMounts | list | `[]` | Additional volumeMounts to add to the k8sState pods |
+| k8sState.extraVolumes | list | `[]` | Additional volumes to add to the k8sState pods |
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| netdataOpentelemetry.enabled | bool | `false` | Enable the Netdata OpenTelemetry Deployment |
+| netdataOpentelemetry.hostname | string | `"netdata-otel"` | Hostname for the Netdata OpenTelemetry instance |
+| netdataOpentelemetry.port | string | `"{{ .Values.parent.port }}"` | Listen port |
+| netdataOpentelemetry.service.type | string | `"ClusterIP"` | Service type |
+| netdataOpentelemetry.service.port | int | `4317` | Service port |
+| netdataOpentelemetry.service.annotations | object | `{}` | Service annotations |
+| netdataOpentelemetry.service.clusterIP | string | `""` | Cluster IP address (only used with service.type ClusterIP) |
+| netdataOpentelemetry.service.loadBalancerIP | string | `""` | LoadBalancer IP address (only used with service.type LoadBalancer) |
+| netdataOpentelemetry.service.loadBalancerSourceRanges | list | `[]` | Allowed source ranges for LoadBalancer (only used with service.type LoadBalancer) |
+| netdataOpentelemetry.service.externalTrafficPolicy | string | `""` | External traffic policy (only used with service.type LoadBalancer) |
+| netdataOpentelemetry.service.healthCheckNodePort | string | `""` | Health check node port (only used with service.type LoadBalancer and external traffic policy Local) |
+| netdataOpentelemetry.resources | object | `{}` | Compute resources required by this Deployment |
+| netdataOpentelemetry.livenessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before liveness probes are initiated |
+| netdataOpentelemetry.livenessProbe.failureThreshold | int | `3` | When a liveness probe fails, Kubernetes will try failureThreshold times before giving up |
+| netdataOpentelemetry.livenessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the liveness probe |
+| netdataOpentelemetry.livenessProbe.successThreshold | int | `1` | Minimum consecutive successes for the liveness probe to be considered successful after having failed |
+| netdataOpentelemetry.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the liveness probe times out |
+| netdataOpentelemetry.readinessProbe.initialDelaySeconds | int | `0` | Number of seconds after the container has started before readiness probes are initiated |
+| netdataOpentelemetry.readinessProbe.failureThreshold | int | `3` | When a readiness probe fails, Kubernetes will try failureThreshold times before giving up |
+| netdataOpentelemetry.readinessProbe.periodSeconds | int | `30` | How often (in seconds) to perform the readiness probe |
+| netdataOpentelemetry.readinessProbe.successThreshold | int | `1` | Minimum consecutive successes for the readiness probe to be considered successful after having failed |
+| netdataOpentelemetry.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the readiness probe times out |
+| netdataOpentelemetry.securityContext.runAsUser | int | `201` | The UID to run the container process |
+| netdataOpentelemetry.securityContext.runAsGroup | int | `201` | The GID to run the container process |
+| netdataOpentelemetry.securityContext.fsGroup | int | `201` | The supplementary group for setting permissions on volumes |
+| netdataOpentelemetry.terminationGracePeriodSeconds | int | `30` | Duration in seconds the pod needs to terminate gracefully |
+| netdataOpentelemetry.nodeSelector | object | `{}` | Node selector |
+| netdataOpentelemetry.tolerations | list | `[]` | Tolerations settings |
+| netdataOpentelemetry.affinity | object | `{}` | Affinity settings |
+| netdataOpentelemetry.priorityClassName | string | `""` | Pod priority class name |
+| netdataOpentelemetry.podLabels | object | `{}` | Additional labels |
+| netdataOpentelemetry.podAnnotationAppArmor.enabled | bool | `true` | Whether or not to include the AppArmor security annotation |
+| netdataOpentelemetry.podAnnotations | object | `{}` | Additional annotations |
+| netdataOpentelemetry.dnsPolicy | string | `"Default"` | DNS policy for pod |
+| netdataOpentelemetry.persistence.enabled | bool | `true` | Whether to use a persistent volume |
+| netdataOpentelemetry.persistence.storageclass | string | `"-"` | The storage class for the persistent volume claim |
+| netdataOpentelemetry.persistence.volumesize | string | `"10Gi"` | The storage space for the PVC |
+| netdataOpentelemetry.configs | object | See values.yaml for defaults | Manage custom configs |
+| netdataOpentelemetry.env | object | `{}` | Set environment parameters |
+| netdataOpentelemetry.envFrom | list | `[]` | Set environment parameters from ConfigMap and/or Secrets |
+| netdataOpentelemetry.claiming.enabled | bool | `false` | Enable claiming for netdata cloud |
+| netdataOpentelemetry.claiming.token | string | `""` | Claim token |
+| netdataOpentelemetry.claiming.rooms | string | `""` | Comma separated list of claim rooms IDs. Empty value = All nodes room only |
+| netdataOpentelemetry.extraVolumeMounts | list | `[]` | Additional volumeMounts |
+| netdataOpentelemetry.extraVolumes | list | `[]` | Additional volumes |
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| otel-collector.enabled | bool | `false` | Set to true to enable the OpenTelemetry Collector |
+| otel-collector.mode | string | `"daemonset"` | Deployment mode: daemonset, deployment, or statefulset |
+| otel-collector.image.repository | string | `"otel/opentelemetry-collector-k8s"` | Image repository |
+| otel-collector.presets.kubernetesAttributes.enabled | bool | `true` | Enable Kubernetes attributes collection |
+| otel-collector.presets.logsCollection.enabled | bool | `true` | Enable logs collection from Kubernetes pods |
+| otel-collector.presets.logsCollection.includeCollectorLogs | bool | `false` | Include collector logs in the collection |
+| otel-collector.config | object | See values.yaml for defaults | OpenTelemetry Collector configuration |
+| otel-collector.resources | object | See values.yaml for defaults | Resources |
+| otel-collector.serviceAccount.create | bool | `true` | Create service account |
+| otel-collector.clusterRole.create | bool | `true` | Create cluster role |
+| otel-collector.clusterRole.rules | list | See values.yaml for defaults | Cluster role rules |
+| otel-collector.tolerations | list | See values.yaml for defaults | Tolerations to run on all nodes |
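Taken together, the last two tables describe an opt-in OpenTelemetry pipeline: the `otel-collector` subchart gathers telemetry from the cluster, and a dedicated Netdata OpenTelemetry Deployment exposes a service on port 4317 (the standard OTLP gRPC port) to receive it. A minimal values sketch enabling both, using only keys from the tables above; how the collector's default config wires to the Netdata service lives in values.yaml and is not shown here:

```yaml
# values-otel.yaml -- illustrative overrides
netdataOpentelemetry:
  enabled: true      # deploy the Netdata OpenTelemetry instance
  service:
    port: 4317       # OTLP gRPC port (the chart default)

otel-collector:
  enabled: true      # render the aliased opentelemetry-collector subchart
  mode: daemonset    # one collector per node (the chart default)
  presets:
    logsCollection:
      enabled: true  # collect logs from Kubernetes pods
```

Applied with, e.g., `helm upgrade --install netdata ./charts/netdata -f values-otel.yaml`.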