Initial commit

2025-12-09 19:34:54 +11:00
commit a4d98eea50
894 changed files with 131646 additions and 0 deletions


@@ -0,0 +1,29 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
README.md.tmpl
# Nix Shell
*.nix
kubectl-plugin/
# Chart Metadata
images.txt


@@ -0,0 +1,24 @@
dependencies:
- name: crds
repository: ""
version: 2.10.0
- name: etcd
repository: https://charts.bitnami.com/bitnami
version: 12.0.14
- name: jaeger-operator
repository: https://jaegertracing.github.io/helm-charts
version: 2.50.1
- name: loki
repository: https://grafana.github.io/helm-charts
version: 6.29.0
- name: alloy
repository: https://grafana.github.io/helm-charts
version: 1.0.1
- name: nats
repository: https://nats-io.github.io/k8s/helm/charts/
version: 0.19.14
- name: localpv-provisioner
repository: https://openebs.github.io/dynamic-localpv-provisioner
version: 4.4.0
digest: sha256:3f8b564dcddd40fc908991e99b222abb4322d55f4d285a105cae6c9ae0c8b006
generated: "2025-11-19T16:41:00.052587499Z"

charts/mayastor/Chart.yaml

@@ -0,0 +1,103 @@
annotations:
helm.sh/images: |
- name: alloy
image: docker.io/grafana/alloy:v1.8.1
- name: loki
image: docker.io/grafana/loki:3.4.2
- name: k8s-sidecar
image: docker.io/kiwigrid/k8s-sidecar:1.30.2
- name: nats
image: docker.io/nats:2.9.17-alpine
- name: nats-box
image: docker.io/natsio/nats-box:0.13.8
- name: nats-server-config-reloader
image: docker.io/natsio/nats-server-config-reloader:0.10.1
- name: prometheus-nats-exporter
image: docker.io/natsio/prometheus-nats-exporter:0.11.0
- name: alpine-bash
image: docker.io/openebs/alpine-bash:4.3.0
- name: alpine-sh
image: docker.io/openebs/alpine-sh:4.3.0
- name: etcd
image: docker.io/openebs/etcd:3.6.4-debian-12-r0
- name: kubectl
image: docker.io/openebs/kubectl:1.25.15
- name: linux-utils
image: docker.io/openebs/linux-utils:4.3.0
- name: mayastor-agent-core
image: docker.io/openebs/mayastor-agent-core:v2.10.0
- name: mayastor-agent-ha-cluster
image: docker.io/openebs/mayastor-agent-ha-cluster:v2.10.0
- name: mayastor-agent-ha-node
image: docker.io/openebs/mayastor-agent-ha-node:v2.10.0
- name: mayastor-api-rest
image: docker.io/openebs/mayastor-api-rest:v2.10.0
- name: mayastor-csi-controller
image: docker.io/openebs/mayastor-csi-controller:v2.10.0
- name: mayastor-csi-node
image: docker.io/openebs/mayastor-csi-node:v2.10.0
- name: mayastor-io-engine
image: docker.io/openebs/mayastor-io-engine:v2.10.0
- name: mayastor-metrics-exporter-io-engine
image: docker.io/openebs/mayastor-metrics-exporter-io-engine:v2.10.0
- name: mayastor-obs-callhome-stats
image: docker.io/openebs/mayastor-obs-callhome-stats:v2.10.0
- name: mayastor-obs-callhome
image: docker.io/openebs/mayastor-obs-callhome:v2.10.0
- name: mayastor-operator-diskpool
image: docker.io/openebs/mayastor-operator-diskpool:v2.10.0
- name: provisioner-localpv
image: docker.io/openebs/provisioner-localpv:4.4.0
- name: mc
image: quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z
- name: minio
image: quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z
- name: prometheus-config-reloader
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.8.1
- name: csi-node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.13.0
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.2.0
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.13.2
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0
- name: snapshot-controller
image: registry.k8s.io/sig-storage/snapshot-controller:v8.2.0
apiVersion: v2
appVersion: 2.10.0
dependencies:
- condition: crds.enabled
name: crds
repository: ""
version: 2.10.0
- condition: etcd.enabled
name: etcd
repository: https://charts.bitnami.com/bitnami
version: 12.0.14
- condition: base.jaeger.enabled
name: jaeger-operator
repository: https://jaegertracing.github.io/helm-charts
version: 2.50.1
- condition: loki.enabled
name: loki
repository: https://grafana.github.io/helm-charts
version: 6.29.0
- condition: alloy.enabled
name: alloy
repository: https://grafana.github.io/helm-charts
version: 1.0.1
- condition: eventing.enabled
name: nats
repository: https://nats-io.github.io/k8s/helm/charts/
version: 0.19.14
- condition: localpv-provisioner.enabled
name: localpv-provisioner
repository: https://openebs.github.io/dynamic-localpv-provisioner
version: 4.4.0
description: Mayastor Helm chart for Kubernetes
name: mayastor
type: application
version: 2.10.0

charts/mayastor/README.md

@@ -0,0 +1,246 @@
# mayastor
Mayastor Helm chart for Kubernetes
![Version: 2.10.0](https://img.shields.io/badge/Version-2.10.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.10.0](https://img.shields.io/badge/AppVersion-2.10.0-informational?style=flat-square)
## Installation Guide
### Prerequisites
- Make sure the [system requirement pre-requisites](https://mayastor.gitbook.io/introduction/quickstart/prerequisites) are met.
- Label the storage nodes to match the `mayastor.nodeSelector` in values.yaml, as shown below.
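For example, using the default label from `io_engine.nodeSelector` in the values table below (`openebs.io/engine: mayastor`) on a hypothetical node named `node-1`:
```sh
kubectl label node node-1 openebs.io/engine=mayastor
```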
- Create the namespace where you want the chart to be installed, or pass the `--create-namespace` flag to the `helm install` command.
```sh
kubectl create ns <mayastor-namespace>
```
- Create a secret if downloading the container images from a private repo.
```sh
kubectl create secret docker-registry <same-as-image.pullSecrets[0]> --docker-server="https://index.docker.io/v1/" --docker-username="<user-name>" --docker-password="<password>" --docker-email="<user-email>" -n <mayastor-namespace>
```
### Installing the chart via the git repo
Clone the mayastor charts repo, then sync the chart dependencies:
```console
$ helm dependency update
```
Install the mayastor chart using the command:
```console
$ helm install mayastor . -n <mayastor-namespace>
```
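To verify that the release came up, list the pods in the namespace (a generic check, not specific to this chart):
```sh
kubectl get pods -n <mayastor-namespace>
```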
### Installing the Chart via Helm Registry
To install the chart with the release name `mymayastor`:
```console
$ helm repo add mayastor https://openebs.github.io/mayastor-extensions/
$ helm install mymayastor mayastor/mayastor
```
### Uninstall Helm Chart
```console
$ helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
*See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation.*
## Chart Dependencies
| Repository | Name | Version |
|------------|------|---------|
| | crds | 2.10.0 |
| https://charts.bitnami.com/bitnami | etcd | 12.0.14 |
| https://grafana.github.io/helm-charts | alloy | 1.0.1 |
| https://grafana.github.io/helm-charts | loki | 6.29.0 |
| https://jaegertracing.github.io/helm-charts | jaeger-operator | 2.50.1 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 0.19.14 |
| https://openebs.github.io/dynamic-localpv-provisioner | localpv-provisioner | 4.4.0 |
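Each optional dependency is gated by a condition flag declared in `Chart.yaml` (for example `etcd.enabled`, `loki.enabled`, or `eventing.enabled` for nats), so it can be switched off at install time. A minimal sketch:
```console
$ helm install mayastor . -n <mayastor-namespace> --set loki.enabled=false --set alloy.enabled=false
```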
## Values
| Key | Description | Default |
|:----|:------------|:--------|
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;allowNonPersistentDevlink | Allow using non-persistent kernel devpaths for pool disks. Enabling this lets users use kernel devpaths, e.g. /dev/sda, for diskpools. However, this comes with associated risks: if the devpaths get swapped among disks, the result can be total data loss, especially if encryption is being used. | `false` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;capacity.&ZeroWidthSpace;thin.&ZeroWidthSpace;poolCommitment | The allowed pool commitment limit when dealing with thin provisioned volumes. Example: if the commitment is 250% and the pool is 10GiB, we can overcommit the pool up to 25GiB (create two 10GiB and one 5GiB volume) but no further. | `"250%"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;capacity.&ZeroWidthSpace;thin.&ZeroWidthSpace;snapshotCommitment | When creating snapshots for an existing volume, each replica pool must have at least this much free space as a percentage of the volume size. Example: if this value is 40 and the pool has 40GiB free, then the max volume size allowed to be snapped on the pool is 100GiB. | `"40%"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;capacity.&ZeroWidthSpace;thin.&ZeroWidthSpace;volumeCommitment | When creating replicas for an existing volume, each replica pool must have at least this much free space as a percentage of the volume size. Example: if this value is 40 and the pool has 40GiB free, then the max volume size allowed to be created on the pool is 100GiB. | `"40%"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;capacity.&ZeroWidthSpace;thin.&ZeroWidthSpace;volumeCommitmentInitial | Same as the `volumeCommitment` argument, but applicable only when creating replicas for a new volume. | `"40%"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;encryptedPoolsSoftScheduling | Prefer encrypted pools for volume replicas. If a volume wasn't provisioned with an encryption StorageClass, the replicas of such a volume are placed onto encrypted pools on a best-effort basis when this global flag is set. This is effective only if the volume spec has already been modified via the plugin to request encryption. | `false` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;logLevel | Log level for the core service | `"info"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;minTimeouts | Enable minimal timeouts | `true` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;poolClusterSize | Default blobstore cluster size for diskpools, in bytes. This value is used as the default blobstore cluster size on diskpools. It is set to 4MiB internally by default, if nothing is specified here. The value is also configurable via the Diskpool CR, which takes precedence over this setting. This is an advanced configuration; please refer to the documentation to understand its usage and implications. | `""` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global. If both local and global are not set, the final deployment manifest has a mayastor custom critical priority class assigned to the pod by default. Refer to `templates/_helpers.tpl` and `templates/mayastor/agents/core/agent-core-deployment.yaml` for more details. | `""` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;rebuild.&ZeroWidthSpace;maxConcurrent | The maximum number of system-wide rebuilds permitted at any given time. If set to an empty string, there are no limits. | `""` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;rebuild.&ZeroWidthSpace;partial.&ZeroWidthSpace;enabled | Partial rebuild uses a log of missed IO to rebuild replicas which have become temporarily faulted, hence a bit faster, depending on the log size. | `true` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;rebuild.&ZeroWidthSpace;partial.&ZeroWidthSpace;waitPeriod | If a faulted replica comes back online within this time period then it will be rebuilt using the partial rebuild capability. Otherwise, the replica will be fully rebuilt. A blank value "" means internally derived value will be used. | `""` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;requestTimeout | Request timeout for core agents. The default value is defined in `base.default_req_timeout`. | `nil` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for core agents | `"1000m"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for core agents | `"128Mi"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for core agents | `"500m"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for core agents | `"32Mi"` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| agents.&ZeroWidthSpace;core.&ZeroWidthSpace;volumeHealth | Enable extended volume health information, which helps generate the volume status more accurately. | `true` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;cluster.&ZeroWidthSpace;logLevel | Log level for the ha cluster service | `"info"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;cluster.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for ha cluster agent | `"100m"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;cluster.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for ha cluster agent | `"64Mi"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;cluster.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for ha cluster agent | `"100m"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;cluster.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for ha cluster agent | `"16Mi"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;logLevel | Log level for the ha node service | `"info"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;port | Container port for the ha-node service | `50053` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global | `""` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for ha node agent | `"100m"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for ha node agent | `"64Mi"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for ha node agent | `"100m"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for ha node agent | `"64Mi"` |
| agents.&ZeroWidthSpace;ha.&ZeroWidthSpace;node.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| alloy.&ZeroWidthSpace;logging_config.&ZeroWidthSpace;labels | Labels to enable scraping on; at least one of these labels should be present. | <pre>{<br>"openebs.io/logging":true<br>}</pre> |
| alloy.&ZeroWidthSpace;logging_config.&ZeroWidthSpace;tenant_id | X-Scope-OrgID to be populated while pushing logs. Make sure the caller also uses the same. | `"openebs"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;liveness.&ZeroWidthSpace;enabled | Toggle liveness probe. | `true` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;liveness.&ZeroWidthSpace;failureThreshold | No. of failures the liveness probe will tolerate. | `1` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;liveness.&ZeroWidthSpace;initialDelaySeconds | No. of seconds of delay before checking the liveness status. | `0` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;liveness.&ZeroWidthSpace;periodSeconds | No. of seconds between liveness probe checks. | `30` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;liveness.&ZeroWidthSpace;timeoutSeconds | No. of seconds of timeout tolerance. | `5` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;readiness.&ZeroWidthSpace;agentCoreProbeFreq | Frequency for the agent-core liveness probe. | `"20s"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;readiness.&ZeroWidthSpace;enabled | Toggle readiness probe. | `true` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;readiness.&ZeroWidthSpace;failureThreshold | No. of failures the readiness probe will tolerate. | `2` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;readiness.&ZeroWidthSpace;initialDelaySeconds | No. of seconds of delay before checking the readiness status. | `0` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;readiness.&ZeroWidthSpace;periodSeconds | No. of seconds between readiness probe checks. | `20` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;healthProbes.&ZeroWidthSpace;readiness.&ZeroWidthSpace;timeoutSeconds | No. of seconds of timeout tolerance. | `5` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;logLevel | Log level for the rest service | `"info"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global. If both local and global are not set, the final deployment manifest has a mayastor custom critical priority class assigned to the pod by default. Refer to `templates/_helpers.tpl` and `templates/mayastor/apis/rest/api-rest-deployment.yaml` for more details. | `""` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;replicaCount | Number of replicas of rest | `1` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for rest | `"100m"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for rest | `"64Mi"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for rest | `"50m"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for rest | `"32Mi"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;service.&ZeroWidthSpace;type | Rest K8s service type | `"ClusterIP"` |
| apis.&ZeroWidthSpace;rest.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| base.&ZeroWidthSpace;cache_poll_period | Cache timeout for core agent & diskpool deployment | `"30s"` |
| base.&ZeroWidthSpace;default_req_timeout | Request timeout for rest & core agents | `"5s"` |
| base.&ZeroWidthSpace;logging.&ZeroWidthSpace;color | Enable ansi color code for Pod StdOut/StdErr | `true` |
| base.&ZeroWidthSpace;logging.&ZeroWidthSpace;format | Valid values for format are pretty, json and compact | `"pretty"` |
| base.&ZeroWidthSpace;logging.&ZeroWidthSpace;silenceLevel | Silence specific module components | `nil` |
| base.&ZeroWidthSpace;metrics.&ZeroWidthSpace;enabled | Enable the metrics exporter | `true` |
| base.&ZeroWidthSpace;metrics.&ZeroWidthSpace;port | Container port for the metrics exporter service | `9502` |
| crds.&ZeroWidthSpace;csi.&ZeroWidthSpace;volumeSnapshots.&ZeroWidthSpace;enabled | Install Volume Snapshot CRDs | `true` |
| crds.&ZeroWidthSpace;enabled | Disables the installation of all CRDs if set to false | `true` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;logLevel | Log level for the csi controller | `"info"` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;preventVolumeModeConversion | Prevent modifying the volume mode when creating a PVC from an existing VolumeSnapshot | `true` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global | `""` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for csi controller | `"32m"` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for csi controller | `"128Mi"` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for csi controller | `"16m"` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for csi controller | `"64Mi"` |
| csi.&ZeroWidthSpace;controller.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;attacherTag | csi-attacher image release tag | `"v4.8.1"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;provisionerTag | csi-provisioner image release tag | `"v5.2.0"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;pullPolicy | imagePullPolicy for all CSI Sidecar images | `"IfNotPresent"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;registrarTag | csi-node-driver-registrar image release tag | `"v2.13.0"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;registry | Image registry to pull all CSI Sidecar images | `"registry.k8s.io"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;repo | Image registry's namespace | `"sig-storage"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;resizerTag | csi-resizer image release tag | `"v1.13.2"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;snapshotControllerTag | csi-snapshot-controller image release tag | `"v8.2.0"` |
| csi.&ZeroWidthSpace;image.&ZeroWidthSpace;snapshotterTag | csi-snapshotter image release tag | `"v8.2.0"` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;kubeletDir | The kubeletDir directory for the csi-node plugin | `"/var/lib/kubelet"` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;nvme.&ZeroWidthSpace;ctrl_loss_tmo | The ctrl_loss_tmo (controller loss timeout) in seconds | `"1980"` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;nvme.&ZeroWidthSpace;tcpFallback | Fallback to nvme-tcp if nvme-rdma is enabled for Mayastor but rdma is not available on a particular csi-node | `true` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;port | Container port for the csi-node service | `10199` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global | `""` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for csi node plugin | `"100m"` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for csi node plugin | `"128Mi"` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for csi node plugin | `"100m"` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for csi node plugin | `"64Mi"` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| csi.&ZeroWidthSpace;node.&ZeroWidthSpace;topology.&ZeroWidthSpace;nodeSelector | Add topology segments to the csi-node and agent-ha-node daemonset node selector | `false` |
| etcd.&ZeroWidthSpace;autoCompactionMode | Auto compaction mode. Since etcd keeps an exact history of its keyspace, this history should be periodically compacted to avoid performance degradation and eventual storage space exhaustion. Valid values: "periodic", "revision". - 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. 5m). - 'revision' for revision number based retention. | `"revision"` |
| etcd.&ZeroWidthSpace;autoCompactionRetention | Auto compaction retention length. 0 means disable auto compaction. | `"100"` |
| etcd.&ZeroWidthSpace;clusterDomain | Kubernetes Cluster Domain | `"cluster.local"` |
| etcd.&ZeroWidthSpace;enabled | Disable when using an external etcd cluster. | `true` |
| etcd.&ZeroWidthSpace;externalUrl | URL of the external etcd cluster. Note: `etcd.enabled` must be set to false. | `""` |
| etcd.&ZeroWidthSpace;extraEnvVars[0] | Raise alarms when backend size exceeds the given quota. | <pre>{<br>"name":"ETCD_QUOTA_BACKEND_BYTES",<br>"value":"8589934592"<br>}</pre> |
| etcd.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;basePath | Host path where local etcd data is stored. | `"/var/local/{{ .Release.Name }}/localpv-hostpath/etcd"` |
| etcd.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;reclaimPolicy | ReclaimPolicy of etcd's localpv hostpath storage class. | `"Delete"` |
| etcd.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;volumeBindingMode | VolumeBindingMode of etcd's localpv hostpath storage class. | `"WaitForFirstConsumer"` |
| etcd.&ZeroWidthSpace;persistence.&ZeroWidthSpace;enabled | If true, use a Persistent Volume Claim. If false, use emptyDir. | `true` |
| etcd.&ZeroWidthSpace;persistence.&ZeroWidthSpace;size | Volume size | `"2Gi"` |
| etcd.&ZeroWidthSpace;persistence.&ZeroWidthSpace;storageClass | Will define which storageClass to use in etcd's StatefulSets. Options: <p> - `"manual"` - Will provision a hostpath PV on the same node. <br> - `""` (empty) - Will use the default StorageClass on the cluster. </p> | `"mayastor-etcd-localpv"` |
| etcd.&ZeroWidthSpace;persistentVolumeClaimRetentionPolicy.&ZeroWidthSpace;enabled | Enable the PVC retention policy for etcd's StatefulSet. | `false` |
| etcd.&ZeroWidthSpace;podAntiAffinityPreset | Pod anti-affinity preset Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity | `"hard"` |
| etcd.&ZeroWidthSpace;removeMemberOnContainerTermination | Use a PreStop hook to remove the etcd members from the etcd cluster on container termination. Ignored if lifecycleHooks is set or replicaCount=1. | `false` |
| etcd.&ZeroWidthSpace;replicaCount | Number of replicas of etcd | `3` |
| image.&ZeroWidthSpace;pullPolicy | ImagePullPolicy for our images | `"IfNotPresent"` |
| image.&ZeroWidthSpace;pullSecrets | docker-secrets required to pull images if the container registry from image.registry is protected | `[]` |
| image.&ZeroWidthSpace;registry | Image registry to pull our product images | `"docker.io"` |
| image.&ZeroWidthSpace;repo | Image registry's namespace | `"openebs"` |
| image.&ZeroWidthSpace;tag | Release tag for our images | `"v2.10.0"` |
| io_engine.&ZeroWidthSpace;coreList | If not empty, overrides the cpuCount and explicitly sets the list of cores. Example: --set='io_engine.coreList={30,31}' (see the example after this table). | `[]` |
| io_engine.&ZeroWidthSpace;cpuCount | The number of cores that each io-engine instance will bind to. | `"2"` |
| io_engine.&ZeroWidthSpace;envcontext | Pass additional arguments to the Environment Abstraction Layer. Example: --set io_engine.envcontext=iova-mode=pa | `""` |
| io_engine.&ZeroWidthSpace;logLevel | Log level for the io-engine service | `"info"` |
| io_engine.&ZeroWidthSpace;nodeSelector | Node selectors to designate storage nodes for diskpool creation. Note that if multi-arch images are supported, 'kubernetes.io/arch: amd64' should be removed. | <pre>{<br>"kubernetes.io/arch":"amd64",<br>"openebs.io/engine":"mayastor"<br>}</pre> |
| io_engine.&ZeroWidthSpace;nvme.&ZeroWidthSpace;ioTimeout | Timeout for IOs. The default here is exaggerated for local disks, but we've observed that in shared virtual environments a higher timeout value is beneficial. Please adjust this according to your hardware and needs. | `"110s"` |
| io_engine.&ZeroWidthSpace;nvme.&ZeroWidthSpace;tcp.&ZeroWidthSpace;maxQueueDepth | You may need to increase this for more outstanding IOs per volume. | `"32"` |
| io_engine.&ZeroWidthSpace;port | Container port for the io-engine service | `10124` |
| io_engine.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global | `""` |
| io_engine.&ZeroWidthSpace;pstorRetries | Number of retries for pstor persistence before the volume target shuts itself down. | `300` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for the io-engine | `""` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;hugepages1Gi | Hugepage memory in 1GiB chunks | `nil` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;hugepages2Mi | Hugepage memory in 2MiB chunks | `"2Gi"` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for the io-engine | `"1Gi"` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for the io-engine | `""` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;hugepages1Gi | Hugepage memory in 1GiB chunks | `nil` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;hugepages2Mi | Hugepage memory in 2MiB chunks | `"2Gi"` |
| io_engine.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for the io-engine | `"1Gi"` |
| io_engine.&ZeroWidthSpace;runtimeClassName | Runtime class to use. Defaults to cluster standard | `""` |
| io_engine.&ZeroWidthSpace;target.&ZeroWidthSpace;nvmf.&ZeroWidthSpace;iface | NVMF target interface (IP, MAC, name or subnet). If RDMA is enabled, set iface to an RDMA-capable netdev name from the host network. For example, if an RDMA device mlx5_0 is available on a netdev eth0 on an RNIC, as can be seen from the `rdma link` command output, then this field should be set to eth0. | `""` |
| io_engine.&ZeroWidthSpace;target.&ZeroWidthSpace;nvmf.&ZeroWidthSpace;ptpl | Reservations Persist Through Power Loss State | `true` |
| io_engine.&ZeroWidthSpace;target.&ZeroWidthSpace;nvmf.&ZeroWidthSpace;rdma | Enable the RDMA capability of the Mayastor nvmf target to accept RDMA connections if the cluster nodes have RDMA device(s) configured on an RNIC. | <pre>{<br>"enabled":false<br>}</pre> |
| io_engine.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| localpv-provisioner.&ZeroWidthSpace;enabled | Enables the openebs dynamic-localpv-provisioner. If disabled, modify etcd and loki storage class accordingly. | `true` |
| localpv-provisioner.&ZeroWidthSpace;hostpathClass.&ZeroWidthSpace;enabled | Enable default hostpath localpv StorageClass. | `false` |
| localpv-provisioner.&ZeroWidthSpace;localpv.&ZeroWidthSpace;priorityClassName | Set the PriorityClass for the LocalPV Hostpath provisioner Deployment. | `"{{ .Release.Name }}-cluster-critical"` |
| loki.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;loki.&ZeroWidthSpace;basePath | Host path where local loki data is stored. | `"/var/local/{{ .Release.Name }}/localpv-hostpath/loki"` |
| loki.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;loki.&ZeroWidthSpace;reclaimPolicy | ReclaimPolicy of loki's localpv hostpath storage class. | `"Delete"` |
| loki.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;loki.&ZeroWidthSpace;volumeBindingMode | VolumeBindingMode of loki's localpv hostpath storage class. | `"WaitForFirstConsumer"` |
| loki.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;minio.&ZeroWidthSpace;basePath | Host path where local minio data is stored. | `"/var/local/{{ .Release.Name }}/localpv-hostpath/minio"` |
| loki.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;minio.&ZeroWidthSpace;reclaimPolicy | ReclaimPolicy of minio's localpv hostpath storage class. | `"Delete"` |
| loki.&ZeroWidthSpace;localpvScConfig.&ZeroWidthSpace;minio.&ZeroWidthSpace;volumeBindingMode | VolumeBindingMode of minio's localpv hostpath storage class. | `"WaitForFirstConsumer"` |
| nodeSelector | Node labels for pod assignment. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ Note that if multi-arch images are supported, 'kubernetes.io/arch: amd64' should be removed and 'nodeSelector' set to the empty default '{}'. | <pre>{<br>"kubernetes.io/arch":"amd64"<br>}</pre> |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;enabled | Enable callhome | `true` |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;logLevel | Log level for callhome | `"info"` |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global | `""` |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for callhome | `"100m"` |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for callhome | `"32Mi"` |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for callhome | `"50m"` |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for callhome | `"16Mi"` |
| obs.&ZeroWidthSpace;callhome.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| obs.&ZeroWidthSpace;stats.&ZeroWidthSpace;logLevel | Log level for stats | `"info"` |
| obs.&ZeroWidthSpace;stats.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for stats | `"100m"` |
| obs.&ZeroWidthSpace;stats.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for stats | `"32Mi"` |
| obs.&ZeroWidthSpace;stats.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for stats | `"50m"` |
| obs.&ZeroWidthSpace;stats.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for stats | `"16Mi"` |
| obs.&ZeroWidthSpace;stats.&ZeroWidthSpace;service.&ZeroWidthSpace;type | Stats K8s service type | `"ClusterIP"` |
| operators.&ZeroWidthSpace;pool.&ZeroWidthSpace;logLevel | Log level for diskpool operator service | `"info"` |
| operators.&ZeroWidthSpace;pool.&ZeroWidthSpace;priorityClassName | Set PriorityClass, overrides global | `""` |
| operators.&ZeroWidthSpace;pool.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;cpu | Cpu limits for diskpool operator | `"100m"` |
| operators.&ZeroWidthSpace;pool.&ZeroWidthSpace;resources.&ZeroWidthSpace;limits.&ZeroWidthSpace;memory | Memory limits for diskpool operator | `"32Mi"` |
| operators.&ZeroWidthSpace;pool.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;cpu | Cpu requests for diskpool operator | `"50m"` |
| operators.&ZeroWidthSpace;pool.&ZeroWidthSpace;resources.&ZeroWidthSpace;requests.&ZeroWidthSpace;memory | Memory requests for diskpool operator | `"16Mi"` |
| operators.&ZeroWidthSpace;pool.&ZeroWidthSpace;tolerations | Set tolerations, overrides global | `[]` |
| preUpgradeHook.&ZeroWidthSpace;enabled | Enable/Disable mayastor pre-upgrade hook | `true` |
| preUpgradeHook.&ZeroWidthSpace;image.&ZeroWidthSpace;pullPolicy | The imagePullPolicy for the container | `"IfNotPresent"` |
| preUpgradeHook.&ZeroWidthSpace;image.&ZeroWidthSpace;registry | The container image registry URL for the hook job | `"docker.io"` |
| preUpgradeHook.&ZeroWidthSpace;image.&ZeroWidthSpace;repo | The container repository for the hook job | `"openebs/kubectl"` |
| preUpgradeHook.&ZeroWidthSpace;image.&ZeroWidthSpace;tag | The container image tag for the hook job | `"1.25.15"` |
| preUpgradeHook.&ZeroWidthSpace;imagePullSecrets | Optional array of imagePullSecrets containing private registry credentials. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | `[]` |
| preUpgradeHook.&ZeroWidthSpace;tolerations | Node tolerations for server scheduling to nodes with taints. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `[]` |
| priorityClassName | Pod scheduling priority. Setting this value will apply to all components except the external Chart dependencies. If any component has `priorityClassName` set, then this value would be overridden for that component. For external components like etcd, jaeger or loki, PriorityClass can only be set at component level. | `""` |
| storageClass.&ZeroWidthSpace;allowVolumeExpansion | Enable volume expansion for the default StorageClass. | `true` |
| tolerations | Tolerations to be applied to all components except external Chart dependencies. If any component has tolerations set, then it would override this value. For external components like etcd, jaeger and loki, tolerations can only be set at component level. | `[]` |
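As referenced in the `io_engine.coreList` row, any of the values above can be overridden on the command line. A minimal sketch combining a list-valued and a scalar override:
```console
$ helm upgrade mayastor . -n <mayastor-namespace> --set='io_engine.coreList={30,31}' --set io_engine.logLevel=debug
```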


@@ -0,0 +1,29 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# Don't package templates.
README.md.gotmpl
# Don't package the tests used for CI.
/tests/


@@ -0,0 +1,245 @@
# Changelog
> _Contributors should read our [contributors guide][] for instructions on how
> to update the changelog._
This document contains a historical list of changes between releases. Only
changes that impact end-user behavior are listed; changes to documentation or
internal API changes are not present.
Unreleased
----------
1.0.1 (2025-04-10)
----------
### Enhancements
- Update to Grafana Alloy v1.8.1. (@dehaansa)
- Update default configreloader resources to match what is set in prometheus-operator project (@dehaansa)
- Add Vertical Pod Autoscaler support (@QuentinBisson)
1.0.0 (2025-04-09)
----------
### Enhancements
- Update version to `1.0.0`. This Helm chart is now covered with the [backward-compatibility](https://grafana.com/docs/alloy/latest/introduction/backward-compatibility/) policy.
- Update to Grafana Alloy v1.8.0. (@thampiotr)
0.12.6 (2025-04-03)
----------
### Breaking changes
- configReloader.customArgs are likely to break as the prometheus maintained config reloader does not have the same arguments as the previous image (@dehaansa)
### Enhancements
- Change configReloader from jimmidyson/configmap-reload to prometheus-operator/prometheus-config-reloader (@dehaansa)
- Update to Grafana Alloy v1.7.5. (@kimxogus)
- Add `checksum/config` pod annotation (@kimxogus)
### Other changes
- Fix typo in values.yaml documentation (@petewall)
0.12.5 (2025-03-13)
----------
### Enhancements
- Update to Grafana Alloy v1.7.4. (@dehaansa)
0.12.4 (2025-03-13)
----------
### Enhancements
- Update to Grafana Alloy v1.7.3. (@dehaansa)
0.12.3 (2025-03-10)
----------
### Enhancements
- Add support for adding livenessProbe to agent container (@slimes28)
0.12.2 (2025-03-10)
----------
### Bug Fixes
- Set resource namespace correctly (@shinebayar-g)
### Enhancements
- Add a new `automountServiceAccountToken` configuration value for `serviceAccount`. (@ptodev)
- Update to Grafana Alloy v1.7.2. (@thampiotr)
0.12.1 (2025-02-26)
----------
### Enhancements
- Update to Grafana Alloy v1.7.1. (@thampiotr)
0.12.0 (2025-02-24)
----------
### Enhancements
- Update to Grafana Alloy v1.7.0. (@thampiotr)
0.11.0 (2025-01-23)
----------
### Enhancements
- Update jimmidyson/configmap-reload to 0.14.0. (@petewall)
- Add the ability to deploy extra manifest files. (@dbluxo)
0.10.1 (2024-12-03)
----------
### Enhancements
- Update to Grafana Alloy v1.5.1. (@ptodev)
0.10.0 (2024-11-13)
----------
### Enhancements
- Add support for adding hostAliases to the Helm chart. (@duncan485)
- Update to Grafana Alloy v1.5.0. (@thampiotr)
0.9.2 (2024-10-18)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.3. (@ptodev)
0.9.1 (2024-10-04)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.2. (@ptodev)
0.9.0 (2024-10-02)
------------------
### Enhancements
- Add lifecyle hook to the Helm chart. (@etiennep)
- Add terminationGracePeriodSeconds setting to the Helm chart. (@etiennep)
0.8.1 (2024-09-26)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.1. (@ptodev)
0.8.0 (2024-09-25)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.0. (@ptodev)
0.7.0 (2024-08-26)
------------------
### Enhancements
- Add PodDisruptionBudget to the Helm chart. (@itspouya)
0.6.1 (2024-08-23)
----------
### Enhancements
- Add the ability to set --cluster.name in the Helm chart with alloy.clustering.name. (@petewall)
- Add the ability to set appProtocol in extraPorts to help OpenShift users to expose gRPC. (@clementduveau)
### Other changes
- Update helm chart to use v1.3.1.
0.6.0 (2024-08-05)
------------------
### Other changes
- Update helm chart to use v1.3.0.
- Set `publishNotReadyAddresses` to `true` in the service spec for clustering to fix a bug where peers could not join on startup. (@wildum)
0.5.1 (2024-07-11)
------------------
### Other changes
- Update helm chart to use v1.2.1.
0.5.0 (2024-07-08)
------------------
### Enhancements
- Only utilize spec.internalTrafficPolicy in the Service if deploying to Kubernetes 1.26 or later. (@petewall)
0.4.0 (2024-06-26)
------------------
### Enhancements
- Update to Grafana Alloy v1.2.0. (@ptodev)
0.3.2 (2024-05-30)
------------------
### Bugfixes
- Update to Grafana Alloy v1.1.1. (@rfratto)
0.3.1 (2024-05-22)
------------------
### Bugfixes
- Fix clustering on instances running within an Istio mesh by allowing the name of the clustering port to be changed
0.3.0 (2024-05-14)
------------------
### Enhancements
- Update to Grafana Alloy v1.1.0. (@rfratto)
0.2.0 (2024-05-08)
------------------
### Other changes
- Support all [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/) (@nlamirault)
0.1.1 (2024-04-11)
------------------
### Other changes
- Add missing Alloy icon to Chart.yaml. (@rfratto)
0.1.0 (2024-04-09)
------------------
### Features
- Introduce a Grafana Alloy Helm chart. The Grafana Alloy Helm chart is
backwards compatible with the values.yaml from the `grafana-agent` Helm
chart. Review the Helm chart README for a description on how to migrate.
(@rfratto)


@@ -0,0 +1,6 @@
dependencies:
- name: crds
repository: ""
version: 0.0.0
digest: sha256:1980431a3d80822fca2e67e9cf16ff7a7f8d1dc87deb9e44d50e85e3e8e33a81
generated: "2025-04-11T09:30:48.378858526Z"


@@ -0,0 +1,12 @@
apiVersion: v2
appVersion: v1.8.1
dependencies:
- condition: crds.create
name: crds
repository: ""
version: 0.0.0
description: Grafana Alloy
icon: https://raw.githubusercontent.com/grafana/alloy/main/docs/sources/assets/alloy_icon_orange.svg
name: alloy
type: application
version: 1.0.1


@@ -0,0 +1,342 @@
# Grafana Alloy Helm chart
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![AppVersion: v1.8.1](https://img.shields.io/badge/AppVersion-v1.8.1-informational?style=flat-square)
Helm chart for deploying [Grafana Alloy][] to Kubernetes.
[Grafana Alloy]: https://grafana.com/docs/alloy/latest/
## Usage
### Setup Grafana chart repository
```
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
```
### Install chart
To install the chart with the release name my-release:
`helm install my-release grafana/alloy`
This chart installs one instance of Grafana Alloy into your Kubernetes cluster
using a specific Kubernetes controller. By default, DaemonSet is used. The
`controller.type` value can be used to change the controller to either a
StatefulSet or Deployment.
Creating multiple installations of the Helm chart with different controllers is
useful if just using the default DaemonSet isn't sufficient.
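For example, to deploy Alloy as a StatefulSet rather than the default DaemonSet, a values file could set (a minimal sketch; `controller.replicas` is ignored for DaemonSets):
```
controller:
  type: statefulset
  replicas: 3
```
and be passed with `helm install my-release grafana/alloy -f values.yaml`.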
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| alloy.clustering.enabled | bool | `false` | Deploy Alloy in a cluster to allow for load distribution. |
| alloy.clustering.name | string | `""` | Name for the Alloy cluster. Used for differentiating between clusters. |
| alloy.clustering.portName | string | `"http"` | Name for the port used for clustering, useful if running inside an Istio Mesh |
| alloy.configMap.content | string | `""` | Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values (see the example after this table). |
| alloy.configMap.create | bool | `true` | Create a new ConfigMap for the config file. |
| alloy.configMap.key | string | `nil` | Key in ConfigMap to get config from. |
| alloy.configMap.name | string | `nil` | Name of existing ConfigMap to use. Used when create is false. |
| alloy.enableReporting | bool | `true` | Enables sending Grafana Labs anonymous usage stats to help improve Grafana Alloy. |
| alloy.envFrom | list | `[]` | Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core |
| alloy.extraArgs | list | `[]` | Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/ |
| alloy.extraEnv | list | `[]` | Extra environment variables to pass to the Alloy container. |
| alloy.extraPorts | list | `[]` | Extra ports to expose on the Alloy container. |
| alloy.hostAliases | list | `[]` | Host aliases to add to the Alloy container. |
| alloy.lifecycle | object | `{}` | Set lifecycle hooks for the Grafana Alloy container. |
| alloy.listenAddr | string | `"0.0.0.0"` | Address to listen for traffic on. 0.0.0.0 exposes the UI to other containers. |
| alloy.listenPort | int | `12345` | Port to listen for traffic on. |
| alloy.listenScheme | string | `"HTTP"` | Scheme is needed for readiness probes. If enabling tls in your configs, set to "HTTPS" |
| alloy.livenessProbe | object | `{}` | Set livenessProbe for the Grafana Alloy container. |
| alloy.mounts.dockercontainers | bool | `false` | Mount /var/lib/docker/containers from the host into the container for log collection. |
| alloy.mounts.extra | list | `[]` | Extra volume mounts to add into the Grafana Alloy container. Does not affect the watch container. |
| alloy.mounts.varlog | bool | `false` | Mount /var/log from the host into the container for log collection. |
| alloy.resources | object | `{}` | Resource requests and limits to apply to the Grafana Alloy container. |
| alloy.securityContext | object | `{}` | Security context to apply to the Grafana Alloy container. |
| alloy.stabilityLevel | string | `"generally-available"` | Minimum stability level of components and behavior to enable. Must be one of "experimental", "public-preview", or "generally-available". |
| alloy.storagePath | string | `"/tmp/alloy"` | Path to where Grafana Alloy stores data (for example, the Write-Ahead Log). By default, data is lost between reboots. |
| alloy.uiPathPrefix | string | `"/"` | Base path where the UI is exposed. |
| configReloader.customArgs | list | `[]` | Override the args passed to the container. |
| configReloader.enabled | bool | `true` | Enables automatically reloading when the Alloy config changes. |
| configReloader.image.digest | string | `""` | SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag` |
| configReloader.image.registry | string | `"quay.io"` | Config reloader image registry (defaults to quay.io) |
| configReloader.image.repository | string | `"prometheus-operator/prometheus-config-reloader"` | Repository to get config reloader image from. |
| configReloader.image.tag | string | `"v0.81.0"` | Tag of image to use for config reloading. |
| configReloader.resources | object | `{"requests":{"cpu":"10m","memory":"50Mi"}}` | Resource requests and limits to apply to the config reloader container. |
| configReloader.securityContext | object | `{}` | Security context to apply to the Grafana configReloader container. |
| controller.affinity | object | `{}` | Affinity configuration for pods. |
| controller.autoscaling.enabled | bool | `false` | Creates a HorizontalPodAutoscaler for controller type deployment. Deprecated: Please use controller.autoscaling.horizontal instead |
| controller.autoscaling.horizontal | object | `{"enabled":false,"maxReplicas":5,"minReplicas":1,"scaleDown":{"policies":[],"selectPolicy":"Max","stabilizationWindowSeconds":300},"scaleUp":{"policies":[],"selectPolicy":"Max","stabilizationWindowSeconds":0},"targetCPUUtilizationPercentage":0,"targetMemoryUtilizationPercentage":80}` | Configures the Horizontal Pod Autoscaler for the controller. |
| controller.autoscaling.horizontal.enabled | bool | `false` | Enables the Horizontal Pod Autoscaler for the controller. |
| controller.autoscaling.horizontal.maxReplicas | int | `5` | The upper limit for the number of replicas to which the autoscaler can scale up. |
| controller.autoscaling.horizontal.minReplicas | int | `1` | The lower limit for the number of replicas to which the autoscaler can scale down. |
| controller.autoscaling.horizontal.scaleDown.policies | list | `[]` | List of policies to determine the scale-down behavior. |
| controller.autoscaling.horizontal.scaleDown.selectPolicy | string | `"Max"` | Determines which of the provided scaling-down policies to apply if multiple are specified. |
| controller.autoscaling.horizontal.scaleDown.stabilizationWindowSeconds | int | `300` | The duration that the autoscaling mechanism should look back on to make decisions about scaling down. |
| controller.autoscaling.horizontal.scaleUp.policies | list | `[]` | List of policies to determine the scale-up behavior. |
| controller.autoscaling.horizontal.scaleUp.selectPolicy | string | `"Max"` | Determines which of the provided scaling-up policies to apply if multiple are specified. |
| controller.autoscaling.horizontal.scaleUp.stabilizationWindowSeconds | int | `0` | The duration that the autoscaling mechanism should look back on to make decisions about scaling up. |
| controller.autoscaling.horizontal.targetCPUUtilizationPercentage | int | `0` | Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling. |
| controller.autoscaling.horizontal.targetMemoryUtilizationPercentage | int | `80` | Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling. |
| controller.autoscaling.maxReplicas | int | `5` | The upper limit for the number of replicas to which the autoscaler can scale up. |
| controller.autoscaling.minReplicas | int | `1` | The lower limit for the number of replicas to which the autoscaler can scale down. |
| controller.autoscaling.scaleDown.policies | list | `[]` | List of policies to determine the scale-down behavior. |
| controller.autoscaling.scaleDown.selectPolicy | string | `"Max"` | Determines which of the provided scaling-down policies to apply if multiple are specified. |
| controller.autoscaling.scaleDown.stabilizationWindowSeconds | int | `300` | The duration that the autoscaling mechanism should look back on to make decisions about scaling down. |
| controller.autoscaling.scaleUp.policies | list | `[]` | List of policies to determine the scale-up behavior. |
| controller.autoscaling.scaleUp.selectPolicy | string | `"Max"` | Determines which of the provided scaling-up policies to apply if multiple are specified. |
| controller.autoscaling.scaleUp.stabilizationWindowSeconds | int | `0` | The duration that the autoscaling mechanism should look back on to make decisions about scaling up. |
| controller.autoscaling.targetCPUUtilizationPercentage | int | `0` | Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling. |
| controller.autoscaling.targetMemoryUtilizationPercentage | int | `80` | Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling. |
| controller.autoscaling.vertical | object | `{"enabled":false,"recommenders":[],"resourcePolicy":{"containerPolicies":[{"containerName":"alloy","controlledResources":["cpu","memory"],"controlledValues":"RequestsAndLimits","maxAllowed":{},"minAllowed":{}}]},"updatePolicy":null}` | Configures the Vertical Pod Autoscaler for the controller. |
| controller.autoscaling.vertical.enabled | bool | `false` | Enables the Vertical Pod Autoscaler for the controller. |
| controller.autoscaling.vertical.recommenders | list | `[]` | List of recommenders to use for the Vertical Pod Autoscaler. Recommenders are responsible for generating recommendations for the object. The list should be empty (then the default recommender will generate the recommendation) or contain exactly one recommender. |
| controller.autoscaling.vertical.resourcePolicy | object | `{"containerPolicies":[{"containerName":"alloy","controlledResources":["cpu","memory"],"controlledValues":"RequestsAndLimits","maxAllowed":{},"minAllowed":{}}]}` | Configures the resource policy for the Vertical Pod Autoscaler. |
| controller.autoscaling.vertical.resourcePolicy.containerPolicies | list | `[{"containerName":"alloy","controlledResources":["cpu","memory"],"controlledValues":"RequestsAndLimits","maxAllowed":{},"minAllowed":{}}]` | Configures the container policies for the Vertical Pod Autoscaler. |
| controller.autoscaling.vertical.resourcePolicy.containerPolicies[0].controlledResources | list | `["cpu","memory"]` | The controlled resources for the Vertical Pod Autoscaler. |
| controller.autoscaling.vertical.resourcePolicy.containerPolicies[0].controlledValues | string | `"RequestsAndLimits"` | The controlled values for the Vertical Pod Autoscaler. Needs to be either RequestsOnly or RequestsAndLimits. |
| controller.autoscaling.vertical.resourcePolicy.containerPolicies[0].maxAllowed | object | `{}` | The maximum allowed values for the pods. |
| controller.autoscaling.vertical.resourcePolicy.containerPolicies[0].minAllowed | object | `{}` | Defines the min allowed resources for the pod |
| controller.autoscaling.vertical.updatePolicy | string | `nil` | Configures the update policy for the Vertical Pod Autoscaler. |
| controller.dnsPolicy | string | `"ClusterFirst"` | Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy |
| controller.enableStatefulSetAutoDeletePVC | bool | `false` | Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'. |
| controller.extraAnnotations | object | `{}` | Annotations to add to controller. |
| controller.extraContainers | list | `[]` | Additional containers to run alongside the Alloy container and initContainers. |
| controller.hostNetwork | bool | `false` | Configures Pods to use the host network. When set to true, the ports that will be used must be specified. |
| controller.hostPID | bool | `false` | Configures Pods to use the host PID namespace. |
| controller.initContainers | list | `[]` | |
| controller.nodeSelector | object | `{}` | nodeSelector to apply to Grafana Alloy pods. |
| controller.parallelRollout | bool | `true` | Whether to deploy pods in parallel. Only used when controller.type is 'statefulset'. |
| controller.podAnnotations | object | `{}` | Extra pod annotations to add. |
| controller.podDisruptionBudget | object | `{"enabled":false,"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget configuration. |
| controller.podDisruptionBudget.enabled | bool | `false` | Whether to create a PodDisruptionBudget for the controller. |
| controller.podDisruptionBudget.maxUnavailable | string | `nil` | Maximum number of pods that can be unavailable during a disruption. Note: Only one of minAvailable or maxUnavailable should be set. |
| controller.podDisruptionBudget.minAvailable | string | `nil` | Minimum number of pods that must be available during a disruption. Note: Only one of minAvailable or maxUnavailable should be set. |
| controller.podLabels | object | `{}` | Extra pod labels to add. |
| controller.priorityClassName | string | `""` | priorityClassName to apply to Grafana Alloy pods. |
| controller.replicas | int | `1` | Number of pods to deploy. Ignored when controller.type is 'daemonset'. |
| controller.terminationGracePeriodSeconds | string | `nil` | Termination grace period in seconds for the Grafana Alloy pods. The default value used by Kubernetes if unspecified is 30 seconds. |
| controller.tolerations | list | `[]` | Tolerations to apply to Grafana Alloy pods. |
| controller.topologySpreadConstraints | list | `[]` | Topology Spread Constraints to apply to Grafana Alloy pods. |
| controller.type | string | `"daemonset"` | Type of controller to use for deploying Grafana Alloy in the cluster. Must be one of 'daemonset', 'deployment', or 'statefulset'. |
| controller.updateStrategy | object | `{}` | Update strategy for updating deployed Pods. |
| controller.volumeClaimTemplates | list | `[]` | volumeClaimTemplates to add when controller.type is 'statefulset'. |
| controller.volumes.extra | list | `[]` | Extra volumes to add to the Grafana Alloy pod. |
| crds.create | bool | `true` | Whether to install CRDs for monitoring. |
| extraObjects | list | `[]` | Extra k8s manifests to deploy |
| fullnameOverride | string | `nil` | Overrides the chart's computed fullname. Used to change the full prefix of resource names. |
| global.image.pullSecrets | list | `[]` | Optional set of global image pull secrets. |
| global.image.registry | string | `""` | Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...) |
| global.podSecurityContext | object | `{}` | Security context to apply to the Grafana Alloy pod. |
| image.digest | string | `nil` | Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`. |
| image.pullPolicy | string | `"IfNotPresent"` | Grafana Alloy image pull policy. |
| image.pullSecrets | list | `[]` | Optional set of image pull secrets. |
| image.registry | string | `"docker.io"` | Grafana Alloy image registry (defaults to docker.io) |
| image.repository | string | `"grafana/alloy"` | Grafana Alloy image repository. |
| image.tag | string | `nil` | Grafana Alloy image tag. When empty, the Chart's appVersion is used. |
| ingress.annotations | object | `{}` | |
| ingress.enabled | bool | `false` | Enables ingress for Alloy (Faro port) |
| ingress.extraPaths | list | `[]` | |
| ingress.faroPort | int | `12347` | |
| ingress.hosts[0] | string | `"chart-example.local"` | |
| ingress.labels | object | `{}` | |
| ingress.path | string | `"/"` | |
| ingress.pathType | string | `"Prefix"` | |
| ingress.tls | list | `[]` | |
| nameOverride | string | `nil` | Overrides the chart's name. Used to change the infix in the resource names. |
| namespaceOverride | string | `nil` | Overrides the chart's namespace. |
| rbac.create | bool | `true` | Whether to create RBAC resources for Alloy. |
| service.annotations | object | `{}` | |
| service.clusterIP | string | `""` | Cluster IP, can be set to None, empty "" or an IP address |
| service.enabled | bool | `true` | Creates a Service for the controller's pods. |
| service.internalTrafficPolicy | string | `"Cluster"` | Value for internal traffic policy. 'Cluster' or 'Local' |
| service.nodePort | int | `31128` | NodePort port. Only takes effect when `service.type: NodePort` |
| service.type | string | `"ClusterIP"` | Service type |
| serviceAccount.additionalLabels | object | `{}` | Additional labels to add to the created service account. |
| serviceAccount.annotations | object | `{}` | Annotations to add to the created service account. |
| serviceAccount.automountServiceAccountToken | bool | `true` | Whether the Alloy pod should automatically mount the service account token. |
| serviceAccount.create | bool | `true` | Whether to create a service account for the Grafana Alloy deployment. |
| serviceAccount.name | string | `nil` | The name of the existing service account to use when serviceAccount.create is false. |
| serviceMonitor.additionalLabels | object | `{}` | Additional labels for the service monitor. |
| serviceMonitor.enabled | bool | `false` | |
| serviceMonitor.interval | string | `""` | Scrape interval. If not set, the Prometheus default scrape interval is used. |
| serviceMonitor.metricRelabelings | list | `[]` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion. ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig |
| serviceMonitor.relabelings | list | `[]` | RelabelConfigs to apply to samples before scraping. ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig |
| serviceMonitor.tlsConfig | object | `{}` | Customize tls parameters for the service monitor |
#### Migrate from `grafana/grafana-agent` chart to `grafana/alloy`
The `values.yaml` file for the `grafana/grafana-agent` chart is compatible with
the chart for `grafana/alloy`, with two exceptions:
* The `agent` field in `values.yaml` is deprecated in favor of `alloy`. Support
for the `agent` field will be removed in a future release.
* The default value for `alloy.listenPort` is `12345` to align with the default
  listen port in other installations. To retain the previous default, set
  `alloy.listenPort` to `80` when installing, as sketched below.
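For example, a minimal values sketch that keeps the previous `grafana-agent` default port while using the new `alloy` block:

```yaml
# Sketch: migrate from the deprecated `agent` block to `alloy`,
# retaining the old grafana-agent default listen port.
alloy:
  listenPort: 80
```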
### alloy.stabilityLevel
`alloy.stabilityLevel` controls the minimum stability level required for
components to be created (directly or through imported modules). Note that
setting this field to a lower stability may also enable internal behaviour of a
lower stability, such as experimental memory optimizations.
Valid settings are `experimental`, `public-preview`, and `generally-available`.
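For example, a minimal values sketch allowing public-preview components:

```yaml
alloy:
  # Permit components marked public-preview or generally-available.
  stabilityLevel: public-preview
```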
### alloy.extraArgs
`alloy.extraArgs` allows for passing extra arguments to the Grafana Alloy
container. The list of available arguments is documented on [alloy run][].
> **WARNING**: Using `alloy.extraArgs` does not have a stable API. Things may
> break between chart upgrades if an argument gets added to the template.
[alloy run]: https://grafana.com/docs/alloy/latest/reference/cli/run/
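A minimal sketch of the list syntax, using the `--disable-reporting` flag that the chart itself passes when `alloy.enableReporting` is false:

```yaml
alloy:
  extraArgs:
    - --disable-reporting
```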
### alloy.extraPorts
`alloy.extraPorts` allows for configuring specific open ports.
The detailed specification of ports can be found in the [Kubernetes Pod documentation](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#ports).
Port numbers specified must satisfy 0 < x < 65536.
| ChartPort | KubePort | Description |
|-----------|----------|-------------|
| targetPort | containerPort | Number of port to expose on the pod's IP address. |
| hostPort | hostPort | (Optional) Number of port to expose on the host. Daemonsets taking traffic might find this useful. |
| name | name | If specified, this must be an `IANA_SVC_NAME` and unique within the pod; Services can refer to the port by this name. |
| protocol | protocol | Must be UDP, TCP, or SCTP. Defaults to "TCP". |
| appProtocol | appProtocol | Hint on application protocol. This is used to expose Alloy externally on OpenShift clusters using "h2c". Optional. No default value. |
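A sketch mirroring the commented `faro` example in the chart's `values.yaml`:

```yaml
alloy:
  extraPorts:
    - name: "faro"       # unique IANA_SVC_NAME
      port: 12347        # Service port
      targetPort: 12347  # maps to containerPort on the pod
      protocol: "TCP"    # defaults to TCP when omitted
```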
### alloy.listenAddr
`alloy.listenAddr` allows for restricting which address Alloy listens on
for network traffic on its HTTP server. By default, this is `0.0.0.0` to allow
its UI to be exposed when port-forwarding and to expose its metrics to other
Alloy instances in the cluster.
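For example, a minimal sketch restricting the server to loopback traffic (note this also stops other Alloy instances in the cluster from reaching its metrics endpoint):

```yaml
alloy:
  listenAddr: 127.0.0.1
```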
### alloy.configMap.content
`alloy.configMap.content` holds the Grafana Alloy configuration to use.
If `alloy.configMap.content` is not provided, a [default configuration file][default-config] is
used. When provided, `alloy.configMap.content` must hold a valid Alloy configuration file.
[default-config]: ./config/example.alloy
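A minimal sketch providing inline configuration, borrowed from the chart's test values:

```yaml
alloy:
  configMap:
    create: true
    content: |-
      logging {
        level  = "warn"
        format = "logfmt"
      }
```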
### alloy.securityContext
`alloy.securityContext` sets the securityContext passed to the Grafana
Alloy container.
By default, Grafana Alloy containers are not able to collect telemetry from the
host node or other specific types of privileged telemetry data. See [Collecting
logs from other containers](#collecting-logs-from-other-containers) and
[Collecting host node telemetry](#collecting-host-node-telemetry) below for
more information on how to enable these capabilities.
### rbac.create
`rbac.create` enables the creation of ClusterRole and ClusterRoleBindings for
the Grafana Alloy containers to use. The default permission set allows
components like [discovery.kubernetes][] to work properly.
[discovery.kubernetes]: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubernetes/
### controller.autoscaling
`controller.autoscaling.enabled` enables the creation of a HorizontalPodAutoscaler. It is only used when `controller.type` is set to `deployment` or `statefulset`.
`controller.autoscaling` is intended to be used with [clustered][] mode.
> **WARNING**: Using `controller.autoscaling` for any other Grafana Alloy
> configuration could lead to redundant or double telemetry collection.
[clustered]: https://grafana.com/docs/alloy/latest/reference/cli/run/#clustered-mode
When using autoscaling with a StatefulSet controller that has
volumeClaimTemplates enabled, it is possible to leak up to `maxReplicas` PVCs
when the HPA scales down. If you're on Kubernetes version `>=1.23-0` and your
cluster has the `StatefulSetAutoDeletePVC` feature gate enabled, you can set
`controller.enableStatefulSetAutoDeletePVC` to `true` to automatically delete
stale PVCs.
Using `controller.autoscaling` requires the target metric (cpu/memory) to have
its resource requests set up for both the Alloy and config-reloader containers
so that the HPA can use them to calculate the replica count from the actual
resource utilization.
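Putting those requirements together, a minimal sketch enabling the horizontal autoscaler on a Deployment (the config-reloader requests shown here match the chart defaults):

```yaml
controller:
  type: deployment
  autoscaling:
    horizontal:
      enabled: true
      minReplicas: 2
      maxReplicas: 5
      targetMemoryUtilizationPercentage: 80
alloy:
  resources:
    requests:
      memory: 100Mi
configReloader:
  resources:
    requests:
      cpu: "10m"
      memory: "50Mi"
```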
## Collecting logs from other containers
There are two ways to collect logs from other containers within the cluster
Alloy is deployed in.
### loki.source.kubernetes
The [loki.source.kubernetes][] component may be used to collect logs from
containers using the Kubernetes API. This component does not require mounting
the host's filesystem into Alloy, nor does it require additional security
contexts to work correctly.
[loki.source.kubernetes]: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kubernetes/
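A sketch of a minimal pipeline using this component; the `loki.write` endpoint URL is a placeholder:

```yaml
alloy:
  configMap:
    content: |-
      discovery.kubernetes "pods" {
        role = "pod"
      }

      loki.source.kubernetes "pods" {
        targets    = discovery.kubernetes.pods.targets
        forward_to = [loki.write.default.receiver]
      }

      loki.write "default" {
        endpoint {
          url = "http://loki-gateway/loki/api/v1/push"  // placeholder
        }
      }
```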
### File-based collection
Logs may also be collected by mounting the host's filesystem into the Alloy
container, bypassing the need to communicate with the Kubernetes API.
To mount logs from other containers to Grafana Alloy directly:
* Set `alloy.mounts.dockercontainers` to `true`.
* Set `alloy.securityContext` to:
```yaml
privileged: true
runAsUser: 0
```
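Combined into a single values sketch:

```yaml
alloy:
  mounts:
    dockercontainers: true
  securityContext:
    privileged: true
    runAsUser: 0
```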
## Collecting host node telemetry
Telemetry from the host, such as host-specific log files (from `/var/log`) or
metrics from `/proc` and `/sys`, is not accessible to Grafana Alloy containers.
To expose this information to Grafana Alloy for telemetry collection:
* Set `alloy.mounts.dockercontainers` to `true`.
* Mount `/proc` and `/sys` from the host into the container.
* Set `alloy.securityContext` to:
```yaml
privileged: true
runAsUser: 0
```
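Expressed as values, one possible sketch (the `/proc` and `/sys` hostPath mounts and their mount points are illustrative, wired through the chart's generic `controller.volumes.extra` and `alloy.mounts.extra` hooks):

```yaml
controller:
  volumes:
    extra:
      - name: proc
        hostPath:
          path: /proc
      - name: sys
        hostPath:
          path: /sys
alloy:
  mounts:
    dockercontainers: true
    extra:
      - name: proc
        mountPath: /host/proc  # illustrative mount point
        readOnly: true
      - name: sys
        mountPath: /host/sys   # illustrative mount point
        readOnly: true
  securityContext:
    privileged: true
    runAsUser: 0
```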
## Expose Alloy externally on OpenShift clusters
To send telemetry over gRPC from an Alloy instance outside the OpenShift cluster to an Alloy instance running inside it, you need to:
* Set the optional `appProtocol` on `alloy.extraPorts` to `h2c`
* Expose the service via Ingress or Route within the OpenShift cluster. Example of a Route in OpenShift:
```yaml
kind: Route
apiVersion: route.openshift.io/v1
metadata:
name: route-otlp-alloy-h2c
spec:
to:
kind: Service
name: test-grpc-h2c
weight: 100
port:
targetPort: otlp-grpc
tls:
termination: edge
insecureEdgeTerminationPolicy: Redirect
wildcardPolicy: None
```
Once this Ingress/Route is exposed, it allows gRPC communication (for example, for traces). An Alloy instance on a VM or in another Kubernetes/OpenShift cluster can then communicate over gRPC via the exposed Ingress or Route.
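For the first step, the matching `alloy.extraPorts` entry might look like this (a sketch: the port name matches the Route's `targetPort` above, and 4317 is the conventional OTLP/gRPC port):

```yaml
alloy:
  extraPorts:
    - name: "otlp-grpc"
      port: 4317
      targetPort: 4317
      protocol: "TCP"
      appProtocol: "h2c"
```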

View File

@@ -0,0 +1,3 @@
apiVersion: v2
name: crds
version: 0.0.0

View File

@@ -0,0 +1,205 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
name: podlogs.monitoring.grafana.com
spec:
group: monitoring.grafana.com
names:
categories:
- grafana-alloy
- alloy
kind: PodLogs
listKind: PodLogsList
plural: podlogs
singular: podlogs
scope: Namespaced
versions:
- name: v1alpha2
schema:
openAPIV3Schema:
description: PodLogs defines how to collect logs for a Pod.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: PodLogsSpec defines how to collect logs for a Pod.
properties:
namespaceSelector:
description: Selector to select which namespaces the Pod objects are
discovered from.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
relabelings:
description: RelabelConfigs to apply to logs before delivering.
items:
description: 'RelabelConfig allows dynamic rewriting of the label
set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section
of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
default: replace
description: Action to perform based on regex matching. Default
is 'replace'. uppercase and lowercase actions require Prometheus
>= 2.36.
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex capture
groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label
values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels.
Their content is concatenated using the configured separator
and matched against the configured regular expression for
the replace, keep, and drop actions.
items:
description: LabelName is a valid Prometheus label name which
may only contain ASCII letters, numbers, as well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in
a replace action. It is mandatory for replace actions. Regex
capture groups are available.
type: string
type: object
type: array
selector:
description: Selector to select Pod objects. Required.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
required:
- selector
type: object
type: object
served: true
storage: true

View File

@@ -0,0 +1,3 @@
serviceAccount:
additionalLabels:
test: "true"

View File

@@ -0,0 +1,7 @@
alloy:
clustering:
enabled: true
controller:
type: 'statefulset'
replicas: 3

View File

@@ -0,0 +1,5 @@
controller:
type: deployment
podDisruptionBudget:
enabled: true
maxUnavailable: 1

View File

@@ -0,0 +1,5 @@
controller:
type: deployment
podDisruptionBudget:
enabled: true
minAvailable: 1

View File

@@ -0,0 +1,5 @@
controller:
type: statefulset
podDisruptionBudget:
enabled: true
maxUnavailable: 1

View File

@@ -0,0 +1,5 @@
controller:
type: statefulset
podDisruptionBudget:
enabled: true
minAvailable: 1

View File

@@ -0,0 +1,12 @@
controller:
volumes:
extra:
- name: cache-volume
emptyDir:
sizeLimit: 500Mi
alloy:
mounts:
extra:
- mountPath: /cache
name: cache-volume

View File

@@ -0,0 +1,5 @@
# Test rendering of the chart with the controller set to DaemonSet and host networking enabled.
controller:
type: daemonset
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet

View File

@@ -0,0 +1,3 @@
# Test rendering of the chart with the controller explicitly set to DaemonSet.
controller:
type: daemonset

View File

@@ -0,0 +1,26 @@
# Test rendering of the chart with the controller explicitly set to Deployment and autoscaling enabled.
controller:
type: deployment
autoscaling:
horizontal:
enabled: true
scaleDown:
policies:
- type: Pods
value: 4
periodSeconds: 60
selectPolicy: Min
stabilizationWindowSeconds: 100
scaleUp:
policies:
- type: Pods
value: 4
periodSeconds: 60
- type: Percent
value: 100
periodSeconds: 15
stabilizationWindowSeconds: 80
alloy:
resources:
requests:
memory: 100Mi

View File

@@ -0,0 +1,3 @@
# Test rendering of the chart with the controller explicitly set to Deployment.
controller:
type: deployment

View File

@@ -0,0 +1,26 @@
# Test rendering of the chart with the controller explicitly set to StatefulSet and autoscaling enabled via the deprecated top-level controller.autoscaling syntax.
controller:
type: statefulset
autoscaling:
enabled: true
scaleDown:
policies:
- type: Pods
value: 4
periodSeconds: 60
selectPolicy: Min
stabilizationWindowSeconds: 100
scaleUp:
policies:
- type: Pods
value: 4
periodSeconds: 60
- type: Percent
value: 100
periodSeconds: 15
stabilizationWindowSeconds: 80
enableStatefulSetAutoDeletePVC: true
alloy:
resources:
requests:
memory: 100Mi

View File

@@ -0,0 +1,3 @@
# Test rendering of the chart with the controller explicitly set to StatefulSet.
controller:
type: statefulset

View File

@@ -0,0 +1,10 @@
alloy:
configMap:
content: |-
logging {
level = "warn"
format = "logfmt"
}
discovery.kubernetes "custom_pods" {
role = "pod"
}

View File

@@ -0,0 +1 @@
# Test rendering of the chart with everything set to the default values.

View File

@@ -0,0 +1,9 @@
# Test rendering of the chart with the service monitor enabled and TLS configured
alloy:
listenScheme: HTTPS
service:
enabled: true
serviceMonitor:
enabled: true
tlsConfig:
insecureSkipVerify: true

View File

@@ -0,0 +1,5 @@
# Test rendering of the chart with the service monitor enabled
service:
enabled: true
serviceMonitor:
enabled: true

View File

@@ -0,0 +1,5 @@
# Specify envFrom for verifying rendering the template works
alloy:
envFrom:
- configMapRef:
name: special-config

View File

@@ -0,0 +1,5 @@
alloy:
configMap:
create: false
name: existing-config
key: my-config.alloy

View File

@@ -0,0 +1,9 @@
# Specify extra environment variables for verifying rendering the template works
alloy:
extraEnv:
- name: GREETING
value: "Warm greetings to"
- name: HONORIFIC
value: "The Most Honorable"
- name: NAME
value: "Kubernetes"

View File

@@ -0,0 +1,8 @@
extraObjects:
- apiVersion: v1
kind: Secret
metadata:
name: grafana-cloud
stringData:
PROMETHEUS_HOST: 'https://prometheus-us-central1.grafana.net/api/prom/push'
PROMETHEUS_USERNAME: '123456'

View File

@@ -0,0 +1,7 @@
# Specify extra ports for verifying rendering the template works
alloy:
extraPorts:
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP

View File

@@ -0,0 +1,9 @@
alloy:
extraPorts:
- name: "faro"
port: 12347
targetPort: 12347
protocol: "TCP"
ingress:
enabled: true

View File

@@ -0,0 +1,13 @@
# Test rendering of the chart with the global image pull secret explicitly set.
global:
image:
pullSecrets:
- name: global-cred
podSecurityContext:
runAsUser: 1000
runAsGroup: 1000
image:
pullSecrets:
- name: local-cred

View File

@@ -0,0 +1,11 @@
# Test rendering of the chart with the global image registry explicitly set to another value.
global:
image:
registry: quay.io
image:
  registry: docker.com # intentionally invalid; global.image.registry should take precedence
configReloader:
image:
registry: docker.com

View File

@@ -0,0 +1,5 @@
alloy:
hostAliases:
- ip: "20.21.22.23"
hostnames:
- "grafana.company.net"

View File

@@ -0,0 +1,29 @@
controller:
initContainers:
- name: geo-ip
image: ghcr.io/maxmind/geoipupdate:v6.0
volumeMounts:
- name: geoip
mountPath: /etc/geoip
env:
- name: GEOIPUPDATE_ACCOUNT_ID
value: "geoipupdate_account_id"
- name: GEOIPUPDATE_LICENSE_KEY
value: "geoipupdate_license_key"
- name: GEOIPUPDATE_EDITION_IDS
value: "GeoLite2-ASN GeoLite2-City GeoLite2-Country"
- name: GEOIPUPDATE_DB_DIR
value: "/etc/geoip"
  volumes:
    extra:
      - name: geoip
        emptyDir: {}
alloy:
mounts:
extra:
- name: geoip
mountPath: /etc/geoip

View File

@@ -0,0 +1,8 @@
controller:
type: deployment
alloy:
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 1"]

View File

@@ -0,0 +1,11 @@
alloy:
livenessProbe:
httpGet:
path: /metrics
port: 12345
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 2
periodSeconds: 30
successThreshold: 1
failureThreshold: 3

View File

@@ -0,0 +1,4 @@
# Test rendering of the chart with the image pull secret explicitly set.
image:
pullSecrets:
- name: local-cred

View File

@@ -0,0 +1,7 @@
# Test rendering of the chart with the individual image registries explicitly set to another value.
image:
registry: quay.io
configReloader:
image:
registry: quay.io

View File

@@ -0,0 +1,11 @@
controller:
nodeSelector:
key1: "value1"
tolerations:
- key: "key1"
operator: "Equal"
value: "value1"
effect: "NoSchedule"
- key: "key2"
operator: "Exists"
effect: "NoSchedule"

View File

@@ -0,0 +1,7 @@
global:
podSecurityContext:
fsGroup: 473
alloy:
securityContext:
runAsUser: 473
runAsGroup: 473

View File

@@ -0,0 +1,4 @@
# Test correct rendering of the pod annotations
controller:
podAnnotations:
testAnnotationKey: testAnnotationValue

View File

@@ -0,0 +1,29 @@
controller:
extraContainers:
- name: geo-ip
image: ghcr.io/maxmind/geoipupdate:v6.0
volumeMounts:
- name: geoip
mountPath: /etc/geoip
env:
- name: GEOIPUPDATE_ACCOUNT_ID
value: "geoipupdate_account_id"
- name: GEOIPUPDATE_LICENSE_KEY
value: "geoipupdate_license_key"
- name: GEOIPUPDATE_EDITION_IDS
value: "GeoLite2-ASN GeoLite2-City GeoLite2-Country"
- name: GEOIPUPDATE_DB_DIR
value: "/etc/geoip"
  volumes:
    extra:
      - name: geoip
        emptyDir: {}
alloy:
mounts:
extra:
- name: geoip
mountPath: /etc/geoip

View File

@@ -0,0 +1,3 @@
controller:
type: deployment
terminationGracePeriodSeconds: 20

View File

@@ -0,0 +1,10 @@
controller:
type: deployment
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/name: alloy
app.kubernetes.io/instance: alloy

View File

@@ -0,0 +1,10 @@
image:
registry: "docker.io"
repository: "grafana/agent"
digest: "sha256:82575a7be3e4770e53f620298e58bcc4cdb0fd0338e01c4b206cae9e3ca46ebf"
configReloader:
image:
registry: "docker.io"
repository: "jimmidyson/configmap-reload"
digest: "sha256:5af9d3041d12a3e63f115125f89b66d2ba981fe82e64302ac370c5496055059c"

View File

@@ -0,0 +1,28 @@
logging {
level = "info"
format = "logfmt"
}
discovery.kubernetes "pods" {
role = "pod"
}
discovery.kubernetes "nodes" {
role = "node"
}
discovery.kubernetes "services" {
role = "service"
}
discovery.kubernetes "endpoints" {
role = "endpoints"
}
discovery.kubernetes "endpointslices" {
role = "endpointslice"
}
discovery.kubernetes "ingresses" {
role = "ingress"
}

View File

@@ -0,0 +1 @@
Welcome to Grafana Alloy!

View File

@@ -0,0 +1,25 @@
{{/*
Retrieve configMap name from the name of the chart or the ConfigMap the user
specified.
*/}}
{{- define "alloy.config-map.name" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.configMap.name -}}
{{- $values.configMap.name }}
{{- else -}}
{{- include "alloy.fullname" . }}
{{- end }}
{{- end }}
{{/*
The name of the config file is the default or the key the user specified in the
ConfigMap.
*/}}
{{- define "alloy.config-map.key" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.configMap.key -}}
{{- $values.configMap.key }}
{{- else -}}
config.alloy
{{- end }}
{{- end }}

View File

@@ -0,0 +1,162 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "alloy.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "alloy.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "alloy.chart" -}}
{{- if index .Values "$chart_tests" }}
{{- printf "%s" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "alloy.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "alloy.labels" -}}
helm.sh/chart: {{ include "alloy.chart" . }}
{{ include "alloy.selectorLabels" . }}
{{- if index .Values "$chart_tests" }}
app.kubernetes.io/version: "vX.Y.Z"
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- else -}}
{{/* trim the delimiter prefix char from alloy.imageId output
e.g. ':' for tags and '@' for digests.
For digests, we crop the string to a 7-char (short) sha. */}}
app.kubernetes.io/version: {{ (include "alloy.imageId" .) | trunc 15 | trimPrefix "@sha256" | trimPrefix ":" | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: alloy
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "alloy.selectorLabels" -}}
app.kubernetes.io/name: {{ include "alloy.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "alloy.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "alloy.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Calculate the image ID to use for "alloy".
*/}}
{{- define "alloy.imageId" -}}
{{- if .Values.image.digest }}
{{- $digest := .Values.image.digest }}
{{- if not (hasPrefix "sha256:" $digest) }}
{{- $digest = printf "sha256:%s" $digest }}
{{- end }}
{{- printf "@%s" $digest }}
{{- else if .Values.image.tag }}
{{- printf ":%s" .Values.image.tag }}
{{- else }}
{{- printf ":%s" .Chart.AppVersion }}
{{- end }}
{{- end }}
{{/*
Calculate name of image ID to use for "config-reloader".
*/}}
{{- define "config-reloader.imageId" -}}
{{- if .Values.configReloader.image.digest }}
{{- $digest := .Values.configReloader.image.digest }}
{{- if not (hasPrefix "sha256:" $digest) }}
{{- $digest = printf "sha256:%s" $digest }}
{{- end }}
{{- printf "@%s" $digest }}
{{- else if .Values.configReloader.image.tag }}
{{- printf ":%s" .Values.configReloader.image.tag }}
{{- else }}
{{- printf ":%s" "v0.8.0" }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "alloy.ingress.apiVersion" -}}
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }}
{{- print "networking.k8s.io/v1" }}
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
{{- print "networking.k8s.io/v1beta1" }}
{{- else }}
{{- print "extensions/v1beta1" }}
{{- end }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "alloy.ingress.isStable" -}}
{{- eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1" }}
{{- end }}
{{/*
Return if ingress supports ingressClassName.
*/}}
{{- define "alloy.ingress.supportsIngressClassName" -}}
{{- or (eq (include "alloy.ingress.isStable" .) "true") (and (eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
{{- end }}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "alloy.ingress.supportsPathType" -}}
{{- or (eq (include "alloy.ingress.isStable" .) "true") (and (eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
{{- end }}
{{/*
Return the appropriate apiVersion for PodDisruptionBudget.
*/}}
{{- define "alloy.controller.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version) -}}
{{- print "policy/v1" -}}
{{- else -}}
{{- print "policy/v1beta1" -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,38 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.clustering.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "alloy.fullname" . }}-cluster
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: networking
spec:
type: ClusterIP
clusterIP: 'None'
publishNotReadyAddresses: true
selector:
{{- include "alloy.selectorLabels" . | nindent 4 }}
ports:
# Do not include the -metrics suffix in the port name, otherwise metrics
# can be double-collected with the non-headless Service if it's also
# enabled.
#
# This service should only be used for clustering, and not metric
# collection.
- name: {{ $values.clustering.portName }}
port: {{ $values.listenPort }}
targetPort: {{ $values.listenPort }}
protocol: "TCP"
{{- range $portMap := $values.extraPorts }}
- name: {{ $portMap.name }}
port: {{ $portMap.port }}
targetPort: {{ $portMap.targetPort }}
protocol: {{ coalesce $portMap.protocol "TCP" }}
{{- if not (empty $portMap.appProtocol) }}
# Useful for OpenShift clusters that want to expose Alloy ports externally
appProtocol: {{ $portMap.appProtocol }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,17 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.configMap.create }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "alloy.config-map.name" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: config
data:
{{- if $values.configMap.content }}
config.alloy: |- {{- (tpl $values.configMap.content .) | nindent 4 }}
{{- else }}
config.alloy: |- {{- .Files.Get "config/example.alloy" | trim | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,92 @@
{{- define "alloy.container" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
- name: alloy
image: {{ .Values.global.image.registry | default .Values.image.registry }}/{{ .Values.image.repository }}{{ include "alloy.imageId" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- run
- /etc/alloy/{{ include "alloy.config-map.key" . }}
- --storage.path={{ $values.storagePath }}
- --server.http.listen-addr={{ $values.listenAddr }}:{{ $values.listenPort }}
- --server.http.ui-path-prefix={{ $values.uiPathPrefix }}
{{- if not $values.enableReporting }}
- --disable-reporting
{{- end}}
{{- if $values.clustering.enabled }}
- --cluster.enabled=true
- --cluster.join-addresses={{ include "alloy.fullname" . }}-cluster
{{- if $values.clustering.name }}
- --cluster.name={{ $values.clustering.name }}
{{- end}}
{{- end}}
{{- if $values.stabilityLevel }}
- --stability.level={{ $values.stabilityLevel }}
{{- end }}
{{- range $values.extraArgs }}
- {{ . }}
{{- end}}
env:
- name: ALLOY_DEPLOY_MODE
value: "helm"
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- range $values.extraEnv }}
- {{- toYaml . | nindent 6 }}
{{- end }}
{{- if $values.envFrom }}
envFrom:
{{- toYaml $values.envFrom | nindent 4 }}
{{- end }}
ports:
- containerPort: {{ $values.listenPort }}
name: http-metrics
{{- range $portMap := $values.extraPorts }}
- containerPort: {{ $portMap.targetPort }}
{{- if $portMap.hostPort }}
hostPort: {{ $portMap.hostPort }}
{{- end}}
name: {{ $portMap.name }}
protocol: {{ coalesce $portMap.protocol "TCP" }}
{{- end }}
readinessProbe:
httpGet:
path: /-/ready
port: {{ $values.listenPort }}
scheme: {{ $values.listenScheme }}
initialDelaySeconds: 10
timeoutSeconds: 1
{{- with $values.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with $values.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with $values.lifecycle }}
lifecycle:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with $values.securityContext }}
securityContext:
{{- toYaml . | nindent 4 }}
{{- end }}
volumeMounts:
- name: config
mountPath: /etc/alloy
{{- if $values.mounts.varlog }}
- name: varlog
mountPath: /var/log
readOnly: true
{{- end }}
{{- if $values.mounts.dockercontainers }}
- name: dockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
{{- end }}
{{- range $values.mounts.extra }}
- {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- define "alloy.watch-container" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if .Values.configReloader.enabled -}}
- name: config-reloader
image: {{ .Values.global.image.registry | default .Values.configReloader.image.registry }}/{{ .Values.configReloader.image.repository }}{{ include "config-reloader.imageId" . }}
{{- if .Values.configReloader.customArgs }}
args:
{{- toYaml .Values.configReloader.customArgs | nindent 4 }}
{{- else }}
args:
- --watched-dir=/etc/alloy
- --reload-url=http://localhost:{{ $values.listenPort }}/-/reload
{{- end }}
volumeMounts:
- name: config
mountPath: /etc/alloy
{{- with .Values.configReloader.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.configReloader.securityContext }}
securityContext:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,93 @@
{{- define "alloy.pod-template" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
metadata:
annotations:
kubectl.kubernetes.io/default-container: alloy
{{- if and $values.configMap.create $values.configMap.content }}
checksum/config: {{ (tpl $values.configMap.content .) | sha256sum | trunc 63 }}
{{- end }}
{{- with .Values.controller.podAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "alloy.selectorLabels" . | nindent 4 }}
{{- with .Values.controller.podLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.global.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 4 }}
{{- end }}
serviceAccountName: {{ include "alloy.serviceAccountName" . }}
{{- if or .Values.global.image.pullSecrets .Values.image.pullSecrets }}
imagePullSecrets:
{{- if .Values.global.image.pullSecrets }}
{{- toYaml .Values.global.image.pullSecrets | nindent 4 }}
{{- else }}
{{- toYaml .Values.image.pullSecrets | nindent 4 }}
{{- end }}
{{- end }}
{{- if .Values.controller.initContainers }}
initContainers:
{{- with .Values.controller.initContainers }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
containers:
{{- include "alloy.container" . | nindent 4 }}
{{- include "alloy.watch-container" . | nindent 4 }}
{{- with .Values.controller.extraContainers }}
{{- toYaml . | nindent 4 }}
{{- end}}
{{- if .Values.controller.priorityClassName }}
priorityClassName: {{ .Values.controller.priorityClassName }}
{{- end }}
{{- if .Values.controller.hostNetwork }}
hostNetwork: {{ .Values.controller.hostNetwork }}
{{- end }}
{{- if .Values.controller.hostPID }}
hostPID: {{ .Values.controller.hostPID }}
{{- end }}
dnsPolicy: {{ .Values.controller.dnsPolicy }}
{{- with .Values.controller.affinity }}
affinity:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.controller.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds | int }}
{{- end }}
{{- with .Values.controller.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.controller.tolerations }}
tolerations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.controller.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 4 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ include "alloy.config-map.name" . }}
{{- if $values.mounts.varlog }}
- name: varlog
hostPath:
path: /var/log
{{- end }}
{{- if $values.mounts.dockercontainers }}
- name: dockercontainers
hostPath:
path: /var/lib/docker/containers
{{- end }}
{{- if .Values.controller.volumes.extra }}
{{- toYaml .Values.controller.volumes.extra | nindent 4 }}
{{- end }}
{{- if $values.hostAliases }}
hostAliases:
{{- toYaml $values.hostAliases | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if eq .Values.controller.type "daemonset" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "alloy.fullname" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
{{- with .Values.controller.extraAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if ge (int .Capabilities.KubeVersion.Minor) 22 }}
minReadySeconds: 10
{{- end }}
selector:
matchLabels:
{{- include "alloy.selectorLabels" . | nindent 6 }}
template:
{{- include "alloy.pod-template" . | nindent 4 }}
{{- with .Values.controller.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,29 @@
{{- if eq .Values.controller.type "deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "alloy.fullname" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
{{- with .Values.controller.extraAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.controller.autoscaling.enabled }}
replicas: {{ .Values.controller.replicas }}
{{- end }}
{{- if ge (int .Capabilities.KubeVersion.Minor) 22 }}
minReadySeconds: 10
{{- end }}
selector:
matchLabels:
{{- include "alloy.selectorLabels" . | nindent 6 }}
template:
{{- include "alloy.pod-template" . | nindent 4 }}
{{- with .Values.controller.updateStrategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,51 @@
{{- if eq .Values.controller.type "statefulset" }}
{{- if .Values.enableStatefulSetAutoDeletePVC }}
{{- fail "Value 'enableStatefulSetAutoDeletePVC' should be nested inside 'controller' options." }}
{{- end }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "alloy.fullname" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
{{- with .Values.controller.extraAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.controller.autoscaling.enabled }}
replicas: {{ .Values.controller.replicas }}
{{- end }}
{{- if .Values.controller.parallelRollout }}
podManagementPolicy: Parallel
{{- end }}
{{- if ge (int .Capabilities.KubeVersion.Minor) 22 }}
minReadySeconds: 10
{{- end }}
serviceName: {{ include "alloy.fullname" . }}
selector:
matchLabels:
{{- include "alloy.selectorLabels" . | nindent 6 }}
template:
{{- include "alloy.pod-template" . | nindent 4 }}
{{- with .Values.controller.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.controller.volumeClaimTemplates }}
volumeClaimTemplates:
{{- range . }}
- {{ toYaml . | nindent 6 }}
{{- end }}
{{- end }}
{{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.controller.enableStatefulSetAutoDeletePVC) }}
{{- /*
Data on the read nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
*/}}
persistentVolumeClaimRetentionPolicy:
whenDeleted: Delete
whenScaled: Delete
{{- end }}
{{- end }}

View File

@@ -0,0 +1,4 @@
{{ range .Values.extraObjects }}
---
{{ tpl (toYaml .) $ }}
{{ end }}

View File

@@ -0,0 +1,83 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if and (or (eq .Values.controller.type "deployment") (eq .Values.controller.type "statefulset" )) (or .Values.controller.autoscaling.horizontal.enabled .Values.controller.autoscaling.enabled) }}
{{ $autoscaling := .Values.controller.autoscaling }}
{{- if .Values.controller.autoscaling.horizontal.enabled }}
{{- $autoscaling = .Values.controller.autoscaling.horizontal }}
{{- end }}
{{- if (not (empty $autoscaling.targetMemoryUtilizationPercentage)) }}
{{- $_ := $values.resources.requests | required ".Values.alloy.resources.requests is required when using autoscaling." -}}
{{- $_ := $values.resources.requests.memory | required ".Values.alloy.resources.requests.memory is required when using autoscaling based on memory utilization." -}}
{{- $_ := .Values.configReloader.resources.requests | required ".Values.configReloader.resources.requests is required when using autoscaling." -}}
{{- $_ := .Values.configReloader.resources.requests.memory | required ".Values.configReloader.resources.requests.memory is required when using autoscaling based on memory utilization." -}}
{{- end}}
{{- if (not (empty $autoscaling.targetCPUUtilizationPercentage)) }}
{{- $_ := $values.resources.requests | required ".Values.alloy.resources.requests is required when using autoscaling." -}}
{{- $_ := $values.resources.requests.cpu | required ".Values.alloy.resources.requests.cpu is required when using autoscaling based on cpu utilization." -}}
{{- $_ := .Values.configReloader.resources.requests | required ".Values.configReloader.resources.requests is required when using autoscaling." -}}
{{- $_ := .Values.configReloader.resources.requests.cpu | required ".Values.configReloader.resources.requests.cpu is required when using autoscaling based on cpu utilization." -}}
{{- end}}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "alloy.fullname" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: availability
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: {{ .Values.controller.type }}
name: {{ include "alloy.fullname" . }}
{{- with $autoscaling }}
minReplicas: {{ .minReplicas }}
maxReplicas: {{ .maxReplicas }}
behavior:
{{- with .scaleDown }}
scaleDown:
{{- if .policies }}
policies:
{{- range .policies }}
- type: {{ .type }}
value: {{ .value }}
periodSeconds: {{ .periodSeconds }}
{{- end }}
selectPolicy: {{ .selectPolicy }}
{{- end }}
stabilizationWindowSeconds: {{ .stabilizationWindowSeconds }}
{{- end }}
{{- with .scaleUp }}
scaleUp:
{{- if .policies }}
policies:
{{- range .policies }}
- type: {{ .type }}
value: {{ .value }}
periodSeconds: {{ .periodSeconds }}
{{- end }}
selectPolicy: {{ .selectPolicy }}
{{- end }}
stabilizationWindowSeconds: {{ .stabilizationWindowSeconds }}
{{- end }}
metrics:
# Changing the order of the metrics will cause ArgoCD to go into a sync loop
# memory needs to be first.
# More info in: https://github.com/argoproj/argo-cd/issues/1079
{{- with .targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ . }}
{{- end }}
{{- with .targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ . }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,79 @@
{{- if .Values.ingress.enabled -}}
{{- $ingressApiIsStable := eq (include "alloy.ingress.isStable" .) "true" -}}
{{- $ingressSupportsIngressClassName := eq (include "alloy.ingress.supportsIngressClassName" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "alloy.ingress.supportsPathType" .) "true" -}}
{{- $fullName := include "alloy.fullname" . -}}
{{- $servicePort := .Values.ingress.faroPort -}}
{{- $ingressPath := .Values.ingress.path -}}
{{- $ingressPathType := .Values.ingress.pathType -}}
{{- $extraPaths := .Values.ingress.extraPaths -}}
apiVersion: {{ include "alloy.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: networking
{{- with .Values.ingress.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.ingress.annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ tpl $value $ | quote }}
{{- end }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end -}}
{{- with .Values.ingress.tls }}
tls:
{{- tpl (toYaml .) $ | nindent 4 }}
{{- end }}
rules:
{{- if .Values.ingress.hosts }}
{{- range .Values.ingress.hosts }}
- host: {{ tpl . $ }}
http:
paths:
{{- with $extraPaths }}
{{- toYaml . | nindent 10 }}
{{- end }}
- path: {{ $ingressPath }}
{{- if $ingressSupportsPathType }}
pathType: {{ $ingressPathType }}
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
number: {{ $servicePort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- end }}
{{- else }}
- http:
paths:
- backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
number: {{ $servicePort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- with $ingressPath }}
path: {{ . }}
{{- end }}
{{- if $ingressSupportsPathType }}
pathType: {{ $ingressPathType }}
{{- end }}
{{- end -}}
{{- end }}

View File

@@ -0,0 +1,31 @@
{{- if .Values.controller.podDisruptionBudget.enabled }}
{{- if eq .Values.controller.type "daemonset" }}
{{- fail "PDBs (Pod Disruption Budgets) are not intended for DaemonSets. Please use a different controller type." }}
{{- end }}
{{- if and .Values.controller.podDisruptionBudget.minAvailable .Values.controller.podDisruptionBudget.maxUnavailable }}
{{- fail "Only one of minAvailable or maxUnavailable should be defined for PodDisruptionBudget" }}
{{- end }}
{{- if not (or .Values.controller.podDisruptionBudget.minAvailable .Values.controller.podDisruptionBudget.maxUnavailable) }}
{{- fail "Either minAvailable or maxUnavailable must be defined for PodDisruptionBudget" }}
{{- end }}
apiVersion: {{ include "alloy.controller.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "alloy.fullname" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "alloy.selectorLabels" . | nindent 6 }}
{{- if .Values.controller.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.controller.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.controller.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,112 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "alloy.fullname" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: rbac
rules:
# Rules which allow discovery.kubernetes to function.
- apiGroups:
- ""
- "discovery.k8s.io"
- "networking.k8s.io"
resources:
- endpoints
- endpointslices
- ingresses
- nodes
- nodes/proxy
- nodes/metrics
- pods
- services
verbs:
- get
- list
- watch
# Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
- apiGroups:
- ""
resources:
- pods
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- "monitoring.grafana.com"
resources:
- podlogs
verbs:
- get
- list
- watch
# Rules which allow mimir.rules.kubernetes to work.
- apiGroups: ["monitoring.coreos.com"]
resources:
- prometheusrules
verbs:
- get
- list
- watch
- nonResourceURLs:
- /metrics
verbs:
- get
# Rules for prometheus.kubernetes.*
- apiGroups: ["monitoring.coreos.com"]
resources:
- podmonitors
- servicemonitors
- probes
- scrapeconfigs
verbs:
- get
- list
- watch
# Rules which allow eventhandler to work.
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
# needed for remote.kubernetes.*
- apiGroups: [""]
resources:
- "configmaps"
- "secrets"
verbs:
- get
- list
- watch
# needed for otelcol.processor.k8sattributes
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "alloy.fullname" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: rbac
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "alloy.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "alloy.serviceAccountName" . }}
namespace: {{ include "alloy.namespace" . }}
{{- end }}

View File

@@ -0,0 +1,43 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if .Values.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "alloy.fullname" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: networking
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
selector:
{{- include "alloy.selectorLabels" . | nindent 4 }}
{{- if semverCompare ">=1.26-0" .Capabilities.KubeVersion.Version }}
internalTrafficPolicy: {{.Values.service.internalTrafficPolicy}}
{{- end }}
ports:
- name: http-metrics
{{- if eq .Values.service.type "NodePort" }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
port: {{ $values.listenPort }}
targetPort: {{ $values.listenPort }}
protocol: "TCP"
{{- range $portMap := $values.extraPorts }}
- name: {{ $portMap.name }}
port: {{ $portMap.port }}
targetPort: {{ $portMap.targetPort }}
protocol: {{ coalesce $portMap.protocol "TCP" }}
{{- if not (empty $portMap.appProtocol) }}
# Useful for OpenShift clusters that want to expose Alloy ports externally
appProtocol: {{ $portMap.appProtocol }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,18 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ include "alloy.serviceAccountName" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: rbac
{{- with .Values.serviceAccount.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,37 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if and .Values.service.enabled .Values.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "alloy.fullname" . }}
namespace: {{ include "alloy.namespace" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: metrics
{{- with .Values.serviceMonitor.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
scheme: {{ $values.listenScheme | lower }}
honorLabels: true
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | nindent 6) . }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ tpl (toYaml .Values.serviceMonitor.relabelings | nindent 6) . }}
{{- end }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
selector:
matchLabels:
{{- include "alloy.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@@ -0,0 +1,41 @@
{{- if .Capabilities.APIVersions.Has "autoscaling.k8s.io/v1" -}}
{{- if .Values.controller.autoscaling.vertical.enabled -}}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ include "alloy.fullname" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: availability
spec:
{{- with .Values.controller.autoscaling.vertical }}
{{- with .recommenders }}
recommenders:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .resourcePolicy }}
resourcePolicy:
{{- with .containerPolicies }}
containerPolicies:
{{- range . }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- with .updatePolicy }}
updatePolicy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
targetRef:
apiVersion: apps/v1
{{- if eq .Values.controller.type "deployment" }}
kind: Deployment
{{- else if eq .Values.controller.type "statefulset" }}
kind: StatefulSet
{{- else }}
kind: DaemonSet
{{- end }}
name: {{ include "alloy.fullname" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,463 @@
# -- Overrides the chart's name. Used to change the infix in the resource names.
nameOverride: null
# -- Overrides the chart's namespace.
namespaceOverride: null
# -- Overrides the chart's computed fullname. Used to change the full prefix of
# resource names.
fullnameOverride: null
## Global properties for image pulling override the values defined under `image.registry` and `configReloader.image.registry`.
## If you want to override only one image registry, use the specific field; to override them all, use `global.image.registry`.
global:
image:
    # -- Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
registry: ""
# -- Optional set of global image pull secrets.
pullSecrets: []
# -- Security context to apply to the Grafana Alloy pod.
podSecurityContext: {}
crds:
# -- Whether to install CRDs for monitoring.
create: true
## Various Alloy settings. For backwards compatibility with the grafana-agent
## chart, this field may also be called "agent". Naming this field "agent" is
## deprecated and will be removed in a future release.
alloy:
configMap:
# -- Create a new ConfigMap for the config file.
create: true
# -- Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values.
content: ''
# -- Name of existing ConfigMap to use. Used when create is false.
name: null
# -- Key in ConfigMap to get config from.
key: null
clustering:
# -- Deploy Alloy in a cluster to allow for load distribution.
enabled: false
# -- Name for the Alloy cluster. Used for differentiating between clusters.
name: ""
# -- Name for the port used for clustering, useful if running inside an Istio Mesh
portName: http
# -- Minimum stability level of components and behavior to enable. Must be
# one of "experimental", "public-preview", or "generally-available".
stabilityLevel: "generally-available"
# -- Path to where Grafana Alloy stores data (for example, the Write-Ahead Log).
# By default, data is lost between reboots.
storagePath: /tmp/alloy
# -- Address to listen for traffic on. 0.0.0.0 exposes the UI to other
# containers.
listenAddr: 0.0.0.0
# -- Port to listen for traffic on.
listenPort: 12345
  # -- Scheme for the readiness probe. If enabling TLS in your config, set to "HTTPS".
listenScheme: HTTP
# -- Base path where the UI is exposed.
uiPathPrefix: /
# -- Enables sending Grafana Labs anonymous usage stats to help improve Grafana
# Alloy.
enableReporting: true
# -- Extra environment variables to pass to the Alloy container.
extraEnv: []
# -- Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core
envFrom: []
# -- Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/
extraArgs: []
# -- Extra ports to expose on the Alloy container.
extraPorts: []
# - name: "faro"
# port: 12347
# targetPort: 12347
# protocol: "TCP"
# appProtocol: "h2c"
# -- Host aliases to add to the Alloy container.
hostAliases: []
# - ip: "20.21.22.23"
# hostnames:
# - "company.grafana.net"
mounts:
# -- Mount /var/log from the host into the container for log collection.
varlog: false
# -- Mount /var/lib/docker/containers from the host into the container for log
# collection.
dockercontainers: false
# -- Extra volume mounts to add into the Grafana Alloy container. Does not
# affect the watch container.
extra: []
# -- Security context to apply to the Grafana Alloy container.
securityContext: {}
# -- Resource requests and limits to apply to the Grafana Alloy container.
resources: {}
# -- Set lifecycle hooks for the Grafana Alloy container.
lifecycle: {}
# preStop:
# exec:
# command:
# - /bin/sleep
# - "10"
# -- Set livenessProbe for the Grafana Alloy container.
livenessProbe: {}
image:
# -- Grafana Alloy image registry (defaults to docker.io)
registry: "docker.io"
# -- Grafana Alloy image repository.
repository: grafana/alloy
# -- (string) Grafana Alloy image tag. When empty, the Chart's appVersion is
# used.
tag: null
# -- Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`.
digest: null
# -- Grafana Alloy image pull policy.
pullPolicy: IfNotPresent
# -- Optional set of image pull secrets.
pullSecrets: []
rbac:
# -- Whether to create RBAC resources for Alloy.
create: true
serviceAccount:
# -- Whether to create a service account for the Grafana Alloy deployment.
create: true
# -- Additional labels to add to the created service account.
additionalLabels: {}
# -- Annotations to add to the created service account.
annotations: {}
# -- The name of the existing service account to use when
# serviceAccount.create is false.
name: null
# Whether the Alloy pod should automatically mount the service account token.
automountServiceAccountToken: true
# Options for the extra controller used for config reloading.
configReloader:
# -- Enables automatically reloading when the Alloy config changes.
enabled: true
image:
    # -- Config reloader image registry (defaults to quay.io)
registry: "quay.io"
# -- Repository to get config reloader image from.
repository: prometheus-operator/prometheus-config-reloader
# -- Tag of image to use for config reloading.
tag: v0.81.0
# -- SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag`
digest: ""
# -- Override the args passed to the container.
customArgs: []
# -- Resource requests and limits to apply to the config reloader container.
resources:
requests:
cpu: "10m"
memory: "50Mi"
# -- Security context to apply to the Grafana configReloader container.
securityContext: {}
controller:
# -- Type of controller to use for deploying Grafana Alloy in the cluster.
# Must be one of 'daemonset', 'deployment', or 'statefulset'.
type: 'daemonset'
# -- Number of pods to deploy. Ignored when controller.type is 'daemonset'.
replicas: 1
# -- Annotations to add to controller.
extraAnnotations: {}
# -- Whether to deploy pods in parallel. Only used when controller.type is
# 'statefulset'.
parallelRollout: true
# -- Configures Pods to use the host network. When set to true, the ports that will be used must be specified.
hostNetwork: false
# -- Configures Pods to use the host PID namespace.
hostPID: false
# -- Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ClusterFirst
# -- Termination grace period in seconds for the Grafana Alloy pods.
  # The default value used by Kubernetes if unspecified is 30 seconds.
terminationGracePeriodSeconds: null
# -- Update strategy for updating deployed Pods.
updateStrategy: {}
# -- nodeSelector to apply to Grafana Alloy pods.
nodeSelector: {}
# -- Tolerations to apply to Grafana Alloy pods.
tolerations: []
# -- Topology Spread Constraints to apply to Grafana Alloy pods.
topologySpreadConstraints: []
# -- priorityClassName to apply to Grafana Alloy pods.
priorityClassName: ''
# -- Extra pod annotations to add.
podAnnotations: {}
# -- Extra pod labels to add.
podLabels: {}
# -- PodDisruptionBudget configuration.
podDisruptionBudget:
# -- Whether to create a PodDisruptionBudget for the controller.
enabled: false
# -- Minimum number of pods that must be available during a disruption.
# Note: Only one of minAvailable or maxUnavailable should be set.
minAvailable: null
# -- Maximum number of pods that can be unavailable during a disruption.
# Note: Only one of minAvailable or maxUnavailable should be set.
maxUnavailable: null
# -- Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'.
enableStatefulSetAutoDeletePVC: false
autoscaling:
# -- Creates a HorizontalPodAutoscaler for controller type deployment.
# Deprecated: Please use controller.autoscaling.horizontal instead
enabled: false
# -- The lower limit for the number of replicas to which the autoscaler can scale down.
minReplicas: 1
# -- The upper limit for the number of replicas to which the autoscaler can scale up.
maxReplicas: 5
# -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
targetCPUUtilizationPercentage: 0
# -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
targetMemoryUtilizationPercentage: 80
scaleDown:
# -- List of policies to determine the scale-down behavior.
policies: []
# - type: Pods
# value: 4
# periodSeconds: 60
# -- Determines which of the provided scaling-down policies to apply if multiple are specified.
selectPolicy: Max
# -- The duration that the autoscaling mechanism should look back on to make decisions about scaling down.
stabilizationWindowSeconds: 300
scaleUp:
# -- List of policies to determine the scale-up behavior.
policies: []
# - type: Pods
# value: 4
# periodSeconds: 60
# -- Determines which of the provided scaling-up policies to apply if multiple are specified.
selectPolicy: Max
# -- The duration that the autoscaling mechanism should look back on to make decisions about scaling up.
stabilizationWindowSeconds: 0
# -- Configures the Horizontal Pod Autoscaler for the controller.
horizontal:
# -- Enables the Horizontal Pod Autoscaler for the controller.
enabled: false
# -- The lower limit for the number of replicas to which the autoscaler can scale down.
minReplicas: 1
# -- The upper limit for the number of replicas to which the autoscaler can scale up.
maxReplicas: 5
# -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
targetCPUUtilizationPercentage: 0
# -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
targetMemoryUtilizationPercentage: 80
scaleDown:
# -- List of policies to determine the scale-down behavior.
policies: []
# - type: Pods
# value: 4
# periodSeconds: 60
# -- Determines which of the provided scaling-down policies to apply if multiple are specified.
selectPolicy: Max
# -- The duration that the autoscaling mechanism should look back on to make decisions about scaling down.
stabilizationWindowSeconds: 300
scaleUp:
# -- List of policies to determine the scale-up behavior.
policies: []
# - type: Pods
# value: 4
# periodSeconds: 60
# -- Determines which of the provided scaling-up policies to apply if multiple are specified.
selectPolicy: Max
# -- The duration that the autoscaling mechanism should look back on to make decisions about scaling up.
stabilizationWindowSeconds: 0
# -- Configures the Vertical Pod Autoscaler for the controller.
vertical:
# -- Enables the Vertical Pod Autoscaler for the controller.
enabled: false
# -- List of recommenders to use for the Vertical Pod Autoscaler.
      # Recommenders are responsible for generating recommendations for the object.
# List should be empty (then the default recommender will generate the recommendation)
# or contain exactly one recommender.
recommenders: []
# recommenders:
# - name: custom-recommender-performance
# -- Configures the resource policy for the Vertical Pod Autoscaler.
resourcePolicy:
# -- Configures the container policies for the Vertical Pod Autoscaler.
containerPolicies:
- containerName: alloy
# -- The controlled resources for the Vertical Pod Autoscaler.
controlledResources:
- cpu
- memory
# -- The controlled values for the Vertical Pod Autoscaler. Needs to be either RequestsOnly or RequestsAndLimits.
controlledValues: "RequestsAndLimits"
# -- The maximum allowed values for the pods.
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# -- Defines the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
# -- Configures the update policy for the Vertical Pod Autoscaler.
updatePolicy:
# -- Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
# minReplicas: 1
# -- Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
# updateMode: Auto
# -- Affinity configuration for pods.
affinity: {}
volumes:
# -- Extra volumes to add to the Grafana Alloy pod.
extra: []
# -- volumeClaimTemplates to add when controller.type is 'statefulset'.
volumeClaimTemplates: []
## -- Additional init containers to run.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
initContainers: []
# -- Additional containers to run alongside the Alloy container and initContainers.
extraContainers: []
service:
# -- Creates a Service for the controller's pods.
enabled: true
# -- Service type
type: ClusterIP
# -- NodePort port. Only takes effect when `service.type: NodePort`
nodePort: 31128
# -- Cluster IP, can be set to None, empty "" or an IP address
clusterIP: ''
# -- Value for internal traffic policy. 'Cluster' or 'Local'
internalTrafficPolicy: Cluster
annotations: {}
# cloud.google.com/load-balancer-type: Internal
serviceMonitor:
enabled: false
# -- Additional labels for the service monitor.
additionalLabels: {}
# -- Scrape interval. If not set, the Prometheus default scrape interval is used.
interval: ""
# -- MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# -- Customize tls parameters for the service monitor
tlsConfig: {}
# -- RelabelConfigs to apply to samples before scraping
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
ingress:
# -- Enables ingress for Alloy (Faro port)
enabled: false
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
faroPort: 12347
  # pathType is only for k8s >= 1.18
pathType: Prefix
hosts:
- chart-example.local
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## Or for k8s > 1.19
# - path: /*
# pathType: Prefix
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# -- Extra k8s manifests to deploy
extraObjects: []
# - apiVersion: v1
# kind: Secret
# metadata:
# name: grafana-cloud
# stringData:
# PROMETHEUS_HOST: 'https://prometheus-us-central1.grafana.net/api/prom/push'
# PROMETHEUS_USERNAME: '123456'

View File

@@ -0,0 +1,4 @@
apiVersion: v2
description: A Helm chart that collects CustomResourceDefinitions (CRDs) from Mayastor.
name: crds
version: 2.10.0

View File

@@ -0,0 +1,12 @@
# crds
A Helm chart that collects CustomResourceDefinitions (CRDs) from Mayastor.
## Values
| Key | Description | Default |
|:----|:------------|:--------|
| csi.&ZeroWidthSpace;volumeSnapshots.&ZeroWidthSpace;annotations | Annotations to be added to all CRDs | `{}` |
| csi.&ZeroWidthSpace;volumeSnapshots.&ZeroWidthSpace;enabled | Install Volume Snapshot CRDs | `true` |
| csi.&ZeroWidthSpace;volumeSnapshots.&ZeroWidthSpace;keep | Keep CRDs on chart uninstall | `true` |
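For example, to keep the CRDs on uninstall and add an Argo CD annotation (a sketch; the keys mirror this chart's values.yaml, and the annotation is the one suggested there):

```yaml
csi:
  volumeSnapshots:
    enabled: true
    keep: true
    annotations:
      argocd.argoproj.io/sync-options: Prune=false
```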

View File

@@ -0,0 +1,20 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Adds extra annotations to CRDs. This targets two scenarios: preventing CRD recycling in case
the chart is removed, and adding custom annotations.
NOTE: This function assumes the element `metadata.annotations` already exists.
Usage:
{{- include "crds.extraAnnotations" .Values.csi.volumeSnapshots | nindent 4 }}
*/}}
{{- define "crds.extraAnnotations" -}}
{{- if .keep -}}
helm.sh/resource-policy: keep
{{ end }}
{{- with .annotations }}
{{- toYaml . }}
{{- end }}
{{- end -}}
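With `keep: true` and one custom annotation set, the helper above should render roughly the following into `metadata.annotations` (a sketch of the expected output, using the Argo CD annotation suggested in this chart's values.yaml):

```yaml
helm.sh/resource-policy: keep
argocd.argoproj.io/sync-options: Prune=false
```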

View File

@@ -0,0 +1,155 @@
{{- if .Values.csi.volumeSnapshots.enabled -}}
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-csi/external-snapshotter/pull/814
controller-gen.kubebuilder.io/version: v0.11.3
{{- include "crds.extraAnnotations" .Values.csi.volumeSnapshots | nindent 4 }}
creationTimestamp: null
name: volumesnapshotclasses.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotClass
listKind: VolumeSnapshotClassList
plural: volumesnapshotclasses
shortNames:
- vsclass
- vsclasses
singular: volumesnapshotclass
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .driver
name: Driver
type: string
- description: Determines whether a VolumeSnapshotContent created through the
VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .deletionPolicy
name: DeletionPolicy
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
        description: VolumeSnapshotClass specifies parameters that an underlying storage
system uses when creating a volume snapshot. A specific VolumeSnapshotClass
is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
are non-namespaced
properties:
apiVersion:
description: |
APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
deletionPolicy:
description: deletionPolicy determines whether a VolumeSnapshotContent
created through the VolumeSnapshotClass should be deleted when its bound
VolumeSnapshot is deleted. Supported values are "Retain" and "Delete".
"Retain" means that the VolumeSnapshotContent and its physical snapshot
on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are deleted.
Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the storage driver that handles this
VolumeSnapshotClass. Required.
type: string
kind:
description: |
Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
parameters:
additionalProperties:
type: string
description: parameters is a key-value map with storage driver specific
parameters for creating snapshots. These values are opaque to Kubernetes.
type: object
required:
- deletionPolicy
- driver
type: object
served: true
storage: true
subresources: {}
- additionalPrinterColumns:
- jsonPath: .driver
name: Driver
type: string
- description: |
Determines whether a VolumeSnapshotContent created through the
VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .deletionPolicy
name: DeletionPolicy
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated;
use snapshot.storage.k8s.io/v1 VolumeSnapshotClass
name: v1beta1
schema:
openAPIV3Schema:
        description: VolumeSnapshotClass specifies parameters that an underlying storage
system uses when creating a volume snapshot. A specific VolumeSnapshotClass
is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
are non-namespaced
properties:
apiVersion:
description: |
APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
deletionPolicy:
description: deletionPolicy determines whether a VolumeSnapshotContent
created through the VolumeSnapshotClass should be deleted when its bound
VolumeSnapshot is deleted. Supported values are "Retain" and "Delete".
"Retain" means that the VolumeSnapshotContent and its physical snapshot
on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are deleted.
Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the storage driver that handles this
VolumeSnapshotClass. Required.
type: string
kind:
description: |
Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
parameters:
additionalProperties:
type: string
description: parameters is a key-value map with storage driver specific
parameters for creating snapshots. These values are opaque to Kubernetes.
type: object
required:
- deletionPolicy
- driver
type: object
served: false
storage: false
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
{{- end -}}
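As a usage sketch against the v1 schema above, a minimal VolumeSnapshotClass; only `driver` and `deletionPolicy` are required, and the driver string below is an assumption that must match the name your CSI driver registers with:

```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: example-snapclass          # placeholder name
driver: io.openebs.csi-mayastor    # assumption: adjust to your CSI driver
deletionPolicy: Delete             # "Delete" or "Retain"
```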

View File

@@ -0,0 +1,499 @@
{{- if .Values.csi.volumeSnapshots.enabled -}}
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-csi/external-snapshotter/pull/814
controller-gen.kubebuilder.io/version: v0.11.3
{{- include "crds.extraAnnotations" .Values.csi.volumeSnapshots | nindent 4 }}
creationTimestamp: null
name: volumesnapshotcontents.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotContent
listKind: VolumeSnapshotContentList
plural: volumesnapshotcontents
shortNames:
- vsc
- vscs
singular: volumesnapshotcontent
scope: Cluster
versions:
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: Represents the complete size of the snapshot in bytes
jsonPath: .status.restoreSize
name: RestoreSize
type: integer
- description: Determines whether this VolumeSnapshotContent and its physical
snapshot on the underlying storage system should be deleted when its bound
VolumeSnapshot is deleted.
jsonPath: .spec.deletionPolicy
name: DeletionPolicy
type: string
- description: Name of the CSI driver used to create the physical snapshot on
the underlying storage system.
jsonPath: .spec.driver
name: Driver
type: string
- description: Name of the VolumeSnapshotClass to which this snapshot belongs.
jsonPath: .spec.volumeSnapshotClassName
name: VolumeSnapshotClass
type: string
- description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent
object is bound.
jsonPath: .spec.volumeSnapshotRef.name
name: VolumeSnapshot
type: string
- description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent
object is bound.
jsonPath: .spec.volumeSnapshotRef.namespace
name: VolumeSnapshotNamespace
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshotContent represents the actual "on-disk" snapshot
object in the underlying storage system
properties:
apiVersion:
description: |
APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |
Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
spec:
description: spec defines properties of a VolumeSnapshotContent created
by the underlying storage system. Required.
properties:
deletionPolicy:
description: deletionPolicy determines whether this VolumeSnapshotContent
and its physical snapshot on the underlying storage system should
be deleted when its bound VolumeSnapshot is deleted. Supported values
are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are kept.
"Delete" means that the VolumeSnapshotContent and its physical snapshot
on underlying storage system are deleted. For dynamically provisioned
snapshots, this field will automatically be filled in by the CSI
snapshotter sidecar with the "DeletionPolicy" field defined in the
corresponding VolumeSnapshotClass. For pre-existing snapshots, users
MUST specify this field when creating the VolumeSnapshotContent
object. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the CSI driver used to create the
physical snapshot on the underlying storage system. This MUST be
the same as the name returned by the CSI GetPluginName() call for
that driver. Required.
type: string
source:
description: source specifies whether the snapshot is (or should be)
dynamically provisioned or already exists, and just requires a Kubernetes
object representation. This field is immutable after creation. Required.
oneOf:
- required:
- snapshotHandle
- required:
- volumeHandle
properties:
snapshotHandle:
description: snapshotHandle specifies the CSI "snapshot_id" of
a pre-existing snapshot on the underlying storage system for
which a Kubernetes object representation was (or should be)
created. This field is immutable.
type: string
volumeHandle:
description: volumeHandle specifies the CSI "volume_id" of the
                      volume from which a snapshot should be dynamically taken.
This field is immutable.
type: string
type: object
sourceVolumeMode:
description: SourceVolumeMode is the mode of the volume whose snapshot
is taken. Can be either “Filesystem” or “Block”. If not specified,
it indicates the source volume's mode is unknown. This field is
immutable. This field is an alpha field.
type: string
volumeSnapshotClassName:
description: name of the VolumeSnapshotClass from which this snapshot
was (or will be) created. Note that after provisioning, the VolumeSnapshotClass
                  may be deleted or recreated with a different set of values, and as
such, should not be referenced post-snapshot creation.
type: string
volumeSnapshotRef:
description: volumeSnapshotRef specifies the VolumeSnapshot object
to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
                  field must reference this VolumeSnapshotContent's name for the
bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
object, name and namespace of the VolumeSnapshot object MUST be
provided for binding to happen. This field is immutable after creation.
Required.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: |
If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: |
Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
x-kubernetes-map-type: atomic
required:
- deletionPolicy
- driver
- source
- volumeSnapshotRef
type: object
status:
description: status represents the current information of a snapshot.
properties:
creationTime:
description: |
creationTime is the timestamp when the point-in-time
snapshot is taken by the underlying storage system. In dynamic snapshot
creation case, this field will be filled in by the CSI snapshotter
sidecar with the "creation_time" value returned from CSI "CreateSnapshot"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "creation_time" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. If not specified, it indicates
the creation time is unknown. The format of this field is a Unix
nanoseconds time encoded as an int64. On Unix, the command `date
+%s%N` returns the current time in nanoseconds since 1970-01-01
00:00:00 UTC.
format: int64
type: integer
error:
description: error is the last observed error during snapshot creation,
if any. Upon success after retry, this error field will be cleared.
properties:
message:
description: |
message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be
logged, and it should not contain sensitive information.
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in by the CSI snapshotter sidecar with the "ready_to_use"
value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "ready_to_use" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it, otherwise, this field will be set to "True". If not specified,
it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot
in bytes. In dynamic snapshot creation case, this field will be
filled in by the CSI snapshotter sidecar with the "size_bytes" value
returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "size_bytes" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it. When restoring a volume from this snapshot, the size of the
volume MUST NOT be smaller than the restoreSize if it is specified,
otherwise the restoration will fail. If not specified, it indicates
that the size is unknown.
format: int64
minimum: 0
type: integer
snapshotHandle:
description: snapshotHandle is the CSI "snapshot_id" of a snapshot
on the underlying storage system. If not specified, it indicates
that dynamic snapshot creation has either failed or it is still
in progress.
type: string
volumeGroupSnapshotContentName:
description: VolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent
                  of which this VolumeSnapshotContent is a part.
type: string
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: Represents the complete size of the snapshot in bytes
jsonPath: .status.restoreSize
name: RestoreSize
type: integer
- description: Determines whether this VolumeSnapshotContent and its physical
snapshot on the underlying storage system should be deleted when its bound
VolumeSnapshot is deleted.
jsonPath: .spec.deletionPolicy
name: DeletionPolicy
type: string
- description: Name of the CSI driver used to create the physical snapshot on
the underlying storage system.
jsonPath: .spec.driver
name: Driver
type: string
- description: Name of the VolumeSnapshotClass to which this snapshot belongs.
jsonPath: .spec.volumeSnapshotClassName
name: VolumeSnapshotClass
type: string
- description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent
object is bound.
jsonPath: .spec.volumeSnapshotRef.name
name: VolumeSnapshot
type: string
- description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent
object is bound.
jsonPath: .spec.volumeSnapshotRef.namespace
name: VolumeSnapshotNamespace
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated;
use snapshot.storage.k8s.io/v1 VolumeSnapshotContent
name: v1beta1
schema:
openAPIV3Schema:
description: VolumeSnapshotContent represents the actual "on-disk" snapshot
object in the underlying storage system
properties:
apiVersion:
description: |
APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |
Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
spec:
description: spec defines properties of a VolumeSnapshotContent created
by the underlying storage system. Required.
properties:
deletionPolicy:
description: deletionPolicy determines whether this VolumeSnapshotContent
and its physical snapshot on the underlying storage system should
be deleted when its bound VolumeSnapshot is deleted. Supported values
are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are kept.
"Delete" means that the VolumeSnapshotContent and its physical snapshot
on underlying storage system are deleted. For dynamically provisioned
snapshots, this field will automatically be filled in by the CSI
snapshotter sidecar with the "DeletionPolicy" field defined in the
corresponding VolumeSnapshotClass. For pre-existing snapshots, users
MUST specify this field when creating the VolumeSnapshotContent
object. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the CSI driver used to create the
physical snapshot on the underlying storage system. This MUST be
the same as the name returned by the CSI GetPluginName() call for
that driver. Required.
type: string
source:
description: source specifies whether the snapshot is (or should be)
dynamically provisioned or already exists, and just requires a Kubernetes
object representation. This field is immutable after creation. Required.
properties:
snapshotHandle:
description: snapshotHandle specifies the CSI "snapshot_id" of
a pre-existing snapshot on the underlying storage system for
which a Kubernetes object representation was (or should be)
created. This field is immutable.
type: string
volumeHandle:
description: volumeHandle specifies the CSI "volume_id" of the
                      volume from which a snapshot should be dynamically taken.
This field is immutable.
type: string
type: object
volumeSnapshotClassName:
description: name of the VolumeSnapshotClass from which this snapshot
was (or will be) created. Note that after provisioning, the VolumeSnapshotClass
                  may be deleted or recreated with a different set of values, and as
such, should not be referenced post-snapshot creation.
type: string
volumeSnapshotRef:
description: volumeSnapshotRef specifies the VolumeSnapshot object
to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
                  field must reference this VolumeSnapshotContent's name for the
bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
object, name and namespace of the VolumeSnapshot object MUST be
provided for binding to happen. This field is immutable after creation.
Required.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: |
If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: |
Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
required:
- deletionPolicy
- driver
- source
- volumeSnapshotRef
type: object
status:
description: status represents the current information of a snapshot.
properties:
creationTime:
description: creationTime is the timestamp when the point-in-time
snapshot is taken by the underlying storage system. In dynamic snapshot
creation case, this field will be filled in by the CSI snapshotter
sidecar with the "creation_time" value returned from CSI "CreateSnapshot"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "creation_time" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. If not specified, it indicates
the creation time is unknown. The format of this field is a Unix
nanoseconds time encoded as an int64. On Unix, the command `date
+%s%N` returns the current time in nanoseconds since 1970-01-01
00:00:00 UTC.
format: int64
type: integer
error:
description: error is the last observed error during snapshot creation,
if any. Upon success after retry, this error field will be cleared.
properties:
message:
description: |
message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be
logged, and it should not contain sensitive information.
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in by the CSI snapshotter sidecar with the "ready_to_use"
value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "ready_to_use" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it, otherwise, this field will be set to "True". If not specified,
it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot
in bytes. In dynamic snapshot creation case, this field will be
filled in by the CSI snapshotter sidecar with the "size_bytes" value
returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "size_bytes" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it. When restoring a volume from this snapshot, the size of the
volume MUST NOT be smaller than the restoreSize if it is specified,
otherwise the restoration will fail. If not specified, it indicates
that the size is unknown.
format: int64
minimum: 0
type: integer
snapshotHandle:
description: snapshotHandle is the CSI "snapshot_id" of a snapshot
on the underlying storage system. If not specified, it indicates
that dynamic snapshot creation has either failed or it is still
in progress.
type: string
type: object
required:
- spec
type: object
served: false
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
{{- end -}}
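A sketch of a pre-provisioned VolumeSnapshotContent that satisfies the v1 schema's required fields (`deletionPolicy`, `driver`, `source`, `volumeSnapshotRef`); every name and the snapshot handle below are placeholders:

```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotContent
metadata:
  name: example-snapcontent
spec:
  deletionPolicy: Retain              # keep the physical snapshot on delete
  driver: io.openebs.csi-mayastor     # assumption: adjust to your CSI driver
  source:
    snapshotHandle: snap-0123abcd     # placeholder CSI "snapshot_id"
  volumeSnapshotRef:
    name: example-snap                # namespaced VolumeSnapshot to bind to
    namespace: default
```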

View File

@@ -0,0 +1,400 @@
{{- if .Values.csi.volumeSnapshots.enabled -}}
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-csi/external-snapshotter/pull/814
controller-gen.kubebuilder.io/version: v0.11.3
{{- include "crds.extraAnnotations" .Values.csi.volumeSnapshots | nindent 4 }}
creationTimestamp: null
name: volumesnapshots.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshot
listKind: VolumeSnapshotList
plural: volumesnapshots
shortNames:
- vs
singular: volumesnapshot
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: If a new snapshot needs to be created, this contains the name of
the source PVC from which this snapshot was (or will be) created.
jsonPath: .spec.source.persistentVolumeClaimName
name: SourcePVC
type: string
- description: If a snapshot already exists, this contains the name of the existing
VolumeSnapshotContent object representing the existing snapshot.
jsonPath: .spec.source.volumeSnapshotContentName
name: SourceSnapshotContent
type: string
- description: Represents the minimum size of volume required to rehydrate from
this snapshot.
jsonPath: .status.restoreSize
name: RestoreSize
type: string
- description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
jsonPath: .spec.volumeSnapshotClassName
name: SnapshotClass
type: string
- description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot
        object intends to bind. Please note that verification of binding actually
requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure
both are pointing at each other. Binding MUST be verified prior to usage of
this object.
jsonPath: .status.boundVolumeSnapshotContentName
name: SnapshotContent
type: string
- description: Timestamp when the point-in-time snapshot was taken by the underlying
storage system.
jsonPath: .status.creationTime
name: CreationTime
type: date
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshot is a user's request for either creating a point-in-time
snapshot of a persistent volume, or binding to a pre-existing snapshot.
properties:
apiVersion:
description: |
APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |
Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
spec:
description: 'spec defines the desired characteristics of a snapshot requested
by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
Required.'
properties:
source:
description: source specifies where a snapshot will be created from.
This field is immutable after creation. Required.
oneOf:
- required:
- persistentVolumeClaimName
- required:
- volumeSnapshotContentName
properties:
persistentVolumeClaimName:
description: persistentVolumeClaimName specifies the name of the
PersistentVolumeClaim object representing the volume from which
a snapshot should be created. This PVC is assumed to be in the
same namespace as the VolumeSnapshot object. This field should
                      be set if the snapshot does not exist, and needs to be created.
This field is immutable.
type: string
volumeSnapshotContentName:
description: volumeSnapshotContentName specifies the name of a
pre-existing VolumeSnapshotContent object representing an existing
volume snapshot. This field should be set if the snapshot already
exists and only needs a representation in Kubernetes. This field
is immutable.
type: string
type: object
volumeSnapshotClassName:
description: |
VolumeSnapshotClassName is the name of the VolumeSnapshotClass
requested by the VolumeSnapshot. VolumeSnapshotClassName may be
left nil to indicate that the default SnapshotClass should be used.
                  A given cluster may have multiple default VolumeSnapshotClasses:
one default per CSI Driver. If a VolumeSnapshot does not specify
a SnapshotClass, VolumeSnapshotSource will be checked to figure
out what the associated CSI Driver is, and the default VolumeSnapshotClass
associated with that CSI Driver will be used. If more than one VolumeSnapshotClass
exist for a given CSI Driver and more than one have been marked
as default, CreateSnapshot will fail and generate an event. Empty
string is not allowed for this field.
type: string
required:
- source
type: object
status:
description: status represents the current information of a snapshot.
Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent
objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent
point at each other) before using this object.
properties:
boundVolumeSnapshotContentName:
description: |
boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent
                  object to which this VolumeSnapshot object intends to bind. If
not specified, it indicates that the VolumeSnapshot object has not
been successfully bound to a VolumeSnapshotContent object yet. NOTE:
To avoid possible security issues, consumers must verify binding
between VolumeSnapshot and VolumeSnapshotContent objects is successful
(by validating that both VolumeSnapshot and VolumeSnapshotContent
point at each other) before using this object.
type: string
creationTime:
description: creationTime is the timestamp when the point-in-time
snapshot is taken by the underlying storage system. In dynamic snapshot
creation case, this field will be filled in by the snapshot controller
with the "creation_time" value returned from CSI "CreateSnapshot"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "creation_time" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. If not specified, it may indicate
that the creation time of the snapshot is unknown.
format: date-time
type: string
error:
description: error is the last observed error during snapshot creation,
                  if any. This field could be helpful to upper-level controllers (i.e.,
application controller) to decide whether they should continue on
waiting for the snapshot to be created based on the type of error
reported. The snapshot controller will keep retrying when an error
occurs during the snapshot creation. Upon success, this error field
will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be
logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if the snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in by the snapshot controller with the "ready_to_use"
value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "ready_to_use" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it, otherwise, this field will be set to "True". If not specified,
it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the minimum size of volume required
to create a volume from this snapshot. In dynamic snapshot creation
case, this field will be filled in by the snapshot controller with
the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call.
For a pre-existing snapshot, this field will be filled with the
"size_bytes" value returned from the CSI "ListSnapshots" gRPC call
if the driver supports it. When restoring a volume from this snapshot,
the size of the volume MUST NOT be smaller than the restoreSize
if it is specified, otherwise the restoration will fail. If not
specified, it indicates that the size is unknown.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
type: string
x-kubernetes-int-or-string: true
volumeGroupSnapshotName:
description: VolumeGroupSnapshotName is the name of the VolumeGroupSnapshot
                  of which this VolumeSnapshot is a part.
type: string
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: If a new snapshot needs to be created, this contains the name of
the source PVC from which this snapshot was (or will be) created.
jsonPath: .spec.source.persistentVolumeClaimName
name: SourcePVC
type: string
- description: If a snapshot already exists, this contains the name of the existing
VolumeSnapshotContent object representing the existing snapshot.
jsonPath: .spec.source.volumeSnapshotContentName
name: SourceSnapshotContent
type: string
- description: Represents the minimum size of volume required to rehydrate from
this snapshot.
jsonPath: .status.restoreSize
name: RestoreSize
type: string
- description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
jsonPath: .spec.volumeSnapshotClassName
name: SnapshotClass
type: string
- description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot
        object intends to bind. Please note that verification of binding actually
requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure
both are pointing at each other. Binding MUST be verified prior to usage of
this object.
jsonPath: .status.boundVolumeSnapshotContentName
name: SnapshotContent
type: string
- description: Timestamp when the point-in-time snapshot was taken by the underlying
storage system.
jsonPath: .status.creationTime
name: CreationTime
type: date
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated;
use snapshot.storage.k8s.io/v1 VolumeSnapshot
name: v1beta1
schema:
openAPIV3Schema:
description: VolumeSnapshot is a user's request for either creating a point-in-time
snapshot of a persistent volume, or binding to a pre-existing snapshot.
properties:
apiVersion:
description: |
APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |
Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
spec:
description: |
spec defines the desired characteristics of a snapshot requested
by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
Required.
properties:
source:
description: source specifies where a snapshot will be created from.
This field is immutable after creation. Required.
properties:
persistentVolumeClaimName:
description: persistentVolumeClaimName specifies the name of the
PersistentVolumeClaim object representing the volume from which
a snapshot should be created. This PVC is assumed to be in the
same namespace as the VolumeSnapshot object. This field should
                      be set if the snapshot does not exist, and needs to be created.
This field is immutable.
type: string
volumeSnapshotContentName:
description: volumeSnapshotContentName specifies the name of a
pre-existing VolumeSnapshotContent object representing an existing
volume snapshot. This field should be set if the snapshot already
exists and only needs a representation in Kubernetes. This field
is immutable.
type: string
type: object
volumeSnapshotClassName:
description: |
VolumeSnapshotClassName is the name of the VolumeSnapshotClass
requested by the VolumeSnapshot. VolumeSnapshotClassName may be
left nil to indicate that the default SnapshotClass should be used.
                  A given cluster may have multiple default VolumeSnapshotClasses:
one default per CSI Driver. If a VolumeSnapshot does not specify
a SnapshotClass, VolumeSnapshotSource will be checked to figure
out what the associated CSI Driver is, and the default VolumeSnapshotClass
associated with that CSI Driver will be used. If more than one VolumeSnapshotClass
exist for a given CSI Driver and more than one have been marked
as default, CreateSnapshot will fail and generate an event. Empty
string is not allowed for this field.
type: string
required:
- source
type: object
status:
description: status represents the current information of a snapshot.
Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent
objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent
point at each other) before using this object.
properties:
boundVolumeSnapshotContentName:
description: |
boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent
                  object to which this VolumeSnapshot object intends to bind. If
not specified, it indicates that the VolumeSnapshot object has not
been successfully bound to a VolumeSnapshotContent object yet. NOTE:
To avoid possible security issues, consumers must verify binding
between VolumeSnapshot and VolumeSnapshotContent objects is successful
(by validating that both VolumeSnapshot and VolumeSnapshotContent
point at each other) before using this object.
type: string
creationTime:
description: creationTime is the timestamp when the point-in-time
snapshot is taken by the underlying storage system. In dynamic snapshot
creation case, this field will be filled in by the snapshot controller
with the "creation_time" value returned from CSI "CreateSnapshot"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "creation_time" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. If not specified, it may indicate
that the creation time of the snapshot is unknown.
format: date-time
type: string
error:
description: error is the last observed error during snapshot creation,
                  if any. This field could be helpful to upper-level controllers (i.e.,
application controller) to decide whether they should continue on
waiting for the snapshot to be created based on the type of error
reported. The snapshot controller will keep retrying when an error
occurs during the snapshot creation. Upon success, this error field
will be cleared.
properties:
message:
description: |
message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be
logged, and it should not contain sensitive information.
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if the snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in by the snapshot controller with the "ready_to_use"
value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "ready_to_use" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it, otherwise, this field will be set to "True". If not specified,
it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the minimum size of volume required
to create a volume from this snapshot. In dynamic snapshot creation
case, this field will be filled in by the snapshot controller with
the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call.
For a pre-existing snapshot, this field will be filled with the
"size_bytes" value returned from the CSI "ListSnapshots" gRPC call
if the driver supports it. When restoring a volume from this snapshot,
the size of the volume MUST NOT be smaller than the restoreSize
if it is specified, otherwise the restoration will fail. If not
specified, it indicates that the size is unknown.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
type: string
x-kubernetes-int-or-string: true
type: object
required:
- spec
type: object
served: false
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
{{- end -}}


@@ -0,0 +1,10 @@
csi:
volumeSnapshots:
# -- Install Volume Snapshot CRDs
enabled: true
# -- Keep CRDs on chart uninstall
keep: true
# -- Annotations to be added to all CRDs
annotations: {}
# Example for Argo CD to prevent CRDs from being recycled
# argocd.argoproj.io/sync-options: Prune=false


@@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# img folder
img/
# Changelog
CHANGELOG.md


@@ -0,0 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.31.1
digest: sha256:38d6de6fd62a10417ff51df8b2c5e0cf294de80fe393c4e9f3247ca6433718fa
generated: "2025-05-23T12:49:29.352367+02:00"


@@ -0,0 +1,35 @@
annotations:
category: Database
images: |
- name: etcd
image: docker.io/bitnami/etcd:3.6.4-debian-12-r0
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r49
licenses: Apache-2.0
tanzuCategory: service
apiVersion: v2
appVersion: 3.6.4
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
tags:
- bitnami-common
version: 2.x.x
description: etcd is a distributed key-value store designed to securely store data
across a cluster. etcd is widely used in production on account of its reliability,
fault-tolerance and ease of use.
home: https://bitnami.com
icon: https://dyltqmyl993wv.cloudfront.net/assets/stacks/etcd/img/etcd-stack-220x234.png
keywords:
- etcd
- cluster
- database
- cache
- key-value
maintainers:
- name: Broadcom, Inc. All Rights Reserved.
url: https://github.com/bitnami/charts
name: etcd
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/etcd
version: 12.0.14


@@ -0,0 +1,895 @@
<!--- app-name: Etcd -->
# Bitnami package for Etcd
etcd is a distributed key-value store designed to securely store data across a cluster. etcd is widely used in production on account of its reliability, fault-tolerance and ease of use.
[Overview of Etcd](https://etcd.io/)
Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
## TL;DR
```console
helm install my-release oci://registry-1.docker.io/bitnamicharts/etcd
```
Looking to use Etcd in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog.
## ⚠️ Important Notice: Upcoming changes to the Bitnami Catalog
Beginning August 28th, 2025, Bitnami will evolve its public catalog to offer a curated set of hardened, security-focused images under the new [Bitnami Secure Images initiative](https://news.broadcom.com/app-dev/broadcom-introduces-bitnami-secure-images-for-production-ready-containerized-applications). As part of this transition:
- Community users will gain access for the first time to security-optimized versions of popular container images.
- Bitnami will begin deprecating support for non-hardened, Debian-based software images in its free tier and will gradually remove non-latest tags from the public catalog. As a result, community users will have access to a reduced number of hardened images. These images are published only under the “latest” tag and are intended for development purposes.
- Starting August 28th, and over a period of two weeks, all existing container images, including older or versioned tags (e.g., 2.50.0, 10.6), will be migrated from the public catalog (docker.io/bitnami) to the “Bitnami Legacy” repository (docker.io/bitnamilegacy), where they will no longer receive updates.
- For production workloads and long-term support, users are encouraged to adopt Bitnami Secure Images, which include hardened containers, smaller attack surfaces, CVE transparency (via VEX/KEV), SBOMs, and enterprise support.
These changes aim to improve the security posture of all Bitnami users by promoting best practices for software supply chain integrity and up-to-date deployments. For more details, visit the [Bitnami Secure Images announcement](https://github.com/bitnami/containers/issues/83267).
## Introduction
This chart bootstraps an [etcd](https://github.com/bitnami/containers/tree/main/bitnami/etcd) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.23+
- Helm 3.8.0+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/etcd
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
These commands deploy etcd on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Configuration and installation details
### Resource requests and limits
Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case.
To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
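For example, a minimal sketch of explicit resource settings in your values file; the figures below are placeholders that should be adapted to your workload:
```yaml
resources:
  requests:
    cpu: 250m      # placeholder request
    memory: 256Mi  # placeholder request
  limits:
    cpu: "1"       # placeholder limit
    memory: 1Gi    # placeholder limit
```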
### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Prometheus metrics
This chart can be integrated with Prometheus by setting `metrics.enabled` to `true`. This exposes the etcd native Prometheus port in the container, and in the service when `metrics.useSeparateEndpoint=true`. Both carry the necessary annotations to be scraped automatically by Prometheus.
#### Prometheus requirements
It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster.
#### Integration with Prometheus Operator
The chart can deploy `PodMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `*.metrics.podMonitor.enabled=true`. Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error:
```text
no matches for kind "PodMonitor" in version "monitoring.coreos.com/v1"
```
Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to get the necessary CRDs and the Prometheus Operator.
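As a sketch, enabling the PodMonitor could look like the following values fragment; the `release: kube-prometheus` label is an assumption and must match your Prometheus Operator's `podMonitorSelector`:
```yaml
metrics:
  enabled: true
  podMonitor:
    enabled: true
    namespace: monitoring
    interval: 30s
    additionalLabels:
      release: kube-prometheus  # assumption: must match your Prometheus podMonitorSelector
```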
### Update credentials
Bitnami charts configure credentials at first boot. Any further change in the secrets or credentials requires manual intervention. Follow these instructions:
- Update the user password following [the upstream documentation](https://etcd.io/docs/latest/op-guide/authentication/)
- Update the password secret with the new values (replace the SECRET_NAME and PASSWORD placeholders)
```shell
kubectl create secret generic SECRET_NAME --from-literal=etcd-root-password=PASSWORD --dry-run=client -o yaml | kubectl apply -f -
```
### Cluster configuration
The Bitnami etcd chart can be used to bootstrap an etcd cluster that is easy to scale and ships with features for implementing disaster recovery. It uses static discovery configured via environment variables to bootstrap the etcd cluster. Based on the number of initial replicas, and using the A records added to the DNS configuration by the headless service, the chart can calculate every advertised peer URL.
The chart makes use of some extra elements offered by Kubernetes to ensure the bootstrapping is successful:
- It sets a "Parallel" Pod Management Policy. This is critical, since all the etcd replicas should be created simultaneously to guarantee they can find each other.
- It records "not ready" pods in the DNS, so etcd replicas are reachable using their associated FQDN before they're actually ready.
Learn more about [etcd discovery](https://etcd.io/docs/current/op-guide/clustering/#discovery), [Pod Management Policies](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies) and [recording "not ready" pods](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-hostname-and-subdomain-fields).
Here is an example of the environment configuration bootstrapping an etcd cluster with 3 replicas:
| Member | Variable                         | Value |
|--------|----------------------------------|-------|
| 0      | ETCD_NAME                        | etcd-0 |
| 0      | ETCD_INITIAL_ADVERTISE_PEER_URLS | <http://etcd-0.etcd-headless.default.svc.cluster.local:2380> |
| 1      | ETCD_NAME                        | etcd-1 |
| 1      | ETCD_INITIAL_ADVERTISE_PEER_URLS | <http://etcd-1.etcd-headless.default.svc.cluster.local:2380> |
| 2      | ETCD_NAME                        | etcd-2 |
| 2      | ETCD_INITIAL_ADVERTISE_PEER_URLS | <http://etcd-2.etcd-headless.default.svc.cluster.local:2380> |
| *      | ETCD_INITIAL_CLUSTER_TOKEN       | etcd-cluster-k8s |
| *      | ETCD_INITIAL_CLUSTER             | etcd-0=<http://etcd-0.etcd-headless.default.svc.cluster.local:2380>,etcd-1=<http://etcd-1.etcd-headless.default.svc.cluster.local:2380>,etcd-2=<http://etcd-2.etcd-headless.default.svc.cluster.local:2380> |
The probes (readiness & liveness) are delayed 60 seconds by default, to give the etcd replicas time to start and find each other. After that period, the *etcdctl endpoint health* command is used to periodically perform health checks on every replica.
#### Scalability
The Bitnami etcd chart uses etcd reconfiguration operations to add/remove members of the cluster during scaling.
When scaling down, a "pre-stop" lifecycle hook is used to ensure that the `etcdctl member remove` command is executed. The hook stores the output of this command in the persistent volume attached to the etcd pod. This hook is also executed when the pod is manually removed using the `kubectl delete pod` command or rescheduled by Kubernetes for any reason. This implies that the cluster can be scaled up/down without human intervention.
Here is an example to explain how this works:
1. An etcd cluster with three members running on a three-node Kubernetes cluster is bootstrapped.
2. After a few days, the cluster administrator decides to upgrade the kernel on one of the cluster nodes. To do so, the administrator drains the node. Pods running on that node are rescheduled to a different one.
3. During the pod eviction process, the "pre-stop" hook removes the etcd member from the cluster. Thus, the etcd cluster is scaled down to only two members.
4. Once the pod is scheduled on another node and initialized, the etcd member is added again to the cluster using the *etcdctl member add* command. Thus, the etcd cluster is scaled up to three replicas.
If, for whatever reason, the "pre-stop" hook fails at removing the member, the initialization logic is able to detect that something went wrong by checking the `etcdctl member remove` command output that was stored in the persistent volume. It then uses the `etcdctl member update` command to add back the member. In this case, the cluster isn't automatically scaled down/up while the pod is recovered. Therefore, when other members attempt to connect to the pod, it may cause warnings or errors like the one below:
```text
E | rafthttp: failed to dial XXXXXXXX on stream Message (peer XXXXXXXX failed to find local node YYYYYYYYY)
I | rafthttp: peer XXXXXXXX became inactive (message send to peer failed)
W | rafthttp: health check for peer XXXXXXXX could not connect: dial tcp A.B.C.D:2380: i/o timeout
```
Learn more about [etcd runtime configuration](https://etcd.io/docs/current/op-guide/runtime-configuration/) and how to safely [drain a Kubernetes node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/).
#### Cluster updates
When updating the etcd StatefulSet (such as when upgrading the chart version via the *helm upgrade* command), every pod must be replaced following the StatefulSet update strategy.
The chart uses a "RollingUpdate" strategy by default and with default Kubernetes values. In other words, it updates each Pod, one at a time, in the same order as Pod termination (from the largest ordinal to the smallest). It will wait until an updated Pod is "Running" and "Ready" prior to updating its predecessor.
Learn more about [StatefulSet update strategies](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies).
#### Disaster recovery
If, for whatever reason, more than (N-1)/2 members of the cluster fail and the "pre-stop" hooks also fail at removing them from the cluster, the cluster fails disastrously, irrevocably losing quorum. Once quorum is lost, the cluster cannot reach consensus and therefore cannot continue accepting updates. Under this circumstance, the only possible solution is usually to restore the cluster from a snapshot.
> IMPORTANT: All members should restore using the same snapshot.
The Bitnami etcd chart solves this problem by optionally offering a Kubernetes cron job that periodically snapshots the keyspace and stores it in a RWX volume. In case the cluster disastrously fails, the pods will automatically try to restore it using the last available snapshot.
[Learn how to enable this disaster recovery feature](#enable-disaster-recovery-features).
The chart also sets by default a "soft" Pod AntiAffinity to reduce the risk of the cluster failing disastrously.
Learn more about [etcd recovery](https://etcd.io/docs/current/op-guide/recovery), [Kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) and [pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity).
### Enable security for etcd
The etcd chart can be configured with Role-based access control and TLS encryption to improve its security.
#### Configure RBAC
In order to enable Role-Based Access Control for etcd, set the following parameters:
```text
auth.rbac.create=true
auth.rbac.rootPassword=ETCD_ROOT_PASSWORD
```
These parameters create a `root` user with an associated `root` role with access to everything. The remaining users will use the `guest` role and won't have permissions to do anything.
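Expressed as a values fragment, a minimal sketch could be (the password is a placeholder; for production, prefer `auth.rbac.existingSecret`):
```yaml
auth:
  rbac:
    create: true
    allowNoneAuthentication: false
    rootPassword: "ETCD_ROOT_PASSWORD"  # placeholder; consider auth.rbac.existingSecret instead
```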
#### Configure TLS for server-to-server communications
In order to enable secure transport between peer nodes, deploy the Helm chart with these options:
```text
auth.peer.secureTransport=true
auth.peer.useAutoTLS=true
```
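The equivalent values fragment is a short sketch:
```yaml
auth:
  peer:
    secureTransport: true
    useAutoTLS: true
```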
#### Configure certificates for client communication
In order to enable secure transport between client and server, create a secret containing the certificate and key files and the CA used to sign the client certificates. In this case, create the secret and then deploy the chart with these options:
```text
auth.client.secureTransport=true
auth.client.enableAuthentication=true
auth.client.existingSecret=etcd-client-certs
```
Learn more about the [etcd security model](https://etcd.io/) and how to [generate self-signed certificates for etcd](https://coreos.com/os/docs/latest/generate-self-signed-certificates.html).
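As a sketch, assuming you created a secret named `etcd-client-certs` containing `cert.pem`, `key.pem` and `ca.crt` files (the file key names are assumptions about your secret layout):
```yaml
auth:
  client:
    secureTransport: true
    enableAuthentication: true
    existingSecret: etcd-client-certs  # must exist before installing the chart
    certFilename: cert.pem
    certKeyFilename: key.pem
    caFilename: ca.crt  # assumption: CA file key name used in your secret
```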
### Enable disaster recovery features
The Bitnami etcd Helm chart supports automatic disaster recovery by periodically snapshotting the keyspace. If the cluster permanently loses more than (N-1)/2 members, it tries to recover the cluster from a previous snapshot.
Enable this feature with the following parameters:
```text
persistence.enabled=true
disasterRecovery.enabled=true
disasterRecovery.pvc.size=2Gi
disasterRecovery.pvc.storageClassName=nfs
```
If the `startFromSnapshot.*` parameters are used at the same time as the `disasterRecovery.*` parameters, the PVC provided via the `startFromSnapshot.existingClaim` parameter will be used to store the periodical snapshots.
> NOTE: The disaster recovery feature requires volumes with ReadWriteMany access mode.
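Put together as a values sketch (the `nfs` storage class is an assumption; any ReadWriteMany-capable class works):
```yaml
persistence:
  enabled: true
disasterRecovery:
  enabled: true
  cronjob:
    schedule: "*/30 * * * *"  # snapshot every 30 minutes (default)
  pvc:
    size: 2Gi
    storageClassName: nfs  # assumption: replace with an RWX-capable storage class
```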
### Backup and restore
Two different approaches are available to back up and restore this Helm Chart:
- Back up the data from the source deployment and restore it in a new deployment using etcd's built-in backup/restore tools.
- Back up the persistent volumes from the source deployment and attach them to a new deployment using Velero, a Kubernetes backup/restore tool.
#### Method 1: Backup and restore data using etcd's built-in tools
This method involves the following steps:
- Use the *etcdctl* tool to create a snapshot of the data in the source cluster.
- Make the snapshot available in a Kubernetes PersistentVolumeClaim (PVC) that supports ReadWriteMany access (for example, a PVC created with the NFS storage class).
- Restore the data snapshot in a new cluster using the Bitnami etcd Helm chart's *startFromSnapshot.existingClaim* and *startFromSnapshot.snapshotFilename* parameters to define the source PVC and source filename for the snapshot.
> NOTE: Under this approach, it is important to create the new deployment on the destination cluster using the same credentials as the original deployment on the source cluster.
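For instance, assuming the snapshot was saved as `db-snapshot` on a hypothetical ReadWriteMany PVC named `etcd-snapshots`, the restore values could be sketched as:
```yaml
startFromSnapshot:
  enabled: true
  existingClaim: etcd-snapshots  # hypothetical PVC holding the snapshot
  snapshotFilename: db-snapshot  # hypothetical snapshot file name
```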
#### Method 2: Back up and restore persistent data volumes
This method involves copying the persistent data volumes for the etcd nodes and reusing them in a new deployment with [Velero](https://velero.io/), an open source Kubernetes backup/restore tool. This method is only suitable when:
- The Kubernetes provider is [supported by Velero](https://velero.io/docs/latest/supported-providers/).
- Both clusters are on the same Kubernetes provider, as this is a requirement of [Velero's native support for migrating persistent volumes](https://velero.io/docs/latest/migration-case/).
- The restored deployment on the destination cluster will have the same name, namespace, topology and credentials as the original deployment on the source cluster.
This method involves the following steps:
- Install Velero on the source and destination clusters.
- Use Velero to back up the PersistentVolumes (PVs) used by the etcd deployment on the source cluster.
- Use Velero to restore the backed-up PVs on the destination cluster.
- Create a new etcd deployment on the destination cluster with the same deployment name, credentials and other parameters as the original. This new deployment will use the restored PVs and hence the original data.
### Exposing etcd metrics
The metrics exposed by etcd can be scraped by Prometheus. Metrics can be scraped from within the cluster using any of the following approaches:
- Adding the required annotations for Prometheus to discover the metrics endpoints, as in the example below:
```yaml
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics/cluster"
prometheus.io/port: "9000"
```
- Creating a ServiceMonitor or PodMonitor entry (when the Prometheus Operator is available in the cluster)
- Using something similar to the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
### Using custom configuration
In order to use custom configuration parameters, two options are available:
- Using environment variables: etcd allows setting environment variables that map to configuration settings. In order to set extra environment variables, you can use the `extraEnvVars` property. Alternatively, you can use a ConfigMap or a Secret with the environment variables using the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties.
```yaml
extraEnvVars:
- name: ETCD_AUTO_COMPACTION_RETENTION
value: "0"
- name: ETCD_HEARTBEAT_INTERVAL
value: "150"
```
- Using a custom `etcd.conf.yml`: The etcd chart allows mounting a custom `etcd.conf.yml` file as a ConfigMap. In order to do so, you can use the `configuration` property (see the sketch below). Alternatively, you can use an existing ConfigMap via the `existingConfigmap` parameter.
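As a sketch of the second option, a custom `etcd.conf.yml` can be embedded inline; the keys shown are standard etcd configuration-file options and the values are placeholders:
```yaml
configuration: |-
  auto-compaction-mode: periodic
  auto-compaction-retention: "1"   # placeholder: compact hourly
  quota-backend-bytes: 4294967296  # placeholder: 4 GiB backend quota
```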
### Auto Compaction
Since etcd keeps an exact history of its keyspace, this history should be periodically compacted to avoid performance degradation and eventual storage space exhaustion. Compacting the keyspace history drops all information about keys superseded prior to a given keyspace revision. The space used by these keys then becomes available for additional writes to the keyspace.
`autoCompactionMode` selects the compaction mode (default: `periodic`). Valid values:
- `periodic` for duration-based retention, defaulting to hours if no time unit is provided (e.g. "5m").
- `revision` for revision-number-based retention.
`autoCompactionRetention` sets the auto-compaction retention for the mvcc key-value store, in hours; the default of `0` means auto compaction is disabled.
You can enable auto compaction by using the following parameters:
```console
autoCompactionMode=periodic
autoCompactionRetention=10m
```
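In values form, the same sketch reads:
```yaml
autoCompactionMode: periodic
autoCompactionRetention: "10m"
```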
### Sidecars and Init Containers
If you need additional containers to run within the same pod as the etcd app (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
```yaml
sidecars:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
```
Similarly, you can add extra init containers using the `initContainers` parameter.
```yaml
initContainers:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
```
### Deploying extra resources
There are cases where you may want to deploy extra objects, such as a ConfigMap containing your app's configuration or some extra deployment with a microservice used by your app. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter.
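For example, a minimal sketch deploying an extra ConfigMap alongside the release (the name and contents are placeholders):
```yaml
extraDeploy:
  - apiVersion: v1
    kind: ConfigMap
    metadata:
      name: my-app-config  # placeholder name
    data:
      app.properties: |
        key=value
```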
### Setting Pod's affinity
This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
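For instance, to forbid two replicas from sharing a node while preferring certain zones, a sketch could be (the zone label and values are assumptions about your node labels):
```yaml
podAntiAffinityPreset: hard
nodeAffinityPreset:
  type: soft
  key: topology.kubernetes.io/zone  # assumption: standard well-known node label
  values:
    - us-east-1a
    - us-east-1b
```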
## Persistence
The [Bitnami etcd](https://github.com/bitnami/containers/tree/main/bitnami/etcd) image stores the etcd data at the `/bitnami/etcd` path of the container. Persistent Volume Claims are used to keep the data across statefulsets.
The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose.
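A minimal persistence sketch (the storage class name is a placeholder; omit it to use the cluster default):
```yaml
persistence:
  enabled: true
  storageClass: "standard"  # placeholder storage class
  size: 8Gi
  accessModes:
    - ReadWriteOnce
```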
If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
## Parameters
### Global parameters
| Name | Description | Value |
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` |
| `global.security.allowInsecureImages` | Allows skipping image verification | `false` |
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` |
### Common parameters
| Name | Description | Value |
| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
| `namespaceOverride` | String to fully override common.names.namespace template | `""` |
| `commonLabels` | Labels to add to all deployed objects | `{}` |
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `usePasswordFiles` | Mount credentials as files instead of using environment variables | `true` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
### etcd parameters
| Name | Description | Value |
| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ---------------------- |
| `image.registry` | etcd image registry | `REGISTRY_NAME` |
| `image.repository` | etcd image name | `REPOSITORY_NAME/etcd` |
| `image.digest`                         | etcd image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag              | `""`                   |
| `image.pullPolicy` | etcd image pull policy | `IfNotPresent` |
| `image.pullSecrets` | etcd image pull secrets | `[]` |
| `image.debug` | Enable image debug mode | `false` |
| `auth.rbac.create` | Switch to enable RBAC authentication | `true` |
| `auth.rbac.allowNoneAuthentication` | Allow to use etcd without configuring RBAC authentication | `true` |
| `auth.rbac.rootPassword` | Root user password. The root user is always `root` | `""` |
| `auth.rbac.existingSecret` | Name of the existing secret containing credentials for the root user | `""` |
| `auth.rbac.existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `""` |
| `auth.token.enabled` | Enables token authentication | `true` |
| `auth.token.type` | Authentication token type. Allowed values: 'simple' or 'jwt' | `jwt` |
| `auth.token.privateKey.filename` | Name of the file containing the private key for signing the JWT token | `jwt-token.pem` |
| `auth.token.privateKey.existingSecret` | Name of the existing secret containing the private key for signing the JWT token | `""` |
| `auth.token.signMethod` | JWT token sign method | `RS256` |
| `auth.token.ttl` | JWT token TTL | `10m` |
| `auth.client.secureTransport` | Switch to encrypt client-to-server communications using TLS certificates | `false` |
| `auth.client.useAutoTLS` | Switch to automatically create the TLS certificates | `false` |
| `auth.client.existingSecret` | Name of the existing secret containing the TLS certificates for client-to-server communications | `""` |
| `auth.client.enableAuthentication` | Switch to enable host authentication using TLS certificates. Requires existing secret | `false` |
| `auth.client.certFilename` | Name of the file containing the client certificate | `cert.pem` |
| `auth.client.certKeyFilename` | Name of the file containing the client certificate private key | `key.pem` |
| `auth.client.caFilename` | Name of the file containing the client CA certificate | `""` |
| `auth.peer.secureTransport` | Switch to encrypt server-to-server communications using TLS certificates | `false` |
| `auth.peer.useAutoTLS` | Switch to automatically create the TLS certificates | `false` |
| `auth.peer.existingSecret` | Name of the existing secret containing the TLS certificates for server-to-server communications | `""` |
| `auth.peer.enableAuthentication` | Switch to enable host authentication using TLS certificates. Requires existing secret | `false` |
| `auth.peer.certFilename` | Name of the file containing the peer certificate | `cert.pem` |
| `auth.peer.certKeyFilename` | Name of the file containing the peer certificate private key | `key.pem` |
| `auth.peer.caFilename` | Name of the file containing the peer CA certificate | `""` |
| `autoCompactionMode` | Auto compaction mode, by default periodic. Valid values: "periodic", "revision". | `""` |
| `autoCompactionRetention` | Auto compaction retention for mvcc key value store in hour, by default 0, means disabled | `""` |
| `initialClusterToken` | Initial cluster token. Can be used to protect etcd from cross-cluster-interaction, which might corrupt the clusters. | `etcd-cluster-k8s` |
| `logLevel` | Sets the log level for the etcd process. Allowed values: 'debug', 'info', 'warn', 'error', 'panic', 'fatal' | `info` |
| `maxProcs`                             | Limits the number of operating system threads that can execute user-level Go code simultaneously                      | `""`                   |
| `configuration` | etcd configuration. Specify content for etcd.conf.yml | `""` |
| `existingConfigmap` | Existing ConfigMap with etcd configuration | `""` |
| `extraEnvVars` | Extra environment variables to be set on etcd container | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` |
| `command` | Default container command (useful when using custom images) | `[]` |
| `args` | Default container args (useful when using custom images) | `[]` |
### etcd statefulset parameters
| Name | Description | Value |
| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
| `replicaCount` | Number of etcd replicas to deploy | `1` |
| `updateStrategy.type` | Update strategy type, can be set to RollingUpdate or OnDelete. | `RollingUpdate` |
| `podManagementPolicy` | Pod management policy for the etcd statefulset | `Parallel` |
| `automountServiceAccountToken` | Mount Service Account token in pod | `false` |
| `hostAliases` | etcd pod host aliases | `[]` |
| `lifecycleHooks` | Override default etcd container hooks | `{}` |
| `containerPorts.client` | Client port to expose at container level | `2379` |
| `containerPorts.peer` | Peer port to expose at container level | `2380` |
| `containerPorts.metrics` | Metrics port to expose at container level when metrics.useSeparateEndpoint is true | `9090` |
| `podSecurityContext.enabled` | Enabled etcd pods' Security Context | `true` |
| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `podSecurityContext.fsGroup` | Set etcd pod's Security Context fsGroup | `1001` |
| `containerSecurityContext.enabled` | Enabled etcd containers' Security Context | `true` |
| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `containerSecurityContext.runAsUser` | Set etcd containers' Security Context runAsUser | `1001` |
| `containerSecurityContext.runAsGroup`               | Set etcd containers' Security Context runAsGroup                                                                                                                                                                     | `1001`           |
| `containerSecurityContext.runAsNonRoot` | Set Controller container's Security Context runAsNonRoot | `true` |
| `containerSecurityContext.privileged` | Set primary container's Security Context privileged | `false` |
| `containerSecurityContext.allowPrivilegeEscalation` | Set primary container's Security Context allowPrivilegeEscalation | `false` |
| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `livenessProbe.enabled` | Enable livenessProbe | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `startupProbe.enabled` | Enable startupProbe | `false` |
| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `0` |
| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` |
| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `customLivenessProbe` | Override default liveness probe | `{}` |
| `customReadinessProbe` | Override default readiness probe | `{}` |
| `customStartupProbe` | Override default startup probe | `{}` |
| `extraVolumes` | Optionally specify extra list of additional volumes for etcd pods | `[]` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for etcd container(s) | `[]` |
| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volumeClaimTemplates for etcd container(s) | `[]` |
| `initContainers` | Add additional init containers to the etcd pods | `[]` |
| `sidecars` | Add additional sidecar containers to the etcd pods | `[]` |
| `podAnnotations` | Annotations for etcd pods | `{}` |
| `podLabels` | Extra labels for etcd pods | `{}` |
| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | Affinity for pod assignment | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Tolerations for pod assignment | `[]` |
| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` |
| `schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `priorityClassName` | Name of the priority class to be used by etcd pods | `""` |
| `runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` |
| `shareProcessNamespace` | Enable shared process namespace in a pod. | `false` |
| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
| `persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` |
| `persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` |
| `persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` |
### Traffic exposure parameters
| Name | Description | Value |
| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.enabled`                  | Create a second service (besides the headless service) if set to `true`                                                                                         | `true`      |
| `service.clusterIP` | Kubernetes service Cluster IP | `""` |
| `service.ports.client` | etcd client port | `2379` |
| `service.ports.peer` | etcd peer port | `2380` |
| `service.ports.metrics` | etcd metrics port when metrics.useSeparateEndpoint is true | `9090` |
| `service.nodePorts.client` | Specify the nodePort client value for the LoadBalancer and NodePort service types. | `""` |
| `service.nodePorts.peer` | Specify the nodePort peer value for the LoadBalancer and NodePort service types. | `""` |
| `service.nodePorts.metrics` | Specify the nodePort metrics value for the LoadBalancer and NodePort service types. The metrics port is only exposed when metrics.useSeparateEndpoint is true. | `""` |
| `service.clientPortNameOverride` | etcd client port name override | `""` |
| `service.peerPortNameOverride` | etcd peer port name override | `""` |
| `service.metricsPortNameOverride` | etcd metrics port name override. The metrics port is only exposed when metrics.useSeparateEndpoint is true. | `""` |
| `service.loadBalancerIP` | loadBalancerIP for the etcd service (optional, cloud specific) | `""` |
| `service.loadBalancerClass` | loadBalancerClass for the etcd service (optional, cloud specific) | `""` |
| `service.loadBalancerSourceRanges` | Load Balancer source ranges | `[]` |
| `service.externalIPs` | External IPs | `[]` |
| `service.externalTrafficPolicy`    | etcd service external traffic policy                                                                                                                            | `Cluster`   |
| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `service.annotations` | Additional annotations for the etcd service | `{}` |
| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `service.headless.annotations` | Annotations for the headless service. | `{}` |
### Persistence parameters
| Name | Description | Value |
| -------------------------- | --------------------------------------------------------------- | ------------------- |
| `persistence.enabled` | If true, use a Persistent Volume Claim. If false, use emptyDir. | `true` |
| `persistence.storageClass` | Persistent Volume Storage Class | `""` |
| `persistence.annotations` | Annotations for the PVC | `{}` |
| `persistence.labels` | Labels for the PVC | `{}` |
| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
| `persistence.size` | PVC Storage Request for etcd data volume | `8Gi` |
| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` |
### Volume Permissions parameters
| Name | Description | Value |
| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `REPOSITORY_NAME/os-shell` |
| `volumePermissions.image.digest`      | Init container volume-permissions image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag                                                                                                                   | `""`                       |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `nano` |
| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
### Network Policy parameters
| Name | Description | Value |
| --------------------------------------- | --------------------------------------------------------------- | ------ |
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` |
| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
| `networkPolicy.extraEgress`             | Add extra egress rules to the NetworkPolicy                     | `[]`   |
| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` |
### Metrics parameters
| Name | Description | Value |
| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ------------ |
| `metrics.enabled` | Expose etcd metrics | `false` |
| `metrics.useSeparateEndpoint` | Use a separate endpoint for exposing metrics | `false` |
| `metrics.podAnnotations` | Annotations for the Prometheus metrics on etcd pods | `{}` |
| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.podMonitor.namespace` | Namespace in which Prometheus is running | `monitoring` |
| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` |
| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` |
| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` |
| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` |
| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` |
| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` |
| `metrics.prometheusRule.enabled` | Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` |
| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.rules` | Prometheus Rule definitions | `[]` |
### Snapshotting parameters
| Name | Description | Value |
| ---------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
| `startFromSnapshot.enabled` | Initialize new cluster recovering an existing snapshot | `false` |
| `startFromSnapshot.existingClaim` | Existing PVC containing the etcd snapshot | `""` |
| `startFromSnapshot.snapshotFilename` | Snapshot filename | `""` |
| `disasterRecovery.enabled` | Enable auto disaster recovery by periodically snapshotting the keyspace | `false` |
| `disasterRecovery.cronjob.schedule` | Schedule in Cron format to save snapshots | `*/30 * * * *` |
| `disasterRecovery.cronjob.historyLimit` | Number of successful finished jobs to retain | `1` |
| `disasterRecovery.cronjob.snapshotHistoryLimit` | Number of etcd snapshots to retain, tagged by date | `1` |
| `disasterRecovery.cronjob.snapshotsDir` | Directory to store snapshots | `/snapshots` |
| `disasterRecovery.cronjob.podAnnotations` | Pod annotations for cronjob pods | `{}` |
| `disasterRecovery.cronjob.podSecurityContext.enabled` | Enable security context for Snapshotter pods | `true` |
| `disasterRecovery.cronjob.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
| `disasterRecovery.cronjob.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
| `disasterRecovery.cronjob.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `disasterRecovery.cronjob.podSecurityContext.fsGroup` | Group ID for the Snapshotter filesystem | `1001` |
| `disasterRecovery.cronjob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
| `disasterRecovery.cronjob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `disasterRecovery.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `disasterRecovery.cronjob.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
| `disasterRecovery.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `disasterRecovery.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `disasterRecovery.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
| `disasterRecovery.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `disasterRecovery.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `disasterRecovery.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `disasterRecovery.cronjob.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if disasterRecovery.cronjob.resources is set (disasterRecovery.cronjob.resources is recommended for production). | `nano` |
| `disasterRecovery.cronjob.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `disasterRecovery.cronjob.nodeSelector` | Node labels for cronjob pods assignment | `{}` |
| `disasterRecovery.cronjob.tolerations` | Tolerations for cronjob pods assignment | `[]` |
| `disasterRecovery.cronjob.podLabels` | Labels that will be added to pods created by cronjob | `{}` |
| `disasterRecovery.cronjob.serviceAccountName` | Specifies the service account to use for disaster recovery cronjob | `""` |
| `disasterRecovery.cronjob.command` | Override default snapshot container command (useful when you want to customize the snapshot logic) | `[]` |
| `disasterRecovery.pvc.existingClaim` | A manually managed Persistent Volume and Claim | `""` |
| `disasterRecovery.pvc.size` | PVC Storage Request | `2Gi` |
| `disasterRecovery.pvc.storageClassName` | Storage Class for snapshots volume | `nfs` |
| `disasterRecovery.pvc.subPath` | Path within the volume from which to mount | `""` |
### Service account parameters
| Name | Description | Value |
| --------------------------------------------- | ------------------------------------------------------------ | ------- |
| `serviceAccount.create` | Enable/disable service account creation | `true` |
| `serviceAccount.name` | Name of the service account to create or use | `""` |
| `serviceAccount.automountServiceAccountToken` | Enable/disable auto mounting of service account token | `false` |
| `serviceAccount.annotations` | Additional annotations to be included on the service account | `{}` |
| `serviceAccount.labels` | Additional labels to be included on the service account | `{}` |
### etcd "pre-upgrade" K8s Job parameters
| Name | Description | Value |
| ----------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
| `preUpgradeJob.enabled` | Enable running a pre-upgrade job on Helm upgrades that removes obsolete members | `true` |
| `preUpgradeJob.annotations` | Add annotations to the etcd "pre-upgrade" job | `{}` |
| `preUpgradeJob.podLabels` | Additional pod labels for etcd "pre-upgrade" job | `{}` |
| `preUpgradeJob.podAnnotations` | Additional pod annotations for etcd "pre-upgrade" job | `{}` |
| `preUpgradeJob.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `preUpgradeJob.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `preUpgradeJob.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `preUpgradeJob.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
| `preUpgradeJob.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `preUpgradeJob.affinity` | Affinity for pod assignment | `{}` |
| `preUpgradeJob.nodeSelector` | Node labels for pod assignment | `{}` |
| `preUpgradeJob.tolerations` | Tolerations for pod assignment | `[]` |
| `preUpgradeJob.containerSecurityContext.enabled` | Enabled "pre-upgrade" job's containers' Security Context | `true` |
| `preUpgradeJob.containerSecurityContext.seLinuxOptions` | Set SELinux options in "pre-upgrade" job's containers | `{}` |
| `preUpgradeJob.containerSecurityContext.runAsUser` | Set runAsUser in "pre-upgrade" job's containers' Security Context | `1001` |
| `preUpgradeJob.containerSecurityContext.runAsGroup`               | Set runAsGroup in "pre-upgrade" job's containers' Security Context                                                                                                                                                                                | `1001`           |
| `preUpgradeJob.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "pre-upgrade" job's containers' Security Context | `true` |
| `preUpgradeJob.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "pre-upgrade" job's containers' Security Context | `true` |
| `preUpgradeJob.containerSecurityContext.privileged` | Set privileged in "pre-upgrade" job's containers' Security Context | `false` |
| `preUpgradeJob.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "pre-upgrade" job's containers' Security Context | `false` |
| `preUpgradeJob.containerSecurityContext.capabilities.add` | List of capabilities to be added in "pre-upgrade" job's containers | `[]` |
| `preUpgradeJob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "pre-upgrade" job's containers | `["ALL"]` |
| `preUpgradeJob.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "pre-upgrade" job's containers | `RuntimeDefault` |
| `preUpgradeJob.podSecurityContext.enabled` | Enabled "pre-upgrade" job's pods' Security Context | `true` |
| `preUpgradeJob.podSecurityContext.fsGroupChangePolicy` | Set fsGroupChangePolicy in "pre-upgrade" job's pods' Security Context | `Always` |
| `preUpgradeJob.podSecurityContext.sysctls` | List of sysctls to allow in "pre-upgrade" job's pods' Security Context | `[]` |
| `preUpgradeJob.podSecurityContext.supplementalGroups` | List of supplemental groups to add to "pre-upgrade" job's pods' Security Context | `[]` |
| `preUpgradeJob.podSecurityContext.fsGroup` | Set fsGroup in "pre-upgrade" job's pods' Security Context | `1001` |
| `preUpgradeJob.resourcesPreset`                                   | Set etcd "pre-upgrade" job's container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if preUpgradeJob.resources is set (preUpgradeJob.resources is recommended for production). | `micro`          |
| `preUpgradeJob.resources` | Set etcd "pre-upgrade" job's container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `preUpgradeJob.startDelay` | Optional delay before starting the pre-upgrade hook (in seconds). | `""` |
### Defragmentation parameters
| Name | Description | Value |
| ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------ | ---------------- |
| `defrag.enabled` | Enable automatic defragmentation. This is most effective when paired with auto compaction: consider setting "autoCompactionRetention > 0". | `false` |
| `defrag.cronjob.startingDeadlineSeconds` | Number of seconds representing the deadline for starting the job if it misses scheduled time for any reason | `""` |
| `defrag.cronjob.schedule` | Schedule in Cron format to defrag (daily at midnight by default) | `0 0 * * *` |
| `defrag.cronjob.concurrencyPolicy` | Set the cronjob parameter concurrencyPolicy | `Forbid` |
| `defrag.cronjob.suspend` | Boolean that indicates if the controller must suspend subsequent executions (not applied to already started executions) | `false` |
| `defrag.cronjob.successfulJobsHistoryLimit` | Number of successful finished jobs to retain | `1` |
| `defrag.cronjob.failedJobsHistoryLimit` | Number of failed finished jobs to retain | `1` |
| `defrag.cronjob.labels` | Additional labels to be added to the Defrag cronjob | `{}` |
| `defrag.cronjob.annotations` | Annotations to be added to the Defrag cronjob | `{}` |
| `defrag.cronjob.activeDeadlineSeconds` | Number of seconds relative to the startTime that the job may be continuously active before the system tries to terminate it | `""` |
| `defrag.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` |
| `defrag.cronjob.podLabels` | Labels that will be added to pods created by Defrag cronjob | `{}` |
| `defrag.cronjob.podAnnotations` | Pod annotations for Defrag cronjob pods | `{}` |
| `defrag.cronjob.podSecurityContext.enabled` | Enable security context for Defrag pods | `true` |
| `defrag.cronjob.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
| `defrag.cronjob.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
| `defrag.cronjob.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `defrag.cronjob.podSecurityContext.fsGroup` | Group ID for the Defrag filesystem | `1001` |
| `defrag.cronjob.containerSecurityContext.enabled` | Enable containers' Security Context | `true` |
| `defrag.cronjob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `defrag.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `defrag.cronjob.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
| `defrag.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `defrag.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `defrag.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
| `defrag.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `defrag.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `defrag.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `defrag.cronjob.nodeSelector` | Node labels for pod assignment in Defrag cronjob | `{}` |
| `defrag.cronjob.tolerations` | Tolerations for pod assignment in Defrag cronjob | `[]` |
| `defrag.cronjob.serviceAccountName` | Specifies the service account to use for Defrag cronjob | `""` |
| `defrag.cronjob.command` | Override default container command for defragmentation (useful when using custom images) | `[]` |
| `defrag.cronjob.args` | Override default container args (useful when using custom images) | `[]` |
| `defrag.cronjob.resourcesPreset` | Set container resources according to one common preset | `nano` |
| `defrag.cronjob.resources` | Set container requests and limits for different resources like CPU or memory | `{}` |
| `defrag.cronjob.extraEnvVars` | Extra environment variables to be set on defrag cronjob container | `[]` |
| `defrag.cronjob.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `defrag.cronjob.extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` |
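As a sketch, enabling weekly defragmentation alongside periodic auto compaction could look like this (the schedule and retention values are illustrative; `autoCompactionMode` and `autoCompactionRetention` are the chart's etcd settings referenced in the table above):
```yaml
defrag:
  enabled: true
  cronjob:
    # Run weekly instead of the daily default
    schedule: "0 3 * * 0"
    concurrencyPolicy: Forbid
    successfulJobsHistoryLimit: 1
    failedJobsHistoryLimit: 1
# Defragmentation is most effective when auto compaction is on
autoCompactionMode: periodic
autoCompactionRetention: "1h"
```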
### Other parameters
| Name | Description | Value |
| -------------------- | -------------------------------------------------------------- | ------ |
| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` |
| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `51%` |
| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
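For example, to allow at most one unavailable pod instead of using the percentage default, you could set the following (this sketch assumes the chart only renders `maxUnavailable` when `minAvailable` is cleared, since a Kubernetes PDB cannot set both):
```yaml
pdb:
  create: true
  # Clear the default so maxUnavailable takes effect
  minAvailable: ""
  maxUnavailable: 1
```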
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install my-release \
--set auth.rbac.rootPassword=secretpassword oci://REGISTRY_NAME/REPOSITORY_NAME/etcd
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
The above command sets the etcd `root` account password to `secretpassword`.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/etcd
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/etcd/values.yaml)
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
### To 11.0.0
This version introduces the following breaking changes:
- Remove `initialClusterState`, which was unreliable at detecting cluster state. From now on, each node contacts the other members to determine cluster state. If no members are available and the data dir is empty, it bootstraps a new cluster.
- Remove `removeMemberOnContainerTermination`, which was unreliable at removing stale members during replica count updates. Instead, a pre-upgrade hook checks for and removes stale members.
- Remove support for manual scaling with `kubectl` or an autoscaler. Upgrades of any kind, including replica count changes, must be done exclusively with `helm upgrade` (see the example below). CD automation tools that respect Helm hooks, such as ArgoCD, can also be used.
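For example, growing a 3-member cluster to 5 members would be a plain `helm upgrade` (the replica count here is illustrative):
```console
helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/etcd \
  --set replicaCount=5 \
  --set auth.rbac.rootPassword=$ETCD_ROOT_PASSWORD
```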
### To 10.7.0
This version introduces image verification for security purposes. To disable it, set `global.security.allowInsecureImages` to `true`. More details at [GitHub issue](https://github.com/bitnami/charts/issues/30850).
### To 10.0.0
This major bump changes the following security defaults:
- `runAsGroup` is changed from `0` to `1001`
- `readOnlyRootFilesystem` is set to `true`
- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage; use `resources` adapted to your use case instead).
- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`.
This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones.
### To 9.0.0
This version adds a new label `app.kubernetes.io/component=etcd` to the StatefulSet and pods. Due to this change, the StatefulSet will be replaced (as it's not possible to add additional `spec.selector.matchLabels` to an existing StatefulSet) and the pods will be recreated. To upgrade to this version from a previous version, you need to run the following steps:
1. Add new label to your pods
```console
kubectl label pod my-release-0 app.kubernetes.io/component=etcd
# Repeat for all etcd pods, based on the configured .replicaCount (excluding the etcd snapshotter pod, if .disasterRecovery.enabled is set to true)
```
2. Remove the StatefulSet keeping the pods:
```console
kubectl delete statefulset my-release --cascade=orphan
```
3. Upgrade your cluster:
```console
helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/etcd --set auth.rbac.rootPassword=$ETCD_ROOT_PASSWORD
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
### To 8.0.0
This version reverts the change in the previous major bump ([7.0.0](https://github.com/bitnami/charts/tree/main/bitnami/etcd#to-700)). The default `etcd` branch is `3.5` again, as the [etcd developers](https://github.com/etcd-io/etcd/tree/main/CHANGELOG#production-recommendation) have confirmed that this version is production-ready now that the data corruption issue is solved.
### To 7.0.0
This version changes the default `etcd` branch to `3.4` as suggested by [etcd developers](https://github.com/etcd-io/etcd/tree/main/CHANGELOG#production-recommendation). In order to migrate the data follow the official etcd instructions.
### To 6.0.0
This version introduces several features and performance improvements:
- The statefulset can now be scaled using the `kubectl scale` command. Using `helm upgrade` to recalculate available endpoints is no longer needed.
- The scripts used for bootstrapping, runtime reconfiguration, and disaster recovery have been refactored and moved to the etcd container with two purposes: removing technical debt and improving stability.
- Several parameters were reorganized to simplify the structure and follow the same standard used on other Bitnami charts:
- `etcd.initialClusterState` is renamed to `initialClusterState`.
- `statefulset.replicaCount` is renamed to `replicaCount`.
- `statefulset.podManagementPolicy` is renamed to `podManagementPolicy`.
- `statefulset.updateStrategy` and `statefulset.rollingUpdatePartition` are merged into `updateStrategy`.
- `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
- `configFileConfigMap` is deprecated in favor of `configuration` and `existingConfigmap`.
- `envVarsConfigMap` is deprecated in favor of `extraEnvVars`, `extraEnvVarsCM` and `extraEnvVarsSecret`.
- `allowNoneAuthentication` is renamed to `auth.rbac.allowNoneAuthentication`.
- New parameters/features were added:
- `extraDeploy` to deploy any extra desired object.
- `initContainers` and `sidecars` to define custom init containers and sidecars.
- `extraVolumes`, `extraVolumeMounts` and `extraVolumeClaimTemplates` to define custom volumes, mount points and volume claim templates.
- Probes can be now customized, and support to startup probes is added.
- LifecycleHooks can be customized using `lifecycleHooks` parameter.
- The default command/args can be customized using `command` and `args` parameters.
- Metrics integration with Prometheus Operator no longer uses a ServiceMonitor object; it uses a PodMonitor instead.
Consequences:
- Backwards compatibility is not guaranteed unless you adapt your **values.yaml** according to the changes described above, as illustrated below.
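As a sketch of the migration, here is a pre-6.0.0 values file next to its 6.0.0 equivalent (only renamed keys shown; the values themselves are illustrative):
```yaml
# Before 6.0.0
statefulset:
  replicaCount: 3
  podManagementPolicy: Parallel
allowNoneAuthentication: true

# From 6.0.0 onwards
replicaCount: 3
podManagementPolicy: Parallel
auth:
  rbac:
    allowNoneAuthentication: true
```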
### To 5.2.0
This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
### To 5.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
### To 4.4.14
In this release we addressed a vulnerability that showed the `ETCD_ROOT_PASSWORD` environment variable in the application logs. Users are advised to update immediately. More information in [this issue](https://github.com/bitnami/charts/issues/1901).
### To 3.0.0
Backwards compatibility is not guaranteed. The following notable changes were included:
- **etcdctl** uses v3 API.
- Adds support for auto disaster recovery.
- Labels are adapted to follow the Helm charts best practices.
To upgrade from previous chart versions, create a snapshot of the keyspace and restore it in a new etcd cluster. Only v3 API data can be restored.
You can use the command below to upgrade your chart by starting a new cluster using an existing snapshot, available in an existing PVC, to initialize the members:
```console
helm install new-release oci://REGISTRY_NAME/REPOSITORY_NAME/etcd \
--set statefulset.replicaCount=3 \
--set persistence.enabled=true \
--set persistence.size=8Gi \
--set startFromSnapshot.enabled=true \
--set startFromSnapshot.existingClaim=my-claim \
--set startFromSnapshot.snapshotFilename=my-snapshot.db
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
### To 1.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions prior to 1.0.0. The following example assumes that the release name is etcd:
```console
kubectl delete statefulset etcd --cascade=false
```
## License
Copyright &copy; 2025 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# img folder
img/
# Changelog
CHANGELOG.md

View File

@@ -0,0 +1,23 @@
annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.31.1
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
icon: https://dyltqmyl993wv.cloudfront.net/downloads/logos/bitnami-mark.png
keywords:
- common
- helper
- template
- function
- bitnami
maintainers:
- name: Broadcom, Inc. All Rights Reserved.
url: https://github.com/bitnami/charts
name: common
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/common
type: library
version: 2.31.1

View File

@@ -0,0 +1,381 @@
# Bitnami Common Library Chart
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
## TL;DR
```yaml
dependencies:
- name: common
version: 2.x.x
repository: oci://registry-1.docker.io/bitnamicharts
```
```console
helm dependency update
```
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "common.names.fullname" . }}
data:
myvalue: "Hello World"
```
Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog.
## Introduction
This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
## Prerequisites
- Kubernetes 1.23+
- Helm 3.8.0+
## Parameters
The following table lists the helpers available in the library, grouped in different sections.
### Affinities
| Helper identifier | Description | Expected Input |
| ------------------------------- | ---------------------------------------------------- | ------------------------------------------------------------ |
| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
| `common.affinities.nodes` | Return a nodeAffinity definition | `dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")` |
| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` |
| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
| `common.affinities.pods` | Return a podAffinity/podAntiAffinity definition | `dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")` |
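For instance, a consuming chart might render anti-affinity and node affinity in its StatefulSet template like this (a sketch; the component name and indentation depend on the chart):
```yaml
# templates/statefulset.yaml (fragment)
      affinity:
        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" "soft" "component" "etcd" "context" $) | nindent 10 }}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" "hard" "key" "kubernetes.io/os" "values" (list "linux")) | nindent 10 }}
```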
### Capabilities
| Helper identifier | Description | Expected Input |
| --------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | --------------------------------------- |
| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
| `common.capabilities.apiVersions.has` | Return true if the apiVersion is supported | `dict "version" "batch/v1" "context" $` |
| `common.capabilities.job.apiVersion` | Return the appropriate apiVersion for job. | `.` Chart context |
| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context |
| `common.capabilities.daemonset.apiVersion` | Return the appropriate apiVersion for daemonset. | `.` Chart context |
| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context |
| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context |
| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for poddisruptionbudget. | `.` Chart context |
| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context |
| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context |
| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context |
| `common.capabilities.vpa.apiVersion` | Return the appropriate apiVersion for Vertical Pod Autoscaler. | `.` Chart context |
| `common.capabilities.psp.supported` | Returns true if PodSecurityPolicy is supported | `.` Chart context |
| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context |
| `common.capabilities.admissionConfiguration.supported` | Returns true if AdmissionConfiguration is supported | `.` Chart context |
| `common.capabilities.admissionConfiguration.apiVersion` | Return the appropriate apiVersion for AdmissionConfiguration. | `.` Chart context |
| `common.capabilities.podSecurityConfiguration.apiVersion` | Return the appropriate apiVersion for PodSecurityConfiguration. | `.` Chart context |
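As an example, a template could guard a PodMonitor behind an API-group check (a sketch; the resource shown is illustrative):
```yaml
# templates/podmonitor.yaml (fragment)
{{- if include "common.capabilities.apiVersions.has" (dict "version" "monitoring.coreos.com/v1" "context" $) }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: {{ include "common.names.fullname" . }}
{{- end }}
```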
### Compatibility
| Helper identifier | Description | Expected Input |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- |
| `common.compatibility.isOpenshift` | Return true if the detected platform is Openshift | `.` Chart context |
| `common.compatibility.renderSecurityContext` | Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC | `dict "secContext" .Values.containerSecurityContext "context" $` |
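A typical call site, sketched under the assumption that the chart exposes a `podSecurityContext` block in its values:
```yaml
# templates/statefulset.yaml (fragment)
    spec:
      securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }}
```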
### Errors
| Helper identifier | Description | Expected Input |
| --------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- |
| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
| `common.errors.insecureImages` | Throw error when original container images are replaced. The error can be bypassed by setting the `global.security.allowInsecureImages` to true. | `dict "images" (list .Values.path.to.the.imageRoot) "context" $` |
### Images
| Helper identifier | Description | Expected Input |
| --------------------------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` |
| `common.images.version` | Return the proper image version | `dict "imageRoot" .Values.path.to.the.image "chart" .Chart` , see [ImageRoot](#imageroot) for the structure. |
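For example, assuming the chart exposes an `image` values block following the [ImageRoot](#imageroot) schema, a container image could be rendered as:
```yaml
# templates/statefulset.yaml (fragment)
      containers:
        - name: etcd
          image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global "chart" .Chart) }}
          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
```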
### Ingress
| Helper identifier | Description | Expected Input |
| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context |
| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context |
| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` |
### Labels
| Helper identifier | Description | Expected Input |
| --------------------------- | --------------------------------------------------------------------------- | ----------------- |
| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
### Names
| Helper identifier | Description | Expected Input |
| ---------------------------------- | --------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- |
| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context |
| `common.names.chart` | Chart name plus version | `.` Chart context |
| `common.names.dependency.fullname` | Create a default fully qualified dependency name. | `dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $` |
### Resources
| Helper identifier | Description | Expected Input |
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- |
| `common.resources.preset` | Return a resource request/limit object based on a given preset. These presets are for basic testing and not meant to be used in production. | `dict "type" "nano"` |
### Secrets
| Helper identifier | Description | Expected Input |
| --------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "honorProvidedValues" false "context" $`, length, strong, honorProvidedValues and chartName fields are optional. |
| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` |
| `common.secrets.lookup` | Reuses the value from an existing secret, otherwise sets its value to a default value. | `dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $` |
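For instance, a chart could generate (or reuse) a root password with `common.secrets.passwords.manage`; the secret and key names below are illustrative:
```yaml
# templates/secret.yaml (fragment)
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "common.names.fullname" . }}
type: Opaque
data:
  # The helper emits a base64-encoded value suitable for `data`
  # (an assumption worth verifying against the library version in use)
  etcd-root-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "etcd-root-password" "providedValues" (list "auth.rbac.rootPassword") "length" 10 "context" $) }}
```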
### Storage
| Helper identifier | Description | Expected Input |
| ---------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------- |
| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
### TplValues
| Helper identifier | Description | Expected Input |
| ---------------------------------- | ------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, `value` is the value to be rendered as a template; `context` is frequently the chart context `$` or `.` |
| `common.tplvalues.merge` | Merge a list of values that contains template after rendering them. | `dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $` |
| `common.tplvalues.merge-overwrite` | Merge a list of values that contains template after rendering them, using overwrite semantics for conflicting keys. | `dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $` |
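For example, pod annotations that may themselves contain templates can be rendered with `common.tplvalues.render` (a sketch assuming a `podAnnotations` value):
```yaml
# templates/statefulset.yaml (fragment)
  template:
    metadata:
      {{- if .Values.podAnnotations }}
      annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
      {{- end }}
```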
### Utils
| Helper identifier | Description | Expected Input |
| ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` |
| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
| `common.utils.checksumTemplate` | Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376) | `dict "path" "/configmap.yaml" "context" $` |
### Validations
| Helper identifier | Description | Expected Input |
| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) |
| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
| `common.validations.values.mariadb.passwords` | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on where you use the mariadb chart and the helper. |
### Warnings
| Helper identifier | Description | Expected Input |
| -------------------------------- | ----------------------------------------------------------------- | ---------------------------------------------------------- |
| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. |
| `common.warnings.modifiedImages` | Warning about replaced images from the original. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. |
| `common.warnings.resources` | Warning about not setting the resource object in all deployments. | `dict "sections" (list "path1" "path2") "context" $` |
## Special input schemas
### ImageRoot
```yaml
registry:
type: string
description: Docker registry where the image is located
example: docker.io
repository:
type: string
description: Repository and image name
example: bitnami/nginx
tag:
type: string
description: image tag
example: 1.16.1-debian-10-r63
pullPolicy:
type: string
description: Specify an imagePullPolicy.
pullSecrets:
type: array
items:
type: string
description: Optionally specify an array of imagePullSecrets (evaluated as templates).
debug:
type: boolean
description: Set to true if you would like to see extra information on logs
example: false
## An instance would be:
# registry: docker.io
# repository: bitnami/nginx
# tag: 1.16.1-debian-10-r63
# pullPolicy: IfNotPresent
# debug: false
```
### Persistence
```yaml
enabled:
type: boolean
description: Whether to enable persistence.
example: true
storageClass:
type: string
description: Persistent Volume Storage Class. If set to "-", storageClassName is set to "", which disables dynamic provisioning.
example: "-"
accessMode:
type: string
description: Access mode for the Persistent Volume Storage.
example: ReadWriteOnce
size:
type: string
description: Size of the Persistent Volume Storage.
example: 8Gi
path:
type: string
description: Path to be persisted.
example: /bitnami
## An instance would be:
# enabled: true
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 8Gi
# path: /bitnami
```
### ExistingSecret
```yaml
name:
type: string
description: Name of the existing secret.
example: mySecret
keyMapping:
description: Mapping between the expected key name and the name of the key in the existing secret.
type: object
## An instance would be:
# name: mySecret
# keyMapping:
# password: myPasswordKey
```
#### Example of use
When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
```yaml
# templates/secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "common.names.fullname" . }}
labels:
app: {{ include "common.names.fullname" . }}
type: Opaque
data:
password: {{ .Values.password | b64enc | quote }}
# templates/dpl.yaml
---
...
env:
- name: PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
...
# values.yaml
---
name: mySecret
keyMapping:
password: myPasswordKey
```
### ValidateValue
#### NOTES.txt
```console
{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
```
If we force those values to be empty, we will see some alerts:
```console
helm install test mychart --set path.to.value00="",path.to.value01=""
'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
```
## Upgrading
### To 1.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
#### What changes were introduced in this major version?
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
#### Considerations when upgrading to this version
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
#### Useful links
- <https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-resolve-helm2-helm3-post-migration-issues-index.html>
- <https://helm.sh/docs/topics/v2_v3_migration/>
- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
## License
Copyright &copy; 2025 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,169 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a soft nodeAffinity definition
{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes.soft" -}}
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: {{ .key }}
operator: In
values:
{{- range .values }}
- {{ . | quote }}
{{- end }}
weight: 1
{{- end -}}
{{/*
Return a hard nodeAffinity definition
{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes.hard" -}}
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: {{ .key }}
operator: In
values:
{{- range .values }}
- {{ . | quote }}
{{- end }}
{{- end -}}
{{/*
Return a nodeAffinity definition
{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes" -}}
{{- if eq .type "soft" }}
{{- include "common.affinities.nodes.soft" . -}}
{{- else if eq .type "hard" }}
{{- include "common.affinities.nodes.hard" . -}}
{{- end -}}
{{- end -}}
{{/*
Return a topologyKey definition
{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
*/}}
{{- define "common.affinities.topologyKey" -}}
{{ .topologyKey | default "kubernetes.io/hostname" -}}
{{- end -}}
{{/*
Return a soft podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "extraNamespaces" (list "namespace1" "namespace2") "context" $) -}}
*/}}
{{- define "common.affinities.pods.soft" -}}
{{- $component := default "" .component -}}
{{- $customLabels := default (dict) .customLabels -}}
{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
{{- $extraNamespaces := default (list) .extraNamespaces -}}
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := $extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- if $extraNamespaces }}
namespaces:
- {{ .context.Release.Namespace }}
{{- with $extraNamespaces }}
{{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }}
{{- end }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
weight: 1
{{- range $extraPodAffinityTerms }}
- podAffinityTerm:
labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := .extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- if .namespaces }}
namespaces:
- {{ $.context.Release.Namespace }}
{{- with .namespaces }}
{{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }}
{{- end }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
weight: {{ .weight | default 1 -}}
{{- end -}}
{{- end -}}
{{/*
Return a hard podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "extraNamespaces" (list "namespace1" "namespace2") "context" $) -}}
*/}}
{{- define "common.affinities.pods.hard" -}}
{{- $component := default "" .component -}}
{{- $customLabels := default (dict) .customLabels -}}
{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
{{- $extraNamespaces := default (list) .extraNamespaces -}}
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := $extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- if $extraNamespaces }}
namespaces:
- {{ .context.Release.Namespace }}
{{- with $extraNamespaces }}
{{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 6 }}
{{- end }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
{{- range $extraPodAffinityTerms }}
- labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := .extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- if .namespaces }}
namespaces:
- {{ $.context.Release.Namespace }}
{{- with .namespaces }}
{{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 6 }}
{{- end }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
{{- end -}}
{{- end -}}
{{/*
Return a podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.pods" -}}
{{- if eq .type "soft" }}
{{- include "common.affinities.pods.soft" . -}}
{{- else if eq .type "hard" }}
{{- include "common.affinities.pods.hard" . -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,178 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return the target Kubernetes version
*/}}
{{- define "common.capabilities.kubeVersion" -}}
{{- default (default .Capabilities.KubeVersion.Version .Values.kubeVersion) ((.Values.global).kubeVersion) -}}
{{- end -}}
{{/*
Return true if the apiVersion is supported
Usage:
{{ include "common.capabilities.apiVersions.has" (dict "version" "batch/v1" "context" $) }}
*/}}
{{- define "common.capabilities.apiVersions.has" -}}
{{- $providedAPIVersions := default .context.Values.apiVersions ((.context.Values.global).apiVersions) -}}
{{- if and (empty $providedAPIVersions) (.context.Capabilities.APIVersions.Has .version) -}}
{{- true -}}
{{- else if has .version $providedAPIVersions -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for poddisruptionbudget.
*/}}
{{- define "common.capabilities.policy.apiVersion" -}}
{{- print "policy/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "common.capabilities.networkPolicy.apiVersion" -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for job.
*/}}
{{- define "common.capabilities.job.apiVersion" -}}
{{- print "batch/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for cronjob.
*/}}
{{- define "common.capabilities.cronjob.apiVersion" -}}
{{- print "batch/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for daemonset.
*/}}
{{- define "common.capabilities.daemonset.apiVersion" -}}
{{- print "apps/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for deployment.
*/}}
{{- define "common.capabilities.deployment.apiVersion" -}}
{{- print "apps/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for statefulset.
*/}}
{{- define "common.capabilities.statefulset.apiVersion" -}}
{{- print "apps/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "common.capabilities.ingress.apiVersion" -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for RBAC resources.
*/}}
{{- define "common.capabilities.rbac.apiVersion" -}}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for CRDs.
*/}}
{{- define "common.capabilities.crd.apiVersion" -}}
{{- print "apiextensions.k8s.io/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for APIService.
*/}}
{{- define "common.capabilities.apiService.apiVersion" -}}
{{- print "apiregistration.k8s.io/v1" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}}
{{- define "common.capabilities.hpa.apiVersion" -}}
{{- $kubeVersion := include "common.capabilities.kubeVersion" .context -}}
{{- print "autoscaling/v2" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for Vertical Pod Autoscaler.
*/}}
{{- define "common.capabilities.vpa.apiVersion" -}}
{{- $kubeVersion := include "common.capabilities.kubeVersion" .context -}}
{{- if and (not (empty $kubeVersion)) (semverCompare "<1.25-0" $kubeVersion) -}}
{{- print "autoscaling/v1beta2" -}}
{{- else -}}
{{- print "autoscaling/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if PodSecurityPolicy is supported
*/}}
{{- define "common.capabilities.psp.supported" -}}
{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}}
{{- if or (empty $kubeVersion) (semverCompare "<1.25-0" $kubeVersion) -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if AdmissionConfiguration is supported
*/}}
{{- define "common.capabilities.admissionConfiguration.supported" -}}
{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}}
{{- true -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for AdmissionConfiguration.
*/}}
{{- define "common.capabilities.admissionConfiguration.apiVersion" -}}
{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}}
{{- if and (not (empty $kubeVersion)) (semverCompare "<1.25-0" $kubeVersion) -}}
{{- print "apiserver.config.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "apiserver.config.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for PodSecurityConfiguration.
*/}}
{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}}
{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}}
{{- if and (not (empty $kubeVersion)) (semverCompare "<1.25-0" $kubeVersion) -}}
{{- print "pod-security.admission.config.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "pod-security.admission.config.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if the used Helm version is 3.3+.
A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
**To be removed when the catalog's minimum Helm version is 3.3**
*/}}
{{- define "common.capabilities.supportsHelmVersion" -}}
{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
{{- true -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,46 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return true if the detected platform is Openshift
Usage:
{{- include "common.compatibility.isOpenshift" . -}}
*/}}
{{- define "common.compatibility.isOpenshift" -}}
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC
Usage:
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) -}}
*/}}
{{- define "common.compatibility.renderSecurityContext" -}}
{{- $adaptedContext := .secContext -}}
{{- if (((.context.Values.global).compatibility).openshift) -}}
{{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}}
{{/* Remove incompatible user/group values that do not work in Openshift out of the box */}}
{{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}}
{{- if not .secContext.seLinuxOptions -}}
{{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}}
{{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/* Remove empty seLinuxOptions object if global.compatibility.omitEmptySeLinuxOptions is set to true */}}
{{- if and (((.context.Values.global).compatibility).omitEmptySeLinuxOptions) (not .secContext.seLinuxOptions) -}}
{{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}}
{{- end -}}
{{/* Remove fields that are disregarded when running the container in privileged mode */}}
{{- if $adaptedContext.privileged -}}
{{- $adaptedContext = omit $adaptedContext "capabilities" -}}
{{- end -}}
{{- omit $adaptedContext "enabled" | toYaml -}}
{{- end -}}

View File

@@ -0,0 +1,85 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Throw error when upgrading using empty passwords values that must not be empty.
Usage:
{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
Required password params:
- validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error.
- context - Context - Required. Parent context.
*/}}
{{- define "common.errors.upgrade.passwords.empty" -}}
{{- $validationErrors := join "" .validationErrors -}}
{{- if and $validationErrors .context.Release.IsUpgrade -}}
{{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
{{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
{{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
{{- $errorString = print $errorString "\n%s" -}}
{{- printf $errorString $validationErrors | fail -}}
{{- end -}}
{{- end -}}
{{/*
Throw error when original container images are replaced.
The error can be bypassed by setting the "global.security.allowInsecureImages" to true. In this case,
a warning message will be shown instead.
Usage:
{{ include "common.errors.insecureImages" (dict "images" (list .Values.path.to.the.imageRoot) "context" $) }}
*/}}
{{- define "common.errors.insecureImages" -}}
{{- $relocatedImages := list -}}
{{- $replacedImages := list -}}
{{- $retaggedImages := list -}}
{{- $globalRegistry := ((.context.Values.global).imageRegistry) -}}
{{- $originalImages := .context.Chart.Annotations.images -}}
{{- range .images -}}
{{- $registryName := default .registry $globalRegistry -}}
{{- $fullImageNameNoTag := printf "%s/%s" $registryName .repository -}}
{{- $fullImageName := printf "%s:%s" $fullImageNameNoTag .tag -}}
{{- if not (contains $fullImageNameNoTag $originalImages) -}}
{{- if not (contains $registryName $originalImages) -}}
{{- $relocatedImages = append $relocatedImages $fullImageName -}}
{{- else if not (contains .repository $originalImages) -}}
{{- $replacedImages = append $replacedImages $fullImageName -}}
{{- end -}}
{{- end -}}
{{- if not (contains (printf "%s:%s" .repository .tag) $originalImages) -}}
{{- $retaggedImages = append $retaggedImages $fullImageName -}}
{{- end -}}
{{- end -}}
{{- if and (or (gt (len $relocatedImages) 0) (gt (len $replacedImages) 0)) (((.context.Values.global).security).allowInsecureImages) -}}
{{- print "\n\n⚠ SECURITY WARNING: Verifying original container images was skipped. Please note this Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.\n" -}}
{{- else if (or (gt (len $relocatedImages) 0) (gt (len $replacedImages) 0)) -}}
{{- $errorString := "Original containers have been substituted for unrecognized ones. Deploying this chart with non-standard containers is likely to cause degraded security and performance, broken chart features, and missing environment variables." -}}
{{- $errorString = print $errorString "\n\nUnrecognized images:" -}}
{{- range (concat $relocatedImages $replacedImages) -}}
{{- $errorString = print $errorString "\n - " . -}}
{{- end -}}
{{- if or (contains "docker.io/bitnami/" $originalImages) (contains "docker.io/bitnamiprem/" $originalImages) -}}
{{- $errorString = print "\n\n⚠ ERROR: " $errorString -}}
{{- $errorString = print $errorString "\n\nIf you are sure you want to proceed with non-standard containers, you can skip container image verification by setting the global parameter 'global.security.allowInsecureImages' to true." -}}
{{- $errorString = print $errorString "\nFurther information can be obtained at https://github.com/bitnami/charts/issues/30850" -}}
{{- print $errorString | fail -}}
{{- else if gt (len $replacedImages) 0 -}}
{{- $errorString = print "\n\n WARNING: " $errorString -}}
{{- print $errorString -}}
{{- end -}}
{{- else if gt (len $retaggedImages) 0 -}}
{{- $warnString := "\n\n WARNING: Original containers have been retagged. Please note this Helm chart was tested and validated on multiple platforms using a specific set of Tanzu Application Catalog containers. Substituting original image tags could cause unexpected behavior." -}}
{{- $warnString = print $warnString "\n\nRetagged images:" -}}
{{- range $retaggedImages -}}
{{- $warnString = print $warnString "\n - " . -}}
{{- end -}}
{{- print $warnString -}}
{{- end -}}
{{- end -}}
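{{/*
Illustrative only: a minimal values override (hypothetical registry name) that bypasses the
verification above, downgrading the failure to the SECURITY WARNING path:
global:
  security:
    allowInsecureImages: true
  imageRegistry: my-registry.example.com
*/}}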

View File

@@ -0,0 +1,115 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper image name.
If the image tag and digest are not defined, the termination falls back to the chart appVersion.
{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global "chart" .Chart ) }}
*/}}
{{- define "common.images.image" -}}
{{- $registryName := default .imageRoot.registry ((.global).imageRegistry) -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .imageRoot.tag | toString -}}
{{- if not .imageRoot.tag }}
{{- if .chart }}
{{- $termination = .chart.AppVersion | toString -}}
{{- end -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else -}}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- end -}}
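{{/*
A sketch of how the helper above resolves a full image name, assuming a hypothetical values
layout:
  image:
    registry: docker.io
    repository: openebs/etcd
    tag: 3.6.4-debian-12-r0
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global "chart" .Chart) }}
renders "docker.io/openebs/etcd:3.6.4-debian-12-r0"; if a digest were set, the separator
would switch to "@" and the digest would replace the tag.
*/}}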
{{/*
Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
*/}}
{{- define "common.images.pullSecrets" -}}
{{- $pullSecrets := list }}
{{- range ((.global).imagePullSecrets) -}}
{{- if kindIs "map" . -}}
{{- $pullSecrets = append $pullSecrets .name -}}
{{- else -}}
{{- $pullSecrets = append $pullSecrets . -}}
{{- end }}
{{- end -}}
{{- range .images -}}
{{- range .pullSecrets -}}
{{- if kindIs "map" . -}}
{{- $pullSecrets = append $pullSecrets .name -}}
{{- else -}}
{{- $pullSecrets = append $pullSecrets . -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if (not (empty $pullSecrets)) -}}
imagePullSecrets:
{{- range $pullSecrets | uniq }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names evaluating values as templates
{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
*/}}
{{- define "common.images.renderPullSecrets" -}}
{{- $pullSecrets := list }}
{{- $context := .context }}
{{- range (($context.Values.global).imagePullSecrets) -}}
{{- if kindIs "map" . -}}
{{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}}
{{- else -}}
{{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
{{- end -}}
{{- end -}}
{{- range .images -}}
{{- range .pullSecrets -}}
{{- if kindIs "map" . -}}
{{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}}
{{- else -}}
{{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if (not (empty $pullSecrets)) -}}
imagePullSecrets:
{{- range $pullSecrets | uniq }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
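{{/*
Usage sketch (secret name hypothetical): with values such as
  global:
    imagePullSecrets:
      - my-registry-secret
{{- include "common.images.renderPullSecrets" (dict "images" (list .Values.image) "context" $) | nindent 6 }}
emits:
imagePullSecrets:
  - name: my-registry-secret
Duplicates across the global list and the per-image pullSecrets lists are collapsed by "uniq".
*/}}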
{{/*
Return the proper image version (ignores image revision/prerelease info and falls back to the chart appVersion)
{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }}
*/}}
{{- define "common.images.version" -}}
{{- $imageTag := .imageRoot.tag | toString -}}
{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}}
{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}}
{{- $version := semver $imageTag -}}
{{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}}
{{- else -}}
{{- print .chart.AppVersion -}}
{{- end -}}
{{- end -}}
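{{/*
For example (illustrative): a tag such as "1.25.15-debian-12-r4" matches the semver regex and
renders "1.25.15" (prerelease/revision info dropped), while a non-semver tag such as "latest"
falls back to .Chart.AppVersion.
*/}}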

View File

@@ -0,0 +1,41 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Generate backend entry that is compatible with all Kubernetes API versions.
Usage:
{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
Params:
- serviceName - String. Name of an existing service backend
- servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending on whether it is a string or an integer.
- context - Dict - Required. The context for the template evaluation.
*/}}
{{- define "common.ingress.backend" -}}
service:
name: {{ .serviceName }}
port:
{{- if typeIs "string" .servicePort }}
name: {{ .servicePort }}
{{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
number: {{ .servicePort | int }}
{{- end }}
{{- end -}}
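{{/*
Rendered-output sketch (service name hypothetical): "serviceName" "my-svc" with an integer
"servicePort" 8080 yields
service:
  name: my-svc
  port:
    number: 8080
while a string port such as "http" renders "name: http" instead.
*/}}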
{{/*
Return true if cert-manager required annotations for TLS signed
certificates are set in the Ingress annotations
Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
Usage:
{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
*/}}
{{- define "common.ingress.certManagerRequest" -}}
{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
{{- true -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,46 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}}
*/}}
{{- define "common.labels.standard" -}}
{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}}
{{- with .context.Chart.AppVersion -}}
{{- $_ := set $default "app.kubernetes.io/version" . -}}
{{- end -}}
{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .context) }}
{{- else -}}
app.kubernetes.io/name: {{ include "common.names.name" . }}
helm.sh/chart: {{ include "common.names.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Chart.AppVersion }}
app.kubernetes.io/version: {{ . | quote }}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector
{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}}
We don't want to loop over custom labels appending them to the selector
since it's very likely that it will break deployments, services, etc.
However, it's important to overwrite the standard labels if the user
overwrote them on metadata.labels fields.
*/}}
{{- define "common.labels.matchLabels" -}}
{{- if and (hasKey . "customLabels") (hasKey . "context") -}}
{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }}
{{- else -}}
app.kubernetes.io/name: {{ include "common.names.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- end -}}
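{{/*
Illustrative rendering, assuming a chart named "mayastor" installed as release "mayastor":
app.kubernetes.io/name: mayastor
app.kubernetes.io/instance: mayastor
Only this name/instance pair is safe for immutable selectors; the standard label set above
additionally carries the chart, managed-by, and version labels.
*/}}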

View File

@@ -0,0 +1,71 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "common.names.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "common.names.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "common.names.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified dependency name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Usage:
{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
*/}}
{{- define "common.names.dependency.fullname" -}}
{{- if .chartValues.fullnameOverride -}}
{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .chartName .chartValues.nameOverride -}}
{{- if contains $name .context.Release.Name -}}
{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
*/}}
{{- define "common.names.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a fully qualified app name adding the installation's namespace.
*/}}
{{- define "common.names.fullname.namespace" -}}
{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
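{{/*
Worked example (release names hypothetical): installing chart "mayastor" as release "storage"
makes common.names.fullname render "storage-mayastor", while release "mayastor-prod" (which
already contains the chart name) renders "mayastor-prod" unchanged; both results are truncated
to 63 characters and stripped of any trailing "-".
*/}}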

View File

@@ -0,0 +1,50 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "common.resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "common.resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
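{{/*
Usage sketch in a container spec (the resourcesPreset value name is an assumption):
resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 2 }}
With "type" "small" this renders requests of cpu 500m / memory 512Mi and limits of
cpu 750m / memory 768Mi, plus the fixed ephemeral-storage values above.
*/}}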

View File

@@ -0,0 +1,192 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Generate secret name.
Usage:
{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
Params:
- existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
- defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
- context - Dict - Required. The context for the template evaluation.
*/}}
{{- define "common.secrets.name" -}}
{{- $name := (include "common.names.fullname" .context) -}}
{{- if .defaultNameSuffix -}}
{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- with .existingSecret -}}
{{- if not (typeIs "string" .) -}}
{{- with .name -}}
{{- $name = . -}}
{{- end -}}
{{- else -}}
{{- $name = . -}}
{{- end -}}
{{- end -}}
{{- printf "%s" $name -}}
{{- end -}}
{{/*
Generate secret key.
Usage:
{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
Params:
- existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
- key - String - Required. Name of the key in the secret.
*/}}
{{- define "common.secrets.key" -}}
{{- $key := .key -}}
{{- if .existingSecret -}}
{{- if not (typeIs "string" .existingSecret) -}}
{{- if .existingSecret.keyMapping -}}
{{- $key = index .existingSecret.keyMapping $.key -}}
{{- end -}}
{{- end }}
{{- end -}}
{{- printf "%s" $key -}}
{{- end -}}
{{/*
Generate secret password or retrieve one if already created.
Usage:
{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "honorProvidedValues" false "context" $) }}
Params:
- secret - String - Required - Name of the 'Secret' resource where the password is stored.
- key - String - Required - Name of the key in the secret.
- providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g.: "mysql.password". Will pick the first parameter with a defined value.
- length - int - Optional - Length of the generated random password.
- strong - Boolean - Optional - Whether to add symbols to the generated random password.
- chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
- context - Context - Required - Parent context.
- failOnNew - Boolean - Optional - Defaults to true. If set to false, skip errors when adding new keys to existing secrets.
- skipB64enc - Boolean - Optional - Defaults to false. If set to true, the secret will not be base64 encoded.
- skipQuote - Boolean - Optional - Defaults to false. If set to true, no quotes will be added around the secret.
- honorProvidedValues - Boolean - Optional - Defaults to false. If set to true, the values in providedValues have higher priority than an existing secret.
The order in which this function returns a secret password:
1. Password provided via the values.yaml if honorProvidedValues = true
(If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
2. Already existing 'Secret' resource
(If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
3. Password provided via the values.yaml if honorProvidedValues = false
(If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
4. Randomly generated secret password
(A new random secret password with the length specified in the 'length' parameter will be generated and returned)
*/}}
{{- define "common.secrets.passwords.manage" -}}
{{- $password := "" }}
{{- $subchart := "" }}
{{- $chartName := default "" .chartName }}
{{- $passwordLength := default 10 .length }}
{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
{{- if $secretData }}
{{- if hasKey $secretData .key }}
{{- $password = index $secretData .key | b64dec }}
{{- else if not (eq .failOnNew false) }}
{{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
{{- end -}}
{{- end }}
{{- if and $providedPasswordValue .honorProvidedValues }}
{{- $password = tpl ($providedPasswordValue | toString) .context }}
{{- end }}
{{- if not $password }}
{{- if $providedPasswordValue }}
{{- $password = tpl ($providedPasswordValue | toString) .context }}
{{- else }}
{{- if .context.Values.enabled }}
{{- $subchart = $chartName }}
{{- end -}}
{{- if not (eq .failOnNew false) }}
{{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
{{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
{{- $passwordValidationErrors := list $requiredPasswordError -}}
{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
{{- end }}
{{- if .strong }}
{{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
{{- $password = randAscii $passwordLength }}
{{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
{{- $password = printf "%s%s" $subStr $password | toString | shuffle }}
{{- else }}
{{- $password = randAlphaNum $passwordLength }}
{{- end }}
{{- end -}}
{{- end -}}
{{- if not .skipB64enc }}
{{- $password = $password | b64enc }}
{{- end -}}
{{- if .skipQuote -}}
{{- printf "%s" $password -}}
{{- else -}}
{{- printf "%s" $password | quote -}}
{{- end -}}
{{- end -}}
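{{/*
Hedged example (secret and value names hypothetical):
{{ include "common.secrets.passwords.manage" (dict "secret" "my-creds" "key" "admin-password" "providedValues" (list "auth.password") "length" 16 "context" $) }}
On first install with auth.password unset this returns a random 16-character value,
base64-encoded and quoted; on upgrade it returns whatever the existing "my-creds" Secret
already stores under "admin-password", following the precedence order documented above.
*/}}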
{{/*
Reuses the value from an existing secret, otherwise sets its value to a default value.
Usage:
{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
Params:
- secret - String - Required - Name of the 'Secret' resource where the password is stored.
- key - String - Required - Name of the key in the secret.
- defaultValue - String - Required - The default value to use (base64-encoded on output) when the secret does not already contain the key.
- context - Context - Required - Parent context.
*/}}
{{- define "common.secrets.lookup" -}}
{{- $value := "" -}}
{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
{{- if and $secretData (hasKey $secretData .key) -}}
{{- $value = index $secretData .key -}}
{{- else if .defaultValue -}}
{{- $value = .defaultValue | toString | b64enc -}}
{{- end -}}
{{- if $value -}}
{{- printf "%s" $value -}}
{{- end -}}
{{- end -}}
{{/*
Returns whether a previous generated secret already exists
Usage:
{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
Params:
- secret - String - Required - Name of the 'Secret' resource where the password is stored.
- context - Context - Required - Parent context.
*/}}
{{- define "common.secrets.exists" -}}
{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
{{- if $secret }}
{{- true -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,21 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper Storage Class
{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
*/}}
{{- define "common.storage.class" -}}
{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}}
{{- if $storageClass -}}
{{- if (eq "-" $storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else -}}
{{- printf "storageClassName: %s" $storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
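{{/*
Illustrative behavior: a storageClass of "-" renders storageClassName: "" (explicitly
disabling dynamic provisioning), a concrete name such as "fast-ssd" (hypothetical) renders
storageClassName: fast-ssd, and an unset value renders nothing, deferring to the cluster
default. Note that global.storageClass takes precedence over the per-persistence value.
*/}}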

View File

@@ -0,0 +1,52 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Renders a value that contains a template, optionally within a scope when one is provided.
Usage:
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }}
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }}
*/}}
{{- define "common.tplvalues.render" -}}
{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }}
{{- if contains "{{" (toJson .value) }}
{{- if .scope }}
{{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }}
{{- else }}
{{- tpl $value .context }}
{{- end }}
{{- else }}
{{- $value }}
{{- end }}
{{- end -}}
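{{/*
Sketch, assuming a hypothetical value containing a template:
  podAnnotations:
    rollme: '{{ .Release.Name }}-config'
{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }}
evaluates the embedded template against the chart context; values without "{{" pass through
unchanged (strings as-is, maps via toYaml).
*/}}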
{{/*
Merge a list of values that may contain templates, after rendering them.
Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge
Usage:
{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }}
*/}}
{{- define "common.tplvalues.merge" -}}
{{- $dst := dict -}}
{{- range .values -}}
{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}}
{{- end -}}
{{ $dst | toYaml }}
{{- end -}}
{{/*
Merge a list of values that may contain templates, after rendering them.
Merge precedence is consistent with https://masterminds.github.io/sprig/dicts.html#mergeoverwrite-mustmergeoverwrite
Usage:
{{ include "common.tplvalues.merge-overwrite" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }}
*/}}
{{- define "common.tplvalues.merge-overwrite" -}}
{{- $dst := dict -}}
{{- range .values -}}
{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | mergeOverwrite $dst -}}
{{- end -}}
{{ $dst | toYaml }}
{{- end -}}

View File

@@ -0,0 +1,77 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Print instructions to get a secret value.
Usage:
{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
*/}}
{{- define "common.utils.secret.getvalue" -}}
{{- $varname := include "common.utils.fieldToEnvVar" . -}}
export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
{{- end -}}
{{/*
Build env var name given a field
Usage:
{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }}
*/}}
{{- define "common.utils.fieldToEnvVar" -}}
{{- $fieldNameSplit := splitList "-" .field -}}
{{- $upperCaseFieldNameSplit := list -}}
{{- range $fieldNameSplit -}}
{{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
{{- end -}}
{{ join "_" $upperCaseFieldNameSplit }}
{{- end -}}
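{{/*
For example, a field of "my-password" renders MY_PASSWORD: the name is split on "-", each
part upper-cased, and the parts re-joined with "_".
*/}}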
{{/*
Gets a value from .Values given its dot-separated path
Usage:
{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
*/}}
{{- define "common.utils.getValueFromKey" -}}
{{- $splitKey := splitList "." .key -}}
{{- $value := "" -}}
{{- $latestObj := $.context.Values -}}
{{- range $splitKey -}}
{{- if not $latestObj -}}
{{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
{{- end -}}
{{- $value = ( index $latestObj . ) -}}
{{- $latestObj = $value -}}
{{- end -}}
{{- printf "%v" (default "" $value) -}}
{{- end -}}
{{/*
Returns the first .Values key with a defined value, or the first key in the list if none are defined
Usage:
{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
*/}}
{{- define "common.utils.getKeyFromList" -}}
{{- $key := first .keys -}}
{{- $reverseKeys := reverse .keys }}
{{- range $reverseKeys }}
{{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
{{- if $value -}}
{{- $key = . }}
{{- end -}}
{{- end -}}
{{- printf "%s" $key -}}
{{- end -}}
{{/*
Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376).
Usage:
{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }}
*/}}
{{- define "common.utils.checksumTemplate" -}}
{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}}
{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }}
{{- end -}}
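{{/*
Usage sketch (template path hypothetical): annotate a pod so it rolls when its ConfigMap
content changes:
checksum/config: {{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }}
Metadata is omitted before hashing, so label-only edits do not trigger a rollout.
*/}}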

View File

@@ -0,0 +1,109 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Warning about using rolling tag.
Usage:
{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
*/}}
{{- define "common.warnings.rollingTag" -}}
{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html
{{- end }}
{{- end -}}
{{/*
Warning about replaced images from the original.
Usage:
{{ include "common.warnings.modifiedImages" (dict "images" (list .Values.path.to.the.imageRoot) "context" $) }}
*/}}
{{- define "common.warnings.modifiedImages" -}}
{{- $affectedImages := list -}}
{{- $printMessage := false -}}
{{- $originalImages := .context.Chart.Annotations.images -}}
{{- range .images -}}
{{- $fullImageName := printf "%s/%s:%s" .registry .repository .tag -}}
{{- if not (contains $fullImageName $originalImages) }}
{{- $affectedImages = append $affectedImages $fullImageName -}}
{{- $printMessage = true -}}
{{- end -}}
{{- end -}}
{{- if $printMessage }}
⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.
Substituted images detected:
{{- range $affectedImages }}
- {{ . }}
{{- end }}
{{- end -}}
{{- end -}}
{{/*
Warning about not setting the resource object in all deployments.
Usage:
{{ include "common.warnings.resources" (dict "sections" (list "path1" "path2") context $) }}
Example:
{{- include "common.warnings.resources" (dict "sections" (list "csiProvider.provider" "server" "volumePermissions" "") "context" $) }}
The list in the example assumes that the following values exist:
- csiProvider.provider.resources
- server.resources
- volumePermissions.resources
- resources
*/}}
{{- define "common.warnings.resources" -}}
{{- $values := .context.Values -}}
{{- $printMessage := false -}}
{{ $affectedSections := list -}}
{{- range .sections -}}
{{- if eq . "" -}}
{{/* Case where the resources section is at the root (one main deployment in the chart) */}}
{{- if not (index $values "resources") -}}
{{- $affectedSections = append $affectedSections "resources" -}}
{{- $printMessage = true -}}
{{- end -}}
{{- else -}}
{{/* Case where there are multiple resources sections (more than one main deployment in the chart) */}}
{{- $keys := split "." . -}}
{{/* We iterate through the different levels until arriving to the resource section. Example: a.b.c.resources */}}
{{- $section := $values -}}
{{- range $keys -}}
{{- $section = index $section . -}}
{{- end -}}
{{- if not (index $section "resources") -}}
{{/* If the section has enabled=false or replicaCount=0, do not include it */}}
{{- if and (hasKey $section "enabled") -}}
{{- if index $section "enabled" -}}
{{/* enabled=true */}}
{{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
{{- $printMessage = true -}}
{{- end -}}
{{- else if hasKey $section "replicaCount" -}}
{{/* We cast to int because the number 0 is not treated as an int by default */}}
{{- if (gt (index $section "replicaCount" | int) 0) -}}
{{/* replicaCount > 0 */}}
{{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
{{- $printMessage = true -}}
{{- end -}}
{{- else -}}
{{/* Default case, add it to the affected sections */}}
{{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
{{- $printMessage = true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if $printMessage }}
WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
{{- range $affectedSections }}
- {{ . }}
{{- end }}
+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
{{- end -}}
{{- end -}}
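{{/*
Example NOTES.txt wiring (section names hypothetical):
{{- include "common.warnings.resources" (dict "sections" (list "controller" "agent" "") "context" $) }}
This prints the warning only for sections that are enabled (or have replicaCount > 0) and
lack an explicit "resources" block.
*/}}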

View File

@@ -0,0 +1,51 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Auxiliary function to get the right value for existingSecret.
Usage:
{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
Params:
- subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
*/}}
{{- define "common.cassandra.values.existingSecret" -}}
{{- if .subchart -}}
{{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
{{- else -}}
{{- .context.Values.dbUser.existingSecret | quote -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for cassandra.enabled.
Usage:
{{ include "common.cassandra.values.enabled" (dict "context" $) }}
*/}}
{{- define "common.cassandra.values.enabled" -}}
{{- if .subchart -}}
{{- printf "%v" .context.Values.cassandra.enabled -}}
{{- else -}}
{{- printf "%v" (not .context.Values.enabled) -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for the key dbUser
Usage:
{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
Params:
- subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
*/}}
{{- define "common.cassandra.values.key.dbUser" -}}
{{- if .subchart -}}
cassandra.dbUser
{{- else -}}
dbUser
{{- end -}}
{{- end -}}
