crds:
  # -- Disables the installation of all CRDs if set to false
  enabled: true
  csi:
    volumeSnapshots:
      # -- Install Volume Snapshot CRDs
      enabled: true

global:
  security:
    allowInsecureImages: true

image:
  # -- Image registry to pull our product images
  registry: docker.io
  # -- Image registry's namespace
  repo: openebs
  # -- Release tag for our images
  tag: v2.10.0
  repoTags:
    # Note: Below image tag configuration is optional and typically should never be
    # used. Setting specific image tags for the different repositories proves useful
    # for some integration testing scenarios. Use the 'tag' option above to set
    # release/pre-release container image tags.
    # The below tag values will be picked for images by default.
    # If not specified, 'tag' option provided above will be picked.
    controlPlane: ""
    dataPlane: ""
    extensions: ""
  # -- ImagePullPolicy for our images
  pullPolicy: IfNotPresent
  # -- docker-secrets required to pull images if the container registry from image.registry is protected
  pullSecrets: []

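# For example, to pull the product images from a private mirror instead of
# docker.io (the registry name below is illustrative, not a default):
#   helm install mayastor mayastor/mayastor -n mayastor \
#     --set image.registry=registry.example.com \
#     --set image.tag=v2.10.0
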
# -- Node labels for pod assignment
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
# Note that if the images in use support multiple architectures,
# 'kubernetes.io/arch: amd64' should be removed and 'nodeSelector' set to its
# default value of empty '{}'.
nodeSelector:
  kubernetes.io/arch: amd64

# -- Pod scheduling priority.
# Setting this value will apply to all components except the external Chart dependencies.
# If any component has `priorityClassName` set, then this value would be overridden for that component.
# For external components like etcd, jaeger or loki, PriorityClass can only be set at component level.
priorityClassName: ""

earlyEvictionTolerations:
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 5
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 5

# -- Tolerations to be applied to all components except external Chart dependencies.
# If any component has tolerations set, then it would override this value.
# For external components like etcd, jaeger and loki, tolerations can only be set at component level.
tolerations: []

base:
  # -- Request timeout for rest & core agents
  default_req_timeout: 5s
  # -- Cache timeout for core agent & diskpool deployment
  cache_poll_period: 30s
  logging:
    # -- Valid values for format are pretty, json and compact
    format: pretty
    # -- Enable ansi color code for Pod StdOut/StdErr
    color: true
    # -- Silence specific module components
    silenceLevel:
  initContainers:
    enabled: true
    image:
      name: alpine-sh
      tag: 4.3.0
      pullPolicy: IfNotPresent
    containers:
      - name: agent-core-grpc-probe
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-agent-core 50051; do date; echo "Waiting for agent-core-grpc services..."; sleep 1; done;']
      - name: etcd-probe
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ include "etcdUrl" . }} {{ .Values.etcd.service.ports.client }}; do date; echo "Waiting for etcd..."; sleep 1; done;']
  initHaNodeContainers:
    enabled: true
    containers:
      - name: agent-cluster-grpc-probe
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-agent-core 50052; do date; echo "Waiting for agent-cluster-grpc services..."; sleep 1; done;']
  initCoreContainers:
    enabled: true
    containers:
      - name: etcd-probe
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ include "etcdUrl" . }} {{ .Values.etcd.service.ports.client }}; do date; echo "Waiting for etcd..."; sleep 1; done;']
  metrics:
    # -- Enable the metrics exporter
    enabled: true
    # -- Container port for the metrics exporter service
    port: 9502
  jaeger:
    # Enable jaeger tracing (for development only).
    # Since version 1.31 the Jaeger Operator uses webhooks to validate Jaeger custom resources (CRs).
    # This requires an installed version of the cert-manager.
    enabled: false
    initContainer: true
    agent:
      name: jaeger-agent
      port: 6831
      initContainer:
        - name: jaeger-probe
          command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 -u {{.Values.base.jaeger.agent.name}} {{.Values.base.jaeger.agent.port}}; do date; echo "Waiting for jaeger..."; sleep 1; done;']
    collector:
      name: jaeger-collector
      port: 4317
      initContainer:
        - name: jaeger-probe
          command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 -u {{.Values.base.jaeger.collector.name}} {{.Values.base.jaeger.collector.port}}; do date; echo "Waiting for jaeger..."; sleep 1; done;']
  initRestContainer:
    enabled: true
    initContainer:
      - name: api-rest-probe
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-api-rest 8081; do date; echo "Waiting for REST API endpoint to become available"; sleep 1; done;']

operators:
  pool:
    # -- Log level for diskpool operator service
    logLevel: info
    resources:
      limits:
        # -- Cpu limits for diskpool operator
        cpu: "100m"
        # -- Memory limits for diskpool operator
        memory: "32Mi"
      requests:
        # -- Cpu requests for diskpool operator
        cpu: "50m"
        # -- Memory requests for diskpool operator
        memory: "16Mi"
    # -- Set tolerations, overrides global
    tolerations: []
    # -- Set PriorityClass, overrides global
    priorityClassName: ""

jaeger-operator:
  # Name of jaeger operator
  name: "{{ .Release.Name }}"
  jaeger:
    # Install jaeger-operator
    create: false
  collector:
    service:
      otlp:
        grpc: true
  rbac:
    # Create a clusterRole for Jaeger
    clusterRole: true
  tolerations: []
  priorityClassName: ""

agents:
  core:
    # -- Request timeout for core agents
    # Default value is defined in .base.default_req_timeout
    requestTimeout:
    # -- Enable minimal timeouts
    minTimeouts: true
    # -- Log level for the core service
    logLevel: info
    capacity:
      thin:
        # -- The allowed pool commitment limit when dealing with thin provisioned volumes.
        # Example: If the commitment is 250 and the pool is 10GiB we can overcommit the pool
        # up to 25GiB (create 2 10GiB and 1 5GiB volume) but no further.
        poolCommitment: "250%"
        # -- When creating replicas for an existing volume, each replica pool must have at least
        # this much free space percentage of the volume size.
        # Example: if this value is 40, the pool has 40GiB free, then the max volume size allowed
        # to be created on the pool is 100GiB.
        volumeCommitment: "40%"
        # -- Same as the `volumeCommitment` argument, but applicable only when creating replicas
        # for a new volume.
        volumeCommitmentInitial: "40%"
        # -- When creating snapshots for an existing volume, each replica pool must have at least
        # this much free space percentage of the volume size.
        # Example: if this value is 40, the pool has 40GiB free, then the max volume size allowed
        # to be snapped on the pool is 100GiB.
        snapshotCommitment: "40%"
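        # Putting the knobs above together (numbers are illustrative): with a
        # 10GiB pool, poolCommitment "250%" caps the total thin-provisioned
        # replica capacity at 25GiB, while volumeCommitment "40%" additionally
        # requires 4GiB of free space on the pool before a replica of a 10GiB
        # volume may be placed there.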
    rebuild:
      # -- The maximum number of system-wide rebuilds permitted at any given time.
      # If set to an empty string, there are no limits.
      maxConcurrent: ""
      partial:
        # -- Partial rebuild uses a log of missed IO to rebuild replicas which have become
        # temporarily faulted, hence it is a bit faster, depending on the log size.
        enabled: true
        # -- If a faulted replica comes back online within this time period then it will be
        # rebuilt using the partial rebuild capability. Otherwise, the replica will be fully rebuilt.
        # A blank value "" means an internally derived value will be used.
        waitPeriod: ""
    # The maximum number of concurrent create volume requests.
    maxCreateVolume: 10
    # -- Enable extended volume health information, which helps generate the volume status more accurately.
    volumeHealth: true
    resources:
      limits:
        # -- Cpu limits for core agents
        cpu: "1000m"
        # -- Memory limits for core agents
        memory: "128Mi"
      requests:
        # -- Cpu requests for core agents
        cpu: "500m"
        # -- Memory requests for core agents
        memory: "32Mi"
    # -- Set tolerations, overrides global
    tolerations: []
    # -- Set PriorityClass, overrides global.
    # If both local and global are not set, the final deployment manifest has a mayastor custom critical priority class assigned to the pod by default.
    # Refer the `templates/_helpers.tpl` and `templates/mayastor/agents/core/agent-core-deployment.yaml` for more details.
    priorityClassName: ""
    # -- Prefer encrypted pools for volume replicas.
    # If a volume wasn't provisioned with an encryption storageclass, we try to place the replicas
    # of such a volume onto encrypted pools on a best-effort basis, if this global is set.
    # This is effective subject to the volume spec already having been modified via the plugin to request encryption.
    encryptedPoolsSoftScheduling: false
    # -- Allow using non-persistent kernel devpaths for pool disks.
    # Enabling this will let users use kernel devpaths, e.g. /dev/sda, for diskpools. However,
    # this comes with associated risks: if the devpaths get swapped among disks, the result is
    # total data loss, especially if encryption is being used.
    allowNonPersistentDevlink: false
    # -- Default blobstore cluster size for diskpools, in bytes.
    # This value is used as the default blobstore cluster size on diskpools. It is set to 4MiB
    # internally by default, if nothing is specified here.
    # The value is also configurable via the Diskpool CR, which takes precedence over this setting.
    # This is an advanced configuration; please refer to the documentation to understand its
    # usage and implications.
    poolClusterSize: ""
  ha:
    enabled: true
    node:
      # -- Log level for the ha node service
      logLevel: info
      resources:
        limits:
          # -- Cpu limits for ha node agent
          cpu: "100m"
          # -- Memory limits for ha node agent
          memory: "64Mi"
        requests:
          # -- Cpu requests for ha node agent
          cpu: "100m"
          # -- Memory requests for ha node agent
          memory: "64Mi"
      # -- Set tolerations, overrides global
      tolerations: []
      # -- Set PriorityClass, overrides global
      priorityClassName: ""
      # -- Container port for the ha-node service
      port: 50053
    cluster:
      # -- Log level for the ha cluster service
      logLevel: info
      resources:
        limits:
          # -- Cpu limits for ha cluster agent
          cpu: "100m"
          # -- Memory limits for ha cluster agent
          memory: "64Mi"
        requests:
          # -- Cpu requests for ha cluster agent
          cpu: "100m"
          # -- Memory requests for ha cluster agent
          memory: "16Mi"

apis:
  rest:
    # -- Log level for the rest service
    logLevel: info
    healthProbes:
      readiness:
        # -- Toggle readiness probe.
        enabled: true
        # -- Frequency for the agent-core liveness probe.
        agentCoreProbeFreq: "20s"
        # -- No. of failures the readiness probe will tolerate.
        failureThreshold: 2
        # -- No. of seconds of delay before checking the readiness status.
        initialDelaySeconds: 0
        # -- No. of seconds between readiness probe checks.
        periodSeconds: 20
        # -- No. of seconds of timeout tolerance.
        timeoutSeconds: 5
      liveness:
        # -- Toggle liveness probe.
        enabled: true
        # -- No. of failures the liveness probe will tolerate.
        failureThreshold: 1
        # -- No. of seconds of delay before checking the liveness status.
        initialDelaySeconds: 0
        # -- No. of seconds between liveness probe checks.
        periodSeconds: 30
        # -- No. of seconds of timeout tolerance.
        timeoutSeconds: 5
    # -- Number of replicas of rest
    replicaCount: 1
    resources:
      limits:
        # -- Cpu limits for rest
        cpu: "100m"
        # -- Memory limits for rest
        memory: "64Mi"
      requests:
        # -- Cpu requests for rest
        cpu: "50m"
        # -- Memory requests for rest
        memory: "32Mi"
    # Rest service parameters define how the rest service is exposed
    service:
      # -- Rest K8s service type
      type: ClusterIP
      # Ports from where rest endpoints are accessible from outside the cluster, only valid if type is NodePort
      nodePorts:
        # NodePort associated with http port
        http: 30011
        # NodePort associated with https port
        https: 30010
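    # For example, with type set to NodePort, the REST API becomes reachable on
    # every cluster node (the endpoint path shown is illustrative):
    #   curl http://<node-ip>:30011/v0/nodes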
    # -- Set tolerations, overrides global
    tolerations: []
    # -- Set PriorityClass, overrides global.
    # If both local and global are not set, the final deployment manifest has a mayastor custom critical priority class assigned to the pod by default.
    # Refer the `templates/_helpers.tpl` and `templates/mayastor/apis/rest/api-rest-deployment.yaml` for more details.
    priorityClassName: ""

csi:
  image:
    # -- Image registry to pull all CSI Sidecar images
    registry: registry.k8s.io
    # -- Image registry's namespace
    repo: sig-storage
    # -- imagePullPolicy for all CSI Sidecar images
    pullPolicy: IfNotPresent
    # -- csi-provisioner image release tag
    provisionerTag: v5.2.0
    # -- csi-attacher image release tag
    attacherTag: v4.8.1
    # -- csi-snapshotter image release tag
    snapshotterTag: v8.2.0
    # -- csi-snapshot-controller image release tag
    snapshotControllerTag: v8.2.0
    # -- csi-node-driver-registrar image release tag
    registrarTag: v2.13.0
    # -- csi-resizer image release tag
    resizerTag: v1.13.2
  controller:
    # -- Log level for the csi controller
    logLevel: info
    # The maximum number of concurrent create volume requests.
    maxCreateVolume: 10
    resources:
      limits:
        # -- Cpu limits for csi controller
        cpu: "32m"
        # -- Memory limits for csi controller
        memory: "128Mi"
      requests:
        # -- Cpu requests for csi controller
        cpu: "16m"
        # -- Memory requests for csi controller
        memory: "64Mi"
    # -- Set tolerations, overrides global
    tolerations: []
    # -- Set PriorityClass, overrides global
    priorityClassName: ""
    # -- Prevent modifying the volume mode when creating a PVC from an existing VolumeSnapshot
    preventVolumeModeConversion: true
    # Enable auto garbage collection of volume resources which were associated with `Retain` PVs.
    # Before enabling this, make sure you're well aware of the dangerous downsides.
    # For more information see: <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#retain>.
    enableDangerousRetainGC: false
  node:
    logLevel: info
    topology:
      segments:
        openebs.io/csi-node: mayastor
      # -- Add topology segments to the csi-node and agent-ha-node daemonset node selector
      nodeSelector: false
    resources:
      limits:
        # -- Cpu limits for csi node plugin
        cpu: "100m"
        # -- Memory limits for csi node plugin
        memory: "128Mi"
      requests:
        # -- Cpu requests for csi node plugin
        cpu: "100m"
        # -- Memory requests for csi node plugin
        memory: "64Mi"
    nvme:
      # The nvme_core module and nvme block io timeout in humantime
      # By default it uses the "io_engine.nvme.ioTimeout" + 10s
      # Do not modify this unless you're really sure about its effects
      io_timeout: ""
      # -- The ctrl_loss_tmo (controller loss timeout) in seconds
      ctrl_loss_tmo: "1980"
      # Kato (keep alive timeout) in seconds
      keep_alive_tmo: ""
      # -- Fallback to nvme-tcp if nvme-rdma is enabled for Mayastor but rdma is not available on a particular csi-node
      tcpFallback: true
    # -- The kubeletDir directory for the csi-node plugin
    kubeletDir: /var/lib/kubelet
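    # For example, on a MicroK8s cluster the kubelet root lives under the snap
    # data directory, so this would need to be overridden (path for illustration):
    #   kubeletDir: /var/snap/microk8s/common/var/lib/kubelet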
    pluginMountPath: /csi
    socketPath: csi.sock
    # Additional arguments when creating filesystems
    mkfs_args:
      xfs: ""
    restClient:
      enabled: true
    # -- Set tolerations, overrides global
    tolerations: []
    # -- Set PriorityClass, overrides global
    priorityClassName: ""
    initContainers:
      enabled: false
      containers:
        - name: nvme-tcp-probe
          command: ['sh', '-c', 'trap "exit 1" TERM; until [ -d /sys/module/nvme_tcp ]; do [ -z "$WARNED" ] && echo "nvme_tcp module not loaded..."; WARNED=1; sleep 60; done;']
    # -- Container port for the csi-node service
    port: 10199

io_engine:
  # -- Log level for the io-engine service
  logLevel: info
  api: "v1"
  target:
    nvmf:
      # -- Enable RDMA
      # Capability of the Mayastor nvmf target to take RDMA connections if the cluster
      # nodes have RDMA device(s) configured from an RNIC.
      rdma:
        enabled: false
      # -- NVMF target interface (ip, mac, name or subnet)
      # If RDMA is enabled, please set iface to an RDMA-capable netdev name from the host
      # network. For example, if an rdma device mlx5_0 is available on a netdev eth0 on the
      # RNIC, as can be seen from the `rdma link` command output, then this field should be
      # set to eth0.
      iface: ""
      # -- Reservations Persist Through Power Loss State
      ptpl: true
      # NVMF target Command Retry Delay for volume target initiators
      hostCmdRetryDelay:
        # A command retry delay in milliseconds. A value of 0 means no delay, host may retry immediately
        crdt1: 30
  nvme:
    # -- Timeout for IOs
    # The default here is exaggerated for local disks, but we've observed that in
    # shared virtual environments having a higher timeout value is beneficial.
    # Please adjust this according to your hardware and needs.
    ioTimeout: "110s"
    # Timeout for admin commands
    adminTimeout: "30s"
    # Timeout for keep alives
    keepAliveTimeout: "10s"
    tcp:
      # -- Max size setting (both initiator and target) for an NVMe queue
      # You may need to increase this for higher outstanding IOs per volume
      maxQueueDepth: "32"
      # Max qpairs per controller.
      maxQpairsPerCtrl: "32"
  # -- Pass additional arguments to the Environment Abstraction Layer.
  # Example: --set {product}.envcontext=iova-mode=pa
  envcontext: ""
  reactorFreezeDetection:
    enabled: false
  # -- The number of cores that each io-engine instance will bind to.
  cpuCount: "2"
  # -- If not empty, overrides the cpuCount and explicitly sets the list of cores.
  # Example: --set='io_engine.coreList={30,31}'
  coreList: []
  # -- Node selectors to designate storage nodes for diskpool creation
  # Note that if the images in use support multiple architectures,
  # 'kubernetes.io/arch: amd64' should be removed.
  nodeSelector:
    openebs.io/engine: mayastor
    kubernetes.io/arch: amd64
  resources:
    limits:
      # -- Cpu limits for the io-engine
      cpu: ""
      # -- Memory limits for the io-engine
      memory: "1Gi"
      # -- Hugepage memory in 2MiB chunks
      hugepages2Mi: "2Gi"
      # -- Hugepage memory in 1GiB chunks
      hugepages1Gi:
    requests:
      # -- Cpu requests for the io-engine
      cpu: ""
      # -- Memory requests for the io-engine
      memory: "1Gi"
      # -- Hugepage memory in 2MiB chunks
      hugepages2Mi: "2Gi"
      # -- Hugepage memory in 1GiB chunks
      hugepages1Gi:
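  # The io-engine uses 2MiB hugepages; the "2Gi" request above corresponds to
  # 1024 pages per storage node. As an illustration, they can be reserved with:
  #   echo 1024 | sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
  # and verified with: grep HugePages /proc/meminfo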
  # -- Set tolerations, overrides global
  tolerations: []
  # -- Set PriorityClass, overrides global
  priorityClassName: ""
  # -- Runtime class to use. Defaults to cluster standard
  runtimeClassName: ""
  # -- Number of retries for pstor persistence before the volume target shuts itself down
  pstorRetries: 300
  # -- Container port for the io-engine service
  port: 10124

etcd:
  # -- Disable when using an external etcd cluster.
  enabled: true
  # -- (string) Url of the external etcd cluster. Note, etcd.enabled must be set to false.
  externalUrl: ""
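  # For example, to use a pre-existing etcd instead of the bundled one (the
  # endpoint below is illustrative):
  #   etcd:
  #     enabled: false
  #     externalUrl: "http://my-etcd.databases.svc.cluster.local:2379"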
  # Configuration for etcd's localpv hostpath storage class.
  localpvScConfig:
    enabled: true
    # Name of etcd's localpv hostpath storage class.
    name: "mayastor-etcd-localpv"
    # -- Host path where local etcd data is stored in.
    basePath: "/var/local/{{ .Release.Name }}/localpv-hostpath/etcd"
    # -- ReclaimPolicy of etcd's localpv hostpath storage class.
    reclaimPolicy: Delete
    # -- VolumeBindingMode of etcd's localpv hostpath storage class.
    volumeBindingMode: WaitForFirstConsumer
  # Pod labels; okay to remove the openebs logging label if required
  podLabels:
    app: etcd
    openebs.io/logging: "true"
  # -- Number of replicas of etcd
  replicaCount: 3
  # -- Kubernetes Cluster Domain
  clusterDomain: cluster.local
  auth:
    # TLS authentication for client-to-server communications
    # ref: https://etcd.io/docs/current/op-guide/security/
    client:
      secureTransport: false
    # TLS authentication for server-to-server communications
    # ref: https://etcd.io/docs/current/op-guide/security/
    peer:
      secureTransport: false
    rbac:
      create: false
      allowNoneAuthentication: true
  # Enable persistence using Persistent Volume Claims
  persistence:
    # -- If true, use a Persistent Volume Claim. If false, use emptyDir.
    enabled: true
    # -- Will define which storageClass to use in etcd's StatefulSets. Options:
    # <p> - `"manual"` - Will provision a hostpath PV on the same node. <br>
    # - `""` (empty) - Will use the default StorageClass on the cluster. </p>
    storageClass: "mayastor-etcd-localpv"
    # -- Volume size
    size: 2Gi
  persistentVolumeClaimRetentionPolicy:
    # -- PVC's reclaimPolicy
    enabled: false
    whenDeleted: Retain
    whenScaled: Retain
  # -- Use a PreStop hook to remove the etcd members from the etcd cluster on container termination
  # Ignored if lifecycleHooks is set or replicaCount=1
  removeMemberOnContainerTermination: false
  # -- AutoCompaction
  # Since etcd keeps an exact history of its keyspace, this history should be
  # periodically compacted to avoid performance degradation
  # and eventual storage space exhaustion.
  # Auto compaction mode. Valid values: "periodic", "revision".
  # - 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. 5m).
  # - 'revision' for revision number based retention.
  autoCompactionMode: revision
  # -- Auto compaction retention length. 0 means disable auto compaction.
  autoCompactionRetention: "100"
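  # For duration-based compaction instead (the retention value shown is
  # illustrative):
  #   autoCompactionMode: periodic
  #   autoCompactionRetention: "72h"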
  extraEnvVars:
    # -- Raise alarms when backend size exceeds the given quota (8GiB here).
    - name: ETCD_QUOTA_BACKEND_BYTES
      value: "8589934592"
  # Init containers parameters:
  # volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
  #
  volumePermissions:
    # chown the mounted volume; this is required if a statically provisioned hostpath volume is used
    enabled: true
    image:
      registry: docker.io
      repository: openebs/alpine-bash
      tag: 4.3.0
      pullSecrets: []
  image:
    registry: docker.io
    repository: openebs/etcd
    # extra debug information on logs
    debug: false
  # -- Pod anti-affinity preset
  # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  podAntiAffinityPreset: "hard"
  ## -- nodeSelector [object] Node labels for pod assignment
  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
  nodeSelector: {}
  # etcd service parameters define how the etcd service is exposed
  service:
    # K8s service type
    type: ClusterIP
    ports:
      # etcd client port
      client: 2379
    # Specify the nodePort(s) value(s) for the LoadBalancer and NodePort service types.
    # ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    nodePorts:
      # Port from where etcd endpoints are accessible from outside cluster
      client: 31379
      peer: ""
  tolerations: []
  priorityClassName: ""
  preUpgradeJob:
    annotations:
      "helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation"

loki:
  enabled: true
  # NOTE: For all possible storage options for loki, check https://github.com/openebs/mayastor-extensions/blob/HEAD/chart/loki-storage.md
  # Configuration for loki's localpv hostpath storage class.
  localpvScConfig:
    enabled: true
    loki:
      # Name of loki's localpv hostpath storage class.
      name: "mayastor-loki-localpv"
      # -- Host path where local loki data is stored in.
      basePath: "/var/local/{{ .Release.Name }}/localpv-hostpath/loki"
      # -- ReclaimPolicy of loki's localpv hostpath storage class.
      reclaimPolicy: Delete
      # -- VolumeBindingMode of loki's localpv hostpath storage class.
      volumeBindingMode: WaitForFirstConsumer
    minio:
      # Name of minio's localpv hostpath storage class.
      name: "mayastor-minio-localpv"
      # -- Host path where local minio data is stored in.
      basePath: "/var/local/{{ .Release.Name }}/localpv-hostpath/minio"
      # -- ReclaimPolicy of minio's localpv hostpath storage class.
      reclaimPolicy: Delete
      # -- VolumeBindingMode of minio's localpv hostpath storage class.
      volumeBindingMode: WaitForFirstConsumer
  loki:
    serviceLabels:
      app: loki
    podLabels:
      app: loki
    schemaConfig:
      configs:
        - from: 2024-04-01
          store: tsdb
          object_store: s3
          schema: v13
          index:
            prefix: loki_index_
            period: 24h
    commonConfig:
      replication_factor: 3
    ingester:
      chunk_encoding: snappy
      # Configure these if a quicker ingestion is needed, i.e. faster push to your bucket.
      # chunk_idle_period: 3m
      # chunk_retain_period: 1m
      # max_chunk_age: 6m
    tracing:
      enabled: true
    querier:
      max_concurrent: 1
    limits_config:
      ingestion_burst_size_mb: 1000
      ingestion_rate_mb: 10000
  singleBinary:
    replicas: 3
    drivesPerNode: 1
    persistence:
      enabled: true
      storageClass: "mayastor-loki-localpv"
      accessModes:
        - ReadWriteOnce
      size: 2Gi
  minio:
    replicas: 3
    drivesPerNode: 1
    mode: distributed
    # Disable this if you want to enable an external s3 bucket, and uncomment the storage section above.
    enabled: true
    persistence:
      storageClass: "mayastor-loki-localpv"
      size: 2Gi
  deploymentMode: SingleBinary
  lokiCanary:
    enabled: false
  chunksCache:
    enabled: false
  test:
    enabled: false
  gateway:
    enabled: false
  resultsCache:
    enabled: false
  backend:
    replicas: 0
  read:
    replicas: 0
  write:
    replicas: 0
  ingester:
    replicas: 0
  querier:
    replicas: 0
  queryFrontend:
    replicas: 0
  queryScheduler:
    replicas: 0
  distributor:
    replicas: 0
  compactor:
    replicas: 0
  indexGateway:
    replicas: 0
  bloomCompactor:
    replicas: 0
  bloomGateway:
    replicas: 0
  sidecar:
    image:
      repository: docker.io/kiwigrid/k8s-sidecar

alloy:
  logging_config:
    # Enable debugging on alloy components.
    debugging: false
    # -- Labels to enable scraping on, at-least one of these labels should be present.
    labels:
      openebs.io/logging: true
    # -- X-Scope-OrgID to be populated while pushing logs. Make sure the caller also uses the same.
    tenant_id: openebs
  enabled: true
  alloy:
    mounts:
      varlog: true
    configMap:
      create: true
      content: |
        livedebugging {
          enabled = {{ .Values.logging_config.debugging }}
        }

        discovery.kubernetes "{{ .Release.Name }}_pods_name" {
          role = "pod"
        }

        discovery.relabel "{{ .Release.Name }}_pods_name" {
          targets = discovery.kubernetes.{{ .Release.Name }}_pods_name.targets

          {{- $labels := .Values.logging_config.labels }}
          {{- if $labels }}
          {{- $keys := (keys $labels | sortAlpha) }}

          rule {
            source_labels = [
              {{- range $key := $keys }}
              "__meta_kubernetes_pod_label_{{ $key | replace "." "_" | replace "/" "_" }}",
              {{- end }}
            ]
            separator = ";"
            regex = "^{{ include "regex_or" (dict "labels" $labels "keys" $keys) }}$"
            action = "keep"
          }

          {{- end }}

          rule {
            regex = "__meta_kubernetes_pod_label_(.+)"
            action = "labelmap"
          }

          rule {
            source_labels = ["__meta_kubernetes_namespace"]
            separator = "/"
            target_label = "job"
          }

          rule {
            source_labels = ["__meta_kubernetes_pod_name"]
            target_label = "pod"
          }

          rule {
            source_labels = ["__meta_kubernetes_pod_container_name"]
            target_label = "container"
          }

          rule {
            source_labels = ["__meta_kubernetes_pod_node_name"]
            target_label = "hostname"
          }

          rule {
            source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"]
            separator = "/"
            target_label = "__path__"
            replacement = "/var/log/pods/*$1/*.log"
          }
        }

        local.file_match "{{ .Release.Name }}_pod_files" {
          path_targets = discovery.relabel.{{ .Release.Name }}_pods_name.output
        }

        loki.source.file "{{ .Release.Name }}_pod_logs" {
          targets = local.file_match.{{ .Release.Name }}_pod_files.targets
          forward_to = [loki.process.{{ .Release.Name }}_process_logs.receiver]
        }

        loki.process "{{ .Release.Name }}_process_logs" {
          forward_to = [loki.write.default.receiver]

          stage.docker { }

          stage.replace {
            expression = "(\\n)"
            replace = ""
          }

          stage.multiline {
            firstline = "^ \\x1b\\[2m(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2}).(\\d{6})Z"
          }

          stage.multiline {
            firstline = "^ (\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2}).(\\d{6})Z"
          }
        }

        loki.write "default" {
          endpoint {
            url = "http://{{ .Release.Name }}-loki:3100/loki/api/v1/push"
            tenant_id = "{{ .Values.logging_config.tenant_id }}"
          }
          external_labels = {}
        }

        {{- define "regex_or" -}}
        {{- $labels := .labels -}}
        {{- $keys := .keys -}}
        {{- $numKeys := len $keys -}}
        {{- $regexParts := list -}}
        {{- range $i, $key := $keys -}}
        {{- $part := list -}}
        {{- range $j := until $numKeys -}}
        {{- if eq $j $i -}}
        {{- $part = append $part (get $labels $key) -}}
        {{- else -}}
        {{- $part = append $part ".*" -}}
        {{- end -}}
        {{- end -}}
        {{- $regexParts = append $regexParts (join ";" $part) -}}
        {{- end -}}
        {{- join "|" $regexParts -}}
        {{- end -}}
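  # The "regex_or" helper above ORs the configured scrape labels together. For
  # the single label {openebs.io/logging: "true"} it renders "true", so the keep
  # rule matches "^true$"; for two labels {a: "1", b: "2"} (an illustrative
  # example) it would render "1;.*|.*;2", keeping a pod when at least one of its
  # label values matches.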

# Eventing which enables or disables eventing-related components.
eventing:
  enabled: true

# Configuration for the nats message-bus. This is an eventing component, and is enabled when
# 'eventing.enabled' is set to 'true'.
nats:
  nats:
    image:
      pullPolicy: IfNotPresent
      registry: docker.io
    jetstream:
      enabled: true
      memStorage:
        enabled: true
        # Size of a nats message is around 0.3 KB, so this can store around 10K messages.
        size: "5Mi"
      fileStorage:
        enabled: false
  # Affinity for pod assignment
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/name: nats
          topologyKey: kubernetes.io/hostname
  cluster:
    enabled: true
    replicas: 3
  # Define if NATS is using FQDN name for clustering (i.e. nats-0.nats.default.svc.cluster.local) or short name (i.e. nats-0.nats.default).
  useFQDN: false
  statefulSetPodLabels:
    app: nats
    openebs.io/logging: "true"
  # The nats box can be installed for debugging; here it is disabled by default.
  natsbox:
    enabled: false
    image:
      registry: docker.io
  reloader:
    image:
      registry: docker.io
  exporter:
    image:
      registry: docker.io

obs:
  callhome:
    # -- Enable callhome
    enabled: true
    # -- Log level for callhome
    logLevel: "info"
    sendReport: true
    resources:
      limits:
        # -- Cpu limits for callhome
        cpu: "100m"
        # -- Memory limits for callhome
        memory: "32Mi"
      requests:
        # -- Cpu requests for callhome
        cpu: "50m"
        # -- Memory requests for callhome
        memory: "16Mi"
    # -- Set tolerations, overrides global
    tolerations: []
    # -- Set PriorityClass, overrides global
    priorityClassName: ""
  # Eventing component enabled/disabled based on obs.callhome.enabled value
  stats:
    # -- Log level for stats
    logLevel: "info"
    resources:
      limits:
        # -- Cpu limits for stats
        cpu: "100m"
        # -- Memory limits for stats
        memory: "32Mi"
      requests:
        # -- Cpu requests for stats
        cpu: "50m"
        # -- Memory requests for stats
        memory: "16Mi"
    service:
      # -- Rest K8s service type
      type: ClusterIP
      # Ports from where rest endpoints are accessible from outside the cluster, only valid if type is NodePort
      nodePorts:
        # NodePort associated with http port
        http: 90011
        # NodePort associated with https port
        https: 90010

storageClass:
  enabled: true
  nameSuffix: single-replica
  default: false
  # -- Enable volume expansion for the default StorageClass.
  allowVolumeExpansion: true
  parameters:
    protocol: nvmf
    repl: 1
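# A PVC can then consume Mayastor storage through the generated class; the class
# name below assumes the chart's default "mayastor" prefix plus the nameSuffix
# configured above:
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: my-volume
#   spec:
#     storageClassName: mayastor-single-replica
#     accessModes: ["ReadWriteOnce"]
#     resources:
#       requests:
#         storage: 5Gi
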
localpv-provisioner:
  # -- Enables the openebs dynamic-localpv-provisioner. If disabled, modify etcd and loki storage class accordingly.
  enabled: true
  localpv:
    # -- Set the PriorityClass for the LocalPV Hostpath provisioner Deployment.
    priorityClassName: "{{ .Release.Name }}-cluster-critical"
  hostpathClass:
    # -- Enable default hostpath localpv StorageClass.
    enabled: false
  analytics:
    enabled: true

preUpgradeHook:
  # -- Enable/Disable mayastor pre-upgrade hook
  enabled: true
  # -- Node tolerations for server scheduling to nodes with taints
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  ##
  tolerations: []
  # -- Optional array of imagePullSecrets containing private registry credentials
  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  imagePullSecrets: []
  # - name: secretName
  # Labels to be added to the Job Pod.
  podLabels:
    openebs.io/logging: "true"
  # Extra annotations to be added to all the hook resources.
  # This helps in debugging by leaving the Job/Pod instances lying around
  annotations:
    "helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation"
  image:
    # -- The container image registry URL for the hook job
    registry: docker.io
    # -- The container repository for the hook job
    repo: openebs/kubectl
    # -- The container image tag for the hook job
    tag: "1.25.15"
    # -- The imagePullPolicy for the container
    pullPolicy: IfNotPresent