######################################
|
|
## Globals ##
|
|
######################################
|
|
|
|
global:
|
|
#image settings
|
|
image:
|
|
repository: docker.repository.axway.com/amplifyfusion-docker-prod
|
|
#pullPolicy the image pull policy
|
|
pullPolicy: IfNotPresent
|
|
# imagePullSecrets - secret that stores the credentials used to pull Docker images from a registry
# This is the image pull secret used by all the charts
# Use your Axway credentials if pulling from the Axway repository
# As a customer, create the secret as follows:
# REGISTRY_USERNAME= # credentials from your service account
# REGISTRY_PASSWORD= # credentials from your service account
# REGISTRY_SERVER=docker.repository.axway.com
# kubectl create secret docker-registry ampint-docker-artifactory --docker-server="${REGISTRY_SERVER}" --docker-username="${REGISTRY_USERNAME}" --docker-password="${REGISTRY_PASSWORD}"
|
|
imagePullSecrets:
|
|
- name: ampint-docker-artifactory
|
|
# If dockerconfigjson is set, a docker-registry secret named ampint-docker-artifactory will be created from that value
# If secrets are managed externally, leave this value blank
|
|
createPullSecret:
|
|
dockerconfigjson: ""
|
|
|
|
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
initContainers:
|
|
resources:
|
|
limits:
|
|
cpu: 100m
|
|
memory: 100Mi
|
|
requests:
|
|
cpu: 100m
|
|
memory: 100Mi
|
|
|
|
# unique identifier for your Fusion
|
|
appEnv: apilab
|
|
|
|
# Choose the HOSTNAME for your Fusion here:
|
|
external_domain: "dp.apilab.us"
|
|
|
|
# The domain name for your control plane. Only used when deploying a shared Fusion, in which case this must be set.
|
|
ctlplane_domain: ""
|
|
# Prefix for the Envoy load balancer domain name when deploying a shared Fusion, i.e. <shared_lb_prefix>.<external_domain>
|
|
shared_lb_prefix: "ha-lb"
|
|
|
|
# if multihost is enabled, hostnames are a combination of the listener prefix and external_domain
# e.g. <multihost.listeners.tls.webhook.hostPrefix>.<external_domain> (here: webhook-server.dp.apilab.us)
|
|
multihost:
|
|
enabled: false
|
|
listeners:
|
|
ssh:
|
|
port: 22
|
|
sftp:
|
|
enabled: true
|
|
hostPrefix: sftp-server
|
|
tls:
|
|
port: 443
|
|
https:
|
|
enabled: true
|
|
hostPrefix: https-server
|
|
webhook:
|
|
enabled: true
|
|
hostPrefix: webhook-server
|
|
api:
|
|
enabled: true
|
|
hostPrefix: api-server
|
|
api_mtls:
|
|
enabled: true
|
|
hostPrefix: api-mtls-server
|
|
tcp:
|
|
port: 80
|
|
http:
|
|
enabled: true
|
|
hostPrefix: http-server
|
|
|
|
# claimName a reference to the file share claim name
|
|
claimName: sharedstorage-claim
|
|
# volumeStorageName the volume name of the persistent fileshare
|
|
volumeStorageName: fileshare-storage
|
|
# clusterKey is obtained from the Control Plane User Interface
|
|
clusterKey: "eyJpZCI6ImNiNTRmOTYwLTBhNmYtNGM0OC1hYjgzLTIyY2QyOGQxZjQyNiIsIm5hbWUiOiJBcGlsYWIiLCJ0ZW5hbnQiOiJjeDM3IiwibW9kZSI6ImRlc2lnbiIsInNlY3JldEtleSI6IjZlY2JhYjliZTc0NTU1M2NjOTJiYWUwZTAzM2UyZjY0MmEwYmUyOTZiYWM5MTY0NDZjNmQwNGQxZmVkNmNmN2YiLCJkb21haW4iOiJzYW5kYm94LmZ1c2lvbi5zZXJ2aWNlcy5heHdheS5jb20iLCJkbnMiOiJhdXMiLCJpbV91cmwiOiJ3c3M6Ly9zZXJ2aWNlcy5zYW5kYm94LmZ1c2lvbi5zZXJ2aWNlcy5heHdheS5jb20vbW9uaXRvci93cyIsImF1ZGl0X3NlcnZpY2VfdXJsIjoid3NzOi8vc2VydmljZXMuc2FuZGJveC5mdXNpb24uc2VydmljZXMuYXh3YXkuY29tL2F1ZGl0c2VydmljZS93cyIsImRlc2NyaXB0aW9uIjoiQXBpbGFiIE9DUCBEYXRhIFBsYW5lIiwicnVudGltZVR5cGUiOiJQUklWQVRFX0NMT1VEIiwiY2x1c3RlclJlZklkIjoiY2I1NGY5NjAwYTZmNGM0OGFiODMyMmNkMjhkMWY0MjYiLCJjbHVzdGVyQWxpYXMiOiJDdXN0b21lciBNYW5hZ2VkIiwiY29ubmVjdGVkIjpmYWxzZSwiYWN0aXZlIjp0cnVlfQ=="
|
|
|
|
nameOverride: ""
|
|
fullnameOverride: ""
|
|
|
|
alpinetools:
|
|
image:
|
|
name: 1.13/alpine-tools
|
|
tag: 1.4.4
|
|
|
|
######################################
|
|
## Common Chart ##
|
|
######################################
|
|
|
|
common:
|
|
# enabled means the common chart is enabled
|
|
enabled: true
|
|
|
|
######################################
|
|
## Environment config ##
|
|
######################################
|
|
|
|
# Configure the email targets for Fusion here (for example, for admin email notifications)
|
|
admin_email: "dp-admin@apilab.us" # for example: "myadminemail@example.com"
|
|
email_host: "mailhog.mailhog.svc.cluster.local" # Email host name -for example smtp.office365.com
|
|
email_port: "1025" #port number for example 587
|
|
email_username: "dp-admin@apilab.us"
|
|
email_usetls: false
|
|
# set to true if email password is required
|
|
email_authentication: false
|
|
# email from address
|
|
email_frommailid: "dp-admin@apilab.us" # eg myemail@example.com
|
|
|
|
######################################
|
|
## General conditions ##
|
|
######################################
|
|
|
|
# acceptGeneralConditions In order to install Fusion, you need to set acceptGeneralConditions to "yes":
|
|
# You hereby accept that the Axway Products and/or Services shall be governed exclusively by the Axway General Terms and Conditions located at Axway General Conditions,
|
|
# https://www.axway.com/sites/default/files/Legal_documents/License_general_conditions/Axway_General_Conditions_version_april_2014_eng_(France).pdf
|
|
# unless an agreement has been signed with Axway in which case such agreement shall apply.
|
|
acceptGeneralConditions: "yes"
|
|
|
|
####################################################
|
|
## [optional] Certificate auto-generation ##
|
|
####################################################
|
|
|
|
certificate:
|
|
# The key reference to the cert in the domain-certificate secret. If using cert-manager, leave as 'keystore.p12'
|
|
# To create the required secret yourself run: `kubectl create secret generic domain-certificate --from-file=keystore.p12=/path/to/cert`
|
|
name: "keystore.p12"
|
|
enabled: true
|
|
# Set the password for your certificate, or leave empty and create 'certificate-password' secret external to helm deployment
|
|
# A password is needed whether you create the certificate yourself or generate the secret using the cert-manager feature below
|
|
# 'certificate-password' secret should have 1 data entry with the key 'password'
|
|
password: "dGgxcnQzM25sZXR0ZXJTLg==" # base 64 encoded PKCS 12 password: printf 'mystorepassword' | base64 -w 0
|
|
generate:
|
|
# Set generate.enabled to true if you have cert-manager configured and want to use it to generate and auto-renew your certificate
|
|
# warning: if cert-manager isn't installed already, the Certificate CRD may not exist yet, and your installation will be rejected
|
|
enabled: false
|
|
#duration that the cert is valid for
|
|
duration: 2190h # 3 months
|
|
#renewBefore - the cert will be renewed this amount of time before its expiry date
|
|
renewBefore: 168h # 1 week
|
|
subject:
|
|
#optional organizations to be used on the certificate
|
|
organizations:
|
|
- fusion
|
|
issuerRef:
|
|
#name -the name of the ClusterIssuer resource in your environment
|
|
name: letsencrypt-dns01-cloudflare
|
|
kind: ClusterIssuer
|
|
|
|
####################################################
|
|
## [optional] domain-certificate Watch ##
|
|
####################################################
|
|
|
|
domainCertWatch:
|
|
enabled: false
|
|
# Schedule for running the domain-certificate watch job
schedule: "0 4 * * *" # runs every day at 04:00
|
|
|
|
nameOverride: ""
|
|
|
|
# service account config
|
|
serviceAccount:
|
|
enabled: true
|
|
preexisting: false
|
|
annotations: {}
|
|
name: domain-cert-watch
|
|
|
|
calicoNetpol:
|
|
enabled: false
|
|
|
|
securityContext:
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
runAsUser: 10010
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
runAsGroup: 10020
|
|
runAsNonRoot: true
|
|
allowPrivilegeEscalation: false
|
|
readOnlyRootFilesystem: true
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
podSecurityContextEnabled: true
|
|
|
|
podSecurityContext:
|
|
supplementalGroups:
|
|
- 1001
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
fsGroup: 1001
|
|
|
|
#Number of seconds a job should live after it is finished.
|
|
job_ttl: "43200"
|
|
|
|
######################################
|
|
## Secrets ##
|
|
######################################
|
|
# This block generates secrets that will be used by the other Helm charts
|
|
# ALL SECRET VALUES MUST BE BASE64-ENCODED (to avoid YAML issues):
|
|
|
|
# frommail secret settings
|
|
fromMail:
|
|
name: "frommail"
|
|
# set to true if managing secrets external to helm deployment
|
|
existingSecret: false
|
|
# Values if not using existing secret
|
|
# This secret allows Fusion to send emails from a valid email address that you control. Fill in with valid, preexisting credentials
|
|
password: "" # base 64 encoded credentials: printf 'mypassword' | base64 -w 0
|
|
|
|
# datagrid-credentials secret settings - credentials for the datagrid (Valkey or another in-memory database)
|
|
datagridCredentials:
|
|
name: "datagrid-credentials"
|
|
# set to true if managing secrets external to helm deployment
|
|
existingSecret: false
|
|
# Values if not using existing secret
|
|
password: "YWRtaW4=" # base 64 encoded password
|
|
|
|
# cluster-details secret settings
|
|
clusterDetails:
|
|
name: "cluster-details"
|
|
# set to true if managing secrets external to helm deployment
|
|
existingSecret: false
|
|
# Values if not using existing secret
|
|
# If not deploying a shared Fusion, do not set. These will be retrieved from your clusterKey
|
|
# When deploying a shared Fusion, these values can be retrieved from your control plane.
|
|
jwtSecret: ""
|
|
clusterId: ""
|
|
|
|
####################################################
|
|
## Persistence Setup
|
|
####################################################
|
|
|
|
#common settings for persistence of data
|
|
persistence:
|
|
# claimRequestCapacity the claim's requested storage capacity size
|
|
claimRequestCapacity: 5Gi
|
|
# claimAccessMode the claim's mount access mode
|
|
claimAccessMode: ReadWriteMany
|
|
# volumeName the volume's name
|
|
volumeName: shared-volume
|
|
# volumeCapacity the volume's storage capacity size
|
|
volumeCapacity: 5Gi
|
|
# volumeReclaimPolicy the volume's reclaim policy
|
|
volumeReclaimPolicy: Retain
|
|
# volumeAccessMode the volume's mount mode
|
|
volumeAccessMode: ReadWriteMany
|
|
|
|
## Azure Files ##
|
|
azfiles:
|
|
enabled: false
|
|
## Azure FileShare ##
|
|
# If you are using Azure FileShare, modify the settings here
|
|
|
|
resourceGroup: "" # where AKS cluster is installed ex: prf01-ampint-ddp-rg01
|
|
storageAccountName: "" # Storage account name where file storage created ex: prf02ampintddpsa01
|
|
fileshareName: "" # Name of your fileshare tst01ampintdd02afs01
|
|
|
|
# azurefs-secret - this secret is used to access Azure file storage; use the storage account name and its key as the values.
|
|
# set existingSecret to true if managing secrets external to helm deployment
|
|
secretName: "azurefs-secret"
|
|
existingSecret: false
|
|
azureStorageAccountKey: "" # Storage account key1 value base 64 encoded password to configure: printf 'password14' | base64 -w 0
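# Example (illustrative; the data keys shown follow the Azure Files CSI convention and may differ from what the chart expects):
#   kubectl create secret generic azurefs-secret \
#     --from-literal=azurestorageaccountname="<storageAccountName>" \
#     --from-literal=azurestorageaccountkey="<storageAccountKey>"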
|
|
|
|
# when using Azure Files for the backing shared persistent volume with the SMB protocol, you can use this option to facilitate upgrades
|
|
forceCloseHandles:
|
|
enabled: false
|
|
# the client ID of the VMSS managed identity
# it must have the role "Storage File Data Privileged Reader" (scope "storage", resource: your storage account)
|
|
vmssManagedIdentityClientId: ""
|
|
|
|
## Amazon EFS ##
|
|
# If you are using Amazon EFS, modify the settings here
|
|
efs:
|
|
enabled: false # set to true if using Amazon EFS
|
|
#volumeHandle - the volume handle, composed of the EFS file system id and access point id as "<file system id>::<access point id>", for example fs-xxxxxxxxx::fsap-xxxxxxxxx
|
|
volumeHandle: ""
|
|
|
|
## Standard NFS ##
|
|
# If you are using standard NFS, modify the settings here
|
|
nfs:
|
|
enabled: false # set to true if using standard NFS
|
|
staticPvc: false # if an NFS server is already set up, leave staticPvc set to false
|
|
storageClassName: "nfs-csi" #provide the storageClassName
|
|
server: "" # example - fs-003ea2414e03f749d.efs.ap-south-1.amazonaws.com # put your nfs server info here
|
|
path: / # if you are not using / but a subdirectory, then make sure it already exists. Make sure gid 1001 is allowed to rw in that directory (e.g. perform a chown 1001:1001 . in your NFS)
|
|
|
|
# use csi or nfs depending on whether you want to mount the volume through the NFS CSI driver or directly as NFS
|
|
# if you use csi, make sure you have installed the csi driver:
|
|
# helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
|
|
# helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system --version v4.1.0
|
|
mode: csi
|
|
mountOptions:
|
|
- nfsvers=3 # put your mount options here, as a yaml list
|
|
|
|
## GENERIC PVC ##
|
|
## PVC and PV based only on a storage class provided by the K8s/OCP administrator
|
|
pvc:
|
|
enabled: true
|
|
claimAccessMode: ReadWriteMany
|
|
claimRequestCapacity: "10Gi"
|
|
storageClass: "nfs-csi"
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
|
|
#Environments properties
|
|
s3_payload: "false"
|
|
sftp_server_port: "2222"
|
|
|
|
#email details shared Fusion
|
|
support_email: ""
|
|
|
|
#Provide control plane information for clustering
|
|
clusterRefId: '{{ (eq .Values.global.clusterKey "") | ternary "" (printf "%s" (.Values.global.clusterKey | b64dec | fromJson).clusterRefId) }}'
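# Example (illustrative): the value above is derived by base64-decoding global.clusterKey and reading
# its clusterRefId field; to inspect it manually you can run something like:
#   echo "$CLUSTER_KEY" | base64 -d | jq -r '.clusterRefId'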
|
|
truststore: truststore.jks
|
|
|
|
# Set to true to manage ConfigMaps separately from the helm chart deployment
|
|
#### WARNING!! ####
|
|
# Values set in configMaps are critical to the functionality of the application
|
|
# Names and values must be consistent with what is expected
|
|
# If these are not managed correctly, the application may not work as intended
|
|
externalConfigMaps: false
|
|
|
|
# Set to false if the Fluent Bit daemon should tap into the pods' log streams (i.e. /var/log/containers);
# this should be false for the SaaS solution, where Fluent Bit is allowed to run with root permissions.
# The default is true for customer-managed solutions, where environments are restricted and application
# file-system logging is used instead of console logging with a Fluent Bit setup.
|
|
logFileEnabled: true
|
|
|
|
# OpenTelemetry settings
|
|
openTelemetry:
|
|
enabled: false # Set to true if we want to generate the traces
|
|
exporter:
|
|
grpc:
|
|
enabled: false # Set to true if we want to export traces using gRPC
|
|
endpoint: "http://fluent-bit:4317"
|
|
http:
|
|
enabled: false # Set to true if we want to export traces using HTTP
|
|
endpoint: "http://fluent-bit:4318"
|
|
|
|
######################################
|
|
## Envoy Chart ##
|
|
######################################
|
|
|
|
envoy:
|
|
enabled: true
|
|
|
|
######################################
|
|
## Image ##
|
|
######################################
|
|
|
|
image:
|
|
name: 1.12.2/envoyproxy
|
|
tag: distroless-v1.34.4
|
|
|
|
######################################
|
|
## Service ##
|
|
######################################
|
|
|
|
service:
|
|
type: LoadBalancer
|
|
ports:
|
|
- name: ssh
|
|
protocol: TCP
|
|
port: 9022
|
|
targetPort: 2222
|
|
- name: apim
|
|
protocol: TCP
|
|
port: 4443
|
|
targetPort: 4443
|
|
- name: apim-mtls
|
|
protocol: TCP
|
|
port: 5443
|
|
targetPort: 5443
|
|
- name: webhook
|
|
protocol: TCP
|
|
port: 443
|
|
targetPort: 8443
|
|
- name: https
|
|
protocol: TCP
|
|
port: 9443
|
|
targetPort: 9443
|
|
- name: http
|
|
protocol: TCP
|
|
port: 9080
|
|
targetPort: 9080
|
|
|
|
######################################
|
|
## Routes - Openshift Specific ##
|
|
######################################
|
|
|
|
# In order to use routes, multihost must first be enabled in the parent chart
# Enabling a route will then create an OpenShift route for that traffic type using the multihost format.
# This will be: <global.multihost.listeners.tls.webhook.hostPrefix>.<external_domain>
# (example: webhook-server.dp.apilab.us)
|
|
|
|
route:
|
|
http:
|
|
enabled: false
|
|
https:
|
|
enabled: false
|
|
webhook:
|
|
enabled: false
|
|
api:
|
|
enabled: false
|
|
sftp:
|
|
enabled: false
|
|
|
|
######################################
|
|
## Probes ##
|
|
######################################
|
|
|
|
# envoy startup probe
|
|
startupProbe:
|
|
httpGet:
|
|
path: /started
|
|
port: internal
|
|
failureThreshold: 180
|
|
periodSeconds: 1
|
|
initialDelaySeconds: 30
|
|
timeoutSeconds: 1
|
|
successThreshold: 1
|
|
|
|
# envoy liveness probe
|
|
livenessProbe:
|
|
httpGet:
|
|
path: /
|
|
port: admin
|
|
initialDelaySeconds: 60
|
|
periodSeconds: 120
|
|
timeoutSeconds: 30
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
# envoy readiness probe
|
|
readinessProbe:
|
|
httpGet:
|
|
path: /ready
|
|
port: admin
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 60
|
|
timeoutSeconds: 30
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
preStopSleepSeconds: "40s"
|
|
terminationGracePeriodSeconds: 3600
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
allowPrivilegeEscalation: false
|
|
readOnlyRootFilesystem: false
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
#serviceaccount for envoy
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "envoy"
|
|
automountServiceAccountToken: true
|
|
|
|
podAnnotations: {}
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
calicoNetpol:
|
|
enabled: false
|
|
# reference the targets envoy is allowed to reach
|
|
outboundRule:
|
|
# use this to reference a (preexisting) networkset
|
|
selector: role == 'any-address'
|
|
# or uncomment this to directly reference IP cidr instead
|
|
#nets:
|
|
# - 0.0.0.0/0 # put your subnet cidr here
|
|
# reference the source IPs allowed into envoy
|
|
subnetEntityRule:
|
|
# use this to reference a (preexisting) networkset
|
|
selector: role == 'nodes-subnet-vpc-cidr'
|
|
# or uncomment this to directly reference IP cidr instead
|
|
#nets:
|
|
# - 10.0.0.0/24 # put your subnet cidr here
|
|
|
|
######################################
|
|
## Capacity/Affinity ##
|
|
######################################
|
|
|
|
replicaCount: 1
|
|
strategy:
|
|
type: "RollingUpdate"
|
|
rollingUpdate:
|
|
maxSurge: 100%
|
|
maxUnavailable: 0
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 500m
|
|
memory: 500Mi
|
|
requests:
|
|
cpu: 500m
|
|
memory: 500Mi
|
|
|
|
autoscaling:
|
|
enabled: false
|
|
minReplicas: 1
|
|
maxReplicas: 5
|
|
targetCPUUtilizationPercentage: 75
|
|
targetMemoryUtilizationPercentage: 75
|
|
|
|
podDisruptionBudget:
|
|
enabled: false
|
|
minPods: 1
|
|
|
|
nodeSelector: {}
|
|
tolerations: []
|
|
affinity: {}
|
|
files: {}
|
|
templates: {}
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
|
|
# The proxy admin port is not externally accessible by default, override this to
|
|
# true in order to invoke the proxy admin APIs
|
|
exposeProxyAdminPort: false
|
|
# Override this if exposeProxyAdminPort=true and a different port is required for the proxy admin APIs.
|
|
proxyAdminPort: 9901
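# Example (illustrative; resource names are placeholders): the Envoy admin API (e.g. /ready, /stats)
# listens on this port; for ad-hoc access you can port-forward to it:
#   kubectl port-forward deployment/<envoy-deployment> 9901:9901
#   curl http://localhost:9901/ready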
|
|
|
|
adminPorts:
|
|
admin:
|
|
containerPort: 9901
|
|
protocol: TCP
|
|
internal:
|
|
protocol: TCP
|
|
containerPort: 9902
|
|
|
|
ports:
|
|
apim:
|
|
protocol: TCP
|
|
containerPort: 4443
|
|
apim-mtls:
|
|
protocol: TCP
|
|
containerPort: 5443
|
|
webhook:
|
|
protocol: TCP
|
|
containerPort: 8443
|
|
https:
|
|
protocol: TCP
|
|
containerPort: 9443
|
|
http:
|
|
protocol: TCP
|
|
containerPort: 9080
|
|
ssh:
|
|
protocol: TCP
|
|
containerPort: 2222
|
|
|
|
timeouts:
|
|
# Load balancer timeout is set higher than the request timeout to allow requests to close cleanly.
|
|
loadbalancer: 3700
|
|
maxDuration: 3600s
|
|
idleTimeout: 3600s
|
|
connectTimeout: 15s
|
|
|
|
command:
|
|
- /usr/local/bin/envoy
|
|
|
|
argsTemplate: |-
|
|
- -l
|
|
- $(LOGLEVEL)
|
|
- -c
|
|
- /config/envoy.yaml
|
|
|
|
logLevel: info
|
|
|
|
######################################
|
|
## Env ##
|
|
######################################
|
|
# Provide environment variables or override default application.properties
|
|
# Define key values in form of name and value, e.g.:
|
|
# env:
|
|
# - name: LOG_LEVEL
|
|
# value: DEBUG
|
|
# @schema
|
|
# type: array
|
|
# items:
|
|
# oneOf:
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# value:
|
|
# type: string
|
|
# required: [name, value]
|
|
# additionalProperties: false
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# valueFrom:
|
|
# type: object
|
|
# required: [name, valueFrom]
|
|
# additionalProperties: false
|
|
# @schema
|
|
env: []
|
|
|
|
######################################
|
|
## Inbound-Worker Chart ##
|
|
######################################
|
|
|
|
inboundWorker:
|
|
enabled: true
|
|
|
|
######################################
|
|
## Image ##
|
|
######################################
|
|
|
|
image:
|
|
name: 1.13.1/inbound-worker
|
|
buildTag: 1.13.1
|
|
|
|
######################################
|
|
## Service ##
|
|
######################################
|
|
|
|
service:
|
|
type: ClusterIP
|
|
ports:
|
|
- name: internal
|
|
protocol: TCP
|
|
port: 8080
|
|
targetPort: 8080
|
|
- name: sse
|
|
protocol: TCP
|
|
port: 7080
|
|
targetPort: 7080
|
|
- name: openapi
|
|
protocol: TCP
|
|
port: 9080
|
|
targetPort: 9080
|
|
- name: ssh
|
|
protocol: TCP
|
|
port: 2222
|
|
targetPort: 2222
|
|
- name: service
|
|
protocol: TCP
|
|
port: 9443
|
|
targetPort: 9443
|
|
|
|
sftpService:
|
|
enabled: false
|
|
type: NodePort
|
|
ports:
|
|
- name: ssh
|
|
protocol: TCP
|
|
port: 9022
|
|
targetPort: 9022
|
|
nodePort: 32222
|
|
|
|
######################################
|
|
## Persistence ##
|
|
######################################
|
|
#EBS_hostPath
|
|
ebs_root: "/transaction-data"
|
|
txn_log_base_path: "/transaction-logs"
|
|
|
|
#EFS_Filesystem
|
|
efs_txn_log_path: "/efs/transaction-logs"
|
|
|
|
######################################
|
|
## Probes ##
|
|
######################################
|
|
|
|
#inbound worker startupProbe
|
|
startupProbe:
|
|
httpGet:
|
|
path: /q/health/started
|
|
port: internal
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 2
|
|
timeoutSeconds: 2
|
|
failureThreshold: 60
|
|
successThreshold: 1
|
|
|
|
#inbound worker livenessProbe
|
|
livenessProbe:
|
|
httpGet:
|
|
path: /q/health/live
|
|
port: internal
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
#inbound worker readinessProbe
|
|
readinessProbe:
|
|
httpGet:
|
|
path: /q/health/ready
|
|
port: internal
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
preStopSleepSeconds: "40s"
|
|
terminationGracePeriodSeconds: 3600
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
#serviceaccount for inbound-worker
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "inbound-worker"
|
|
automountServiceAccountToken: true
|
|
|
|
hostPath:
|
|
enabled: false
|
|
|
|
podAnnotations: {}
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
# Use the following block if you have Calico and want to use its network policies
|
|
# You will need the Calico API server to be installed as well
|
|
calicoNetpol:
|
|
enabled: false
|
|
# reference the targets the inbound-worker is allowed to reach
|
|
outboundRule:
|
|
# use this to reference a (preexisting) networkset
|
|
selector: role == 'any-address'
|
|
# or uncomment this to directly reference IP cidr instead
|
|
#nets:
|
|
# - 0.0.0.0/0 # put your subnet cidr here
|
|
|
|
######################################
|
|
## Capacity/Affinity ##
|
|
######################################
|
|
|
|
replicaCount: 1
|
|
strategy:
|
|
type: "RollingUpdate"
|
|
rollingUpdate:
|
|
maxSurge: 100%
|
|
maxUnavailable: 0
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 500m
|
|
memory: 4000Mi
|
|
requests:
|
|
cpu: 500m
|
|
memory: 800Mi
|
|
|
|
autoscaling:
|
|
enabled: false
|
|
minReplicas: 1
|
|
maxReplicas: 5
|
|
targetCPUUtilizationPercentage: 75
|
|
targetMemoryUtilizationPercentage: 75
|
|
|
|
podDisruptionBudget:
|
|
enabled: false
|
|
minPods: 1
|
|
|
|
nodeSelector: {}
|
|
tolerations: []
|
|
affinity: {}
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
|
|
javaOpts: "-XX:+UseG1GC -XX:-G1PeriodicGCInvokesConcurrent -XX:G1PeriodicGCInterval=300000 -XX:MaxRAMPercentage=75"
|
|
# Date after which missed events will be rescheduled. Should be in Zulu time.
|
|
# The expected format is YYYY-MM-DDTHH:MMZ, for example "2025-11-13T12:00Z"
|
|
only_reschedule_missed_events_newer_than: ""
|
|
stale_process_interval: "30m"
|
|
QUARKUS_LOG_CONSOLE_LEVEL: "INFO"
|
|
max_payload_size_kbs: "50"
|
|
heartbeat_time_interval_sec: "300"
|
|
heartbeat_timeout_sec: "900"
|
|
leader_lease_duration: "6"
|
|
leader_lease_renew_period: "4"
|
|
leader_lease_retry_period: "1"
|
|
delegate_inoperative_pod_events_interval_sec: "2s"
|
|
enable_legacy_tls: "false"
|
|
# Mail trigger interval in seconds for inbound worker web socket connection failure
|
|
mailTriggerInterval: 300
|
|
datagrid_subscriptions_per_connection: 20 # Number of subscriptions allowed per connection in the Valkey client
|
|
datagrid_subscription_connection_pool_size: 100 # Size of the subscription connection pool in the Valkey client
|
|
|
|
#AWS Credentials
|
|
aws:
|
|
enabled: false
|
|
Access_Key: xxxxxxxxxxxx
|
|
Secret_Key: xxxxxxxxxxx
|
|
Region: xxxxxxxxxx
|
|
|
|
#SFTP and HTTPS Connections
|
|
sftp_enable: "true"
|
|
http_enable: "true"
|
|
https_enable: "true"
|
|
api_enable: "true"
|
|
api_mtls_enable: "true"
|
|
|
|
# Until mTLS enforcement is configured in Envoy we need to _not_ terminate TLS to avoid client cert prompt. (EIPAAS-4353)
|
|
http_tls_termination: "false"
|
|
|
|
nameOverride: ""
|
|
fullnameOverride: ""
|
|
|
|
#SAP dependencies configuration
|
|
sap_lib_directory: "false"
|
|
sap_enabled: false
|
|
|
|
# New salesforce event configuration
|
|
salesforce:
|
|
# <host>{:<port>} Pub/Sub API Endpoint
|
|
# default: api.pubsub.salesforce.com (port 443 or 7443)
|
|
pubsub_api_url: ""
|
|
# Retry configuration:
|
|
# - initial backoff duration in ISO 8601 format
|
|
# default: PT5M
|
|
retry_initial_backoff_duration: ""
|
|
# - max backoff duration in ISO 8601 format
|
|
# default: PT1H
|
|
retry_max_backoff_duration: ""
|
|
# - Comma separated list of GRPC status codes to retry
|
|
# default: UNAVAILABLE,ABORTED,FAILED_PRECONDITION,RESOURCE_EXHAUSTED
|
|
retry_status_code_list: ""
|
|
|
|
######################################
|
|
## Env ##
|
|
######################################
|
|
# Provide environment variables or override default application.properties
|
|
# Define key values in form of name and value, e.g.:
|
|
# env:
|
|
# - name: LOG_LEVEL
|
|
# value: DEBUG
|
|
# @schema
|
|
# type: array
|
|
# items:
|
|
# oneOf:
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# value:
|
|
# type: string
|
|
# required: [name, value]
|
|
# additionalProperties: false
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# valueFrom:
|
|
# type: object
|
|
# required: [name, valueFrom]
|
|
# additionalProperties: false
|
|
# @schema
|
|
env: []
|
|
|
|
######################################
|
|
## Orchestrator Chart ##
|
|
######################################
|
|
|
|
orchestrator:
|
|
enabled: true
|
|
|
|
######################################
|
|
## Image ##
|
|
######################################
|
|
|
|
image:
|
|
name: 1.13.1/orchestrator
|
|
buildTag: 1.13.1
|
|
|
|
######################################
|
|
## Service ##
|
|
######################################
|
|
|
|
service:
|
|
ports:
|
|
- name: http-port
|
|
protocol: TCP
|
|
port: 1919
|
|
targetPort: 1919
|
|
- name: grpc-port
|
|
protocol: TCP
|
|
port: 50051
|
|
targetPort: 50051
|
|
- name: sse-port
|
|
protocol: TCP
|
|
port: 50052
|
|
targetPort: 50052
|
|
|
|
######################################
|
|
## Persistence ##
|
|
######################################
|
|
#EBS_hostPath
|
|
ebs_root: "/transaction-data"
|
|
txn_log_base_path: "/transaction-logs"
|
|
|
|
#EFS_Filesystem
|
|
efs_txn_log_path: "/efs/transaction-logs"
|
|
|
|
#JWT_TTL
|
|
jwtExpirationTimeout: 10
|
|
|
|
# NFS Connector config - disabled by default
|
|
connectors:
|
|
# NFS connector config
|
|
nfs:
|
|
# enable/disable NFS volumes mount
|
|
enabled: false
|
|
mode: csi
|
|
# Configure NFS volumes mounted into the orchestrator file system.
|
|
# Volumes are mounted under /connectors/nfs/<name>
|
|
volumes:
|
|
- name: "local-volume-name"
|
|
# server IP : sl1087339.aabc
|
|
server: "nfs-server"
|
|
# share path : /nf-op-buch
|
|
share: "nfs-server-share"
|
|
mountOptions: # put your mount options here, as a yaml list
|
|
- nfsvers=4.1
|
|
|
|
######################################
|
|
## Probes ##
|
|
######################################
|
|
|
|
#orchestrator startupProbe
|
|
startupProbe:
|
|
httpGet:
|
|
path: /q/health/started
|
|
port: 1919
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 2
|
|
timeoutSeconds: 2
|
|
failureThreshold: 60
|
|
successThreshold: 1
|
|
|
|
#orchestrator readinessProbe
|
|
readinessProbe:
|
|
httpGet:
|
|
path: /q/health/ready
|
|
port: 1919
|
|
periodSeconds: 5
|
|
timeoutSeconds: 2
|
|
failureThreshold: 1
|
|
successThreshold: 1
|
|
|
|
#orchestrator livenessProbe
|
|
livenessProbe:
|
|
httpGet:
|
|
path: /q/health/live
|
|
port: 1919
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
terminationGracePeriodSeconds: 3600
|
|
preStopSleepSeconds: "40s"
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
#serviceaccount for orchestrator
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "orchestrator"
|
|
automountServiceAccountToken: true
|
|
|
|
podAnnotations: {}
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
# Use the following block if you have Calico and want to use its network policies
|
|
# You will need the Calico API server to be installed as well
|
|
calicoNetpol:
|
|
enabled: false
|
|
# reference the targets the orchestrator is allowed to reach
|
|
outboundRule:
|
|
# use this to reference a (preexisting) networkset
|
|
selector: role == 'any-address'
|
|
# or uncomment this to directly reference IP cidr instead
|
|
#nets:
|
|
# - 0.0.0.0/0 # put your subnet cidr here
|
|
|
|
######################################
|
|
## Capacity/Affinity ##
|
|
######################################
|
|
|
|
replicaCount: 1
|
|
strategy:
|
|
type: "RollingUpdate"
|
|
rollingUpdate:
|
|
maxSurge: 100%
|
|
maxUnavailable: 0
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 1500m
|
|
memory: 4000Mi
|
|
requests:
|
|
cpu: 100m
|
|
memory: 1000Mi
|
|
|
|
autoscaling:
|
|
enabled: false
|
|
minReplicas: 1
|
|
maxReplicas: 2
|
|
targetCPUUtilizationPercentage: 75
|
|
targetMemoryUtilizationPercentage: 75
|
|
|
|
podDisruptionBudget:
|
|
enabled: false
|
|
minPods: 1
|
|
|
|
nodeSelector: {}
|
|
tolerations: []
|
|
affinity: {}
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
|
|
nameOverride: ""
|
|
fullnameOverride: ""
|
|
|
|
#JavaOpts and executions-limits
|
|
javaOpts: "-XX:+UseG1GC -XX:-G1PeriodicGCInvokesConcurrent -XX:G1PeriodicGCInterval=300000 -XX:MaxRAMPercentage=75"
|
|
enable_legacy_tls: false
|
|
# max_executions_limit number of parallel executions allowed
|
|
max_executions_limit: "20"
|
|
# critical_memory_buffer minimum amount of free memory (in MiB) available before orchestration can commence
|
|
critical_memory_buffer: "64"
|
|
throw_exception_on_fail: "false"
|
|
stale_process_interval: "30m"
|
|
QUARKUS_LOG_CONSOLE_LEVEL: "INFO" # Console log level for inbound server and orchestrator
|
|
max_payload_size_kbs: "20"
|
|
heartbeat_time_interval_sec: "300"
|
|
heartbeat_timeout_sec: "900"
|
|
#AWS Credentials
|
|
aws:
|
|
enabled: false
|
|
Access_Key: xxxxxxxxxxxx
|
|
Secret_Key: xxxxxxxxxxx
|
|
Region: xxxxxxxxxx
|
|
|
|
#cidr_range - block internal IP ranges (format example: 0.73.0.0/16,172.21.0.0/16)
|
|
cidr_range: ""
|
|
|
|
#SAP dependencies configuration
|
|
sap_lib_directory: "false"
|
|
sap_enabled: false
|
|
|
|
hostPath:
|
|
enabled: false
|
|
|
|
######################################
|
|
## Env ##
|
|
######################################
|
|
# Provide environment variables or override default application.properties
|
|
# Define key values in form of name and value, e.g.:
|
|
# env:
|
|
# - name: quarkus.log.level
|
|
# value: DEBUG
|
|
# @schema
|
|
# type: array
|
|
# items:
|
|
# oneOf:
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# value:
|
|
# type: string
|
|
# required: [name, value]
|
|
# additionalProperties: false
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# valueFrom:
|
|
# type: object
|
|
# required: [name, valueFrom]
|
|
# additionalProperties: false
|
|
# @schema
|
|
env: []
|
|
|
|
######################################
|
|
## Operator Chart ##
|
|
######################################
|
|
|
|
fusionOperator:
|
|
enabled: true
|
|
|
|
######################################
|
|
## Image ##
|
|
######################################
|
|
|
|
image:
|
|
name: 1.11.1/fusion-operator
|
|
buildTag: 1.1.0
|
|
|
|
#serviceaccount for operator
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "fusion-operator"
|
|
automountServiceAccountToken: true
|
|
|
|
job:
|
|
#serviceaccount for crd job
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "fusion-operator-job"
|
|
automountServiceAccountToken: true
|
|
|
|
removeJob_ttl: 3600
|
|
updateJob_ttl: 3600
|
|
|
|
######################################
|
|
## Capacity/Affinity ##
|
|
######################################
|
|
|
|
replicaCount: 1
|
|
strategy:
|
|
type: "RollingUpdate"
|
|
rollingUpdate:
|
|
maxSurge: 100%
|
|
maxUnavailable: 0
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 500m
|
|
memory: 512Mi
|
|
requests:
|
|
cpu: 50m
|
|
memory: 64Mi
|
|
|
|
autoscaling:
|
|
enabled: false
|
|
minReplicas: 1
|
|
maxReplicas: 2
|
|
targetCPUUtilizationPercentage: 75
|
|
targetMemoryUtilizationPercentage: 75
|
|
|
|
######################################
|
|
## Probes ##
|
|
######################################
|
|
|
|
# fusionOperator Liveness probe
|
|
livenessProbe:
|
|
httpGet:
|
|
path: /healthz
|
|
port: 8081
|
|
initialDelaySeconds: 60
|
|
periodSeconds: 30
|
|
timeoutSeconds: 10
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
# fusionOperator readinessProbe probe
|
|
readinessProbe:
|
|
httpGet:
|
|
path: /readyz
|
|
port: 8081
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 30
|
|
timeoutSeconds: 10
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
# fusionOperator startupProbe probe
|
|
startupProbe:
|
|
httpGet:
|
|
path: /readyz
|
|
port: 8081
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 30
|
|
successThreshold: 1
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
|
|
nameOverride: ""
|
|
fullnameOverride: ""
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
######################################
|
|
## Leader Election ##
|
|
######################################
|
|
|
|
leaderElection:
|
|
enabled: true
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
# Use the following block if you have Calico and want to use its network policies
|
|
# You will need the Calico API server to be installed as well
|
|
calicoNetpol:
|
|
enabled: false
|
|
# reference the targets the fusion-operator is allowed to reach
|
|
outboundRule:
|
|
# use this to reference a (preexisting) networkset
|
|
selector: role == 'any-address'
|
|
ports:
|
|
- 443
|
|
# or uncomment this to directly reference IP cidr instead
|
|
#nets:
|
|
# - 0.0.0.0/0 # put your subnet cidr here
|
|
|
|
######################################
|
|
## Env ##
|
|
######################################
|
|
# Provide environment variables or override default application.properties
|
|
# Define key values in form of name and value, e.g.:
|
|
# env:
|
|
# - name: LOG_LEVEL
|
|
# value: DEBUG
|
|
# @schema
|
|
# type: array
|
|
# items:
|
|
# oneOf:
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# value:
|
|
# type: string
|
|
# required: [name, value]
|
|
# additionalProperties: false
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# valueFrom:
|
|
# type: object
|
|
# required: [name, valueFrom]
|
|
# additionalProperties: false
|
|
# @schema
|
|
env: []
|
|
|
|
######################################
|
|
## PEP Server ##
|
|
######################################
|
|
|
|
pepServer:
|
|
enabled: true
|
|
|
|
######################################
|
|
## Image ##
|
|
######################################
|
|
|
|
image:
|
|
name: 1.13.1/pep-server
|
|
buildTag: 1.13.1
|
|
|
|
######################################
|
|
## Service ##
|
|
######################################
|
|
|
|
service:
|
|
type: ClusterIP
|
|
ports:
|
|
- name: xds
|
|
protocol: TCP
|
|
port: 9090
|
|
targetPort: 9090
|
|
|
|
######################################
|
|
## Probes ##
|
|
######################################
|
|
|
|
# pepServer: startupProbe
|
|
startupProbe:
|
|
httpGet:
|
|
path: /q/health/started
|
|
port: internal
|
|
initialDelaySeconds: 0
|
|
periodSeconds: 2
|
|
timeoutSeconds: 2
|
|
failureThreshold: 60
|
|
successThreshold: 1
|
|
|
|
# pepServer: readinessProbe
|
|
readinessProbe:
|
|
httpGet:
|
|
path: /q/health/ready
|
|
port: internal
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
# pepServer: livenessProbe
|
|
livenessProbe:
|
|
httpGet:
|
|
path: /q/health/live
|
|
port: internal
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
preStopSleepSeconds: "40s"
|
|
terminationGracePeriodSeconds: 3600
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
#serviceaccount for pep-server
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "pep-server"
|
|
automountServiceAccountToken: true
|
|
|
|
podAnnotations: {}
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
# Use the following block if you have Calico and want to use its network policies
|
|
# You will need the Calico API server to be installed as well
|
|
calicoNetpol:
|
|
enabled: false
|
|
|
|
######################################
|
|
## Capacity/Affinity ##
|
|
######################################
|
|
|
|
replicaCount: 1
|
|
strategy:
|
|
type: "RollingUpdate"
|
|
rollingUpdate:
|
|
maxSurge: 100%
|
|
maxUnavailable: 0
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 1000m
|
|
memory: 1000Mi
|
|
requests:
|
|
cpu: 500m
|
|
memory: 500Mi
|
|
|
|
autoscaling:
|
|
enabled: false
|
|
minReplicas: 1
|
|
maxReplicas: 5
|
|
targetCPUUtilizationPercentage: 75
|
|
targetMemoryUtilizationPercentage: 75
|
|
|
|
podDisruptionBudget:
|
|
enabled: false
|
|
minPods: 1
|
|
|
|
nodeSelector: {}
|
|
tolerations: []
|
|
affinity: {}
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
|
|
javaOpts: "-XX:+UseG1GC -XX:-G1PeriodicGCInvokesConcurrent -XX:G1PeriodicGCInterval=300000 -XX:MaxRAMPercentage=90"
|
|
QUARKUS_LOG_CONSOLE_LEVEL: "INFO"
|
|
extproc_background_cleanup_duration: "3000"
|
|
grpc_server_drain: "30"
|
|
idleTimeout: 3600s
|
|
extAuthzTimeout: 1000ms
|
|
nameOverride: ""
|
|
fullnameOverride: ""
|
|
|
|
######################################
|
|
## Env ##
|
|
######################################
|
|
# Provide environment variables or override default application.properties
|
|
# Define key values in form of name and value, e.g.:
|
|
# env:
|
|
# - name: quarkus.log.level
|
|
# value: DEBUG
|
|
# @schema
|
|
# type: array
|
|
# items:
|
|
# oneOf:
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# value:
|
|
# type: string
|
|
# required: [name, value]
|
|
# additionalProperties: false
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# valueFrom:
|
|
# type: object
|
|
# required: [name, valueFrom]
|
|
# additionalProperties: false
|
|
# @schema
|
|
env: []
|
|
|
|
######################################
|
|
## Post-deploy Chart ##
|
|
######################################
|
|
postdeploy:
|
|
enabled: true
|
|
|
|
#Number of seconds a job should live after it is finished.
|
|
job_ttl: "3600"
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
#serviceaccount for postdeploy
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "postdeploy"
|
|
automountServiceAccountToken: true
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
calicoNetpol:
|
|
enabled: false
|
|
|
|
######################################
|
|
## Predeploy Chart ##
|
|
######################################
|
|
|
|
predeploy:
|
|
enabled: true
|
|
|
|
#Number of seconds a job should live after it is finished.
|
|
job_ttl: "3600"
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
#serviceaccount for predeploy
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "predeploy"
|
|
automountServiceAccountToken: true
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
calicoNetpol:
|
|
enabled: false
|
|
|
|
######################################
|
|
## Sink-Agent Chart ##
|
|
######################################
|
|
|
|
sinkAgent:
|
|
enabled: true
|
|
|
|
image:
|
|
name: 1.13.1/sink-agent
|
|
buildTag: 1.13.1
|
|
|
|
######################################
|
|
## Service ##
|
|
######################################
|
|
|
|
service:
|
|
type: ClusterIP
|
|
ports:
|
|
- name: internal
|
|
protocol: TCP
|
|
port: 8080
|
|
targetPort: 8080
|
|
|
|
######################################
|
|
## Probes ##
|
|
######################################
|
|
# sinkAgent: startupProbe
|
|
startupProbe:
|
|
httpGet:
|
|
path: /q/health/started
|
|
port: internal
|
|
initialDelaySeconds: 0
|
|
periodSeconds: 2
|
|
timeoutSeconds: 2
|
|
failureThreshold: 60
|
|
successThreshold: 1
|
|
|
|
# sinkAgent: readinessProbe
|
|
readinessProbe:
|
|
httpGet:
|
|
path: /q/health/ready
|
|
port: internal
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
# sinkAgent: livenessProbe
|
|
livenessProbe:
|
|
httpGet:
|
|
path: /q/health/live
|
|
port: internal
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 3
|
|
successThreshold: 1
|
|
|
|
preStopSleepSeconds: "40s"
|
|
terminationGracePeriodSeconds: 3600
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
#serviceaccount for sink-agent
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "sink-agent"
|
|
automountServiceAccountToken: true
|
|
|
|
podAnnotations: {}
|
|
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
# Use the following block if you have Calico and want to use its network policies
|
|
# You will need the Calico API server to be installed as well
|
|
calicoNetpol:
|
|
enabled: false
|
|
# reference the targets the sink-agent is allowed to reach
|
|
outboundRule:
|
|
# use this to reference a (preexisting) networkset
|
|
selector: role == 'any-address'
|
|
# or uncomment this to directly reference IP cidr instead
|
|
#nets:
|
|
# - 0.0.0.0/0 # put your subnet cidr here
|
|
|
|
######################################
|
|
## Capacity/Affinity ##
|
|
######################################
|
|
strategy:
|
|
type: "RollingUpdate"
|
|
rollingUpdate:
|
|
maxSurge: 100%
|
|
maxUnavailable: 0
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 1000m
|
|
memory: 1000Mi
|
|
requests:
|
|
cpu: 500m
|
|
memory: 500Mi
|
|
|
|
podDisruptionBudget:
|
|
enabled: false
|
|
minPods: 1
|
|
|
|
nodeSelector: {}
|
|
tolerations: []
|
|
affinity: {}
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
nameOverride: ""
|
|
fullnameOverride: ""
|
|
|
|
#Java_Opts
|
|
javaOpts: "-XX:+UseG1GC -XX:-G1PeriodicGCInvokesConcurrent -XX:G1PeriodicGCInterval=300000 -XX:MaxRAMPercentage=90"
|
|
|
|
######################################
|
|
## Retention Policy Configuration ##
|
|
######################################
|
|
# Default set to false
|
|
retention_job_purge_enabled: "true"
|
|
# Default is 00:00 (midnight); applicable only for MANAGED_SHARED Fusion
|
|
retention_job_purge_cron: "0 0 0 ? * * *"
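# Note (assumption): the expression above appears to use a Quartz-style layout
# (seconds minutes hours day-of-month month day-of-week [year]), so "0 0 0 ? * * *" fires daily at midnight.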
|
|
# Mail trigger interval in seconds for sink agent web socket connection failure
|
|
mailTriggerInterval: 300
|
|
|
|
######################################
|
|
## Env ##
|
|
######################################
|
|
# Provide environment variables or override default application.properties
|
|
# Define key values in form of name and value, e.g.:
|
|
# env:
|
|
# - name: quarkus.log.level
|
|
# value: DEBUG
|
|
# @schema
|
|
# type: array
|
|
# items:
|
|
# oneOf:
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# value:
|
|
# type: string
|
|
# required: [name, value]
|
|
# additionalProperties: false
|
|
# - type: object
|
|
# properties:
|
|
# name:
|
|
# type: string
|
|
# valueFrom:
|
|
# type: object
|
|
# required: [name, valueFrom]
|
|
# additionalProperties: false
|
|
# @schema
|
|
env: []
|
|
######################################
|
|
## Fluent-Bit Chart ##
|
|
######################################
|
|
|
|
fluentBit:
|
|
enabled: false
|
|
|
|
# Default set to DaemonSet
|
|
# Possible Values : DaemonSet / Deployment
|
|
# Configure it as Deployment for environments where root permission is not allowed and Fluent-Bit is used only for offloading logs to other locations
|
|
kind: "DaemonSet"
|
|
|
|
image:
|
|
name: 1.13/fluent-bit
|
|
buildTag: 3.2.10-axway-2
|
|
|
|
nameOverride: ""
|
|
|
|
podSecurityContextEnabled: true
|
|
|
|
podSecurityContext:
|
|
supplementalGroups:
|
|
- 1001
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
fsGroup: 1001
|
|
|
|
calicoNetpol:
|
|
enabled: false
|
|
|
|
hostNetwork: false
|
|
|
|
config:
|
|
service: ""
|
|
inputs: ""
|
|
filters: ""
|
|
outputs: ""
|
|
|
|
existingConfigMap: ""
|
|
|
|
env: []
|
|
|
|
envWithTpl: []
|
|
|
|
extraContainers: []
|
|
|
|
metricsPort: 2020
|
|
|
|
extraPorts: []
|
|
|
|
extraVolumes: []
|
|
|
|
extraVolumeMounts: []
|
|
|
|
labels: {}
|
|
|
|
annotations: {}
|
|
|
|
podAnnotations: {}
|
|
|
|
podLabels: {}
|
|
|
|
# @schema
|
|
# type: integer
|
|
# @schema
|
|
minReadySeconds: 0
|
|
|
|
daemonSetVolumes:
|
|
- name: varlog
|
|
hostPath:
|
|
path: /var/log
|
|
- name: varlibdockercontainers
|
|
hostPath:
|
|
path: /var/lib/docker/containers
|
|
- name: etcmachineid
|
|
hostPath:
|
|
path: /etc/machine-id
|
|
type: File
|
|
|
|
daemonSetVolumeMounts:
|
|
- name: varlog
|
|
mountPath: /var/log
|
|
- name: varlibdockercontainers
|
|
mountPath: /var/lib/docker/containers
|
|
readOnly: true
|
|
- name: etcmachineid
|
|
mountPath: /etc/machine-id
|
|
readOnly: true
|
|
|
|
initContainers: []
|
|
|
|
args:
|
|
- --workdir=/fluent-bit/etc
|
|
- --config=/fluent-bit/etc/conf/fluent-bit.conf
|
|
command:
|
|
- /fluent-bit/bin/fluent-bit
|
|
|
|
envFrom: []
|
|
|
|
updateStrategy:
|
|
rollingUpdate:
|
|
maxSurge: 0
|
|
maxUnavailable: 1
|
|
type: RollingUpdate
|
|
|
|
# fluentbit liveness probe
|
|
livenessProbe:
|
|
httpGet:
|
|
path: /
|
|
port: http
|
|
scheme: HTTP
|
|
periodSeconds: 30
|
|
successThreshold: 1
|
|
timeoutSeconds: 10
|
|
initialDelaySeconds: 60
|
|
failureThreshold: 3
|
|
|
|
# fluentbit readiness probe
|
|
readinessProbe:
|
|
httpGet:
|
|
path: /api/v1/health
|
|
port: http
|
|
scheme: HTTP
|
|
periodSeconds: 10
|
|
successThreshold: 1
|
|
timeoutSeconds: 5
|
|
initialDelaySeconds: 10
|
|
failureThreshold: 3
|
|
|
|
# fluentbit startup probe
|
|
startupProbe:
|
|
httpGet:
|
|
path: /api/v1/health
|
|
port: http
|
|
scheme: HTTP
|
|
periodSeconds: 10
|
|
successThreshold: 1
|
|
timeoutSeconds: 5
|
|
initialDelaySeconds: 30
|
|
failureThreshold: 3
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 200m
|
|
memory: 512Mi
|
|
requests:
|
|
cpu: 200m
|
|
memory: 512Mi
|
|
|
|
autoscaling:
|
|
enabled: true
|
|
minReplicas: 1
|
|
maxReplicas: 1
|
|
targetCPUUtilizationPercentage: 75
|
|
targetMemoryUtilizationPercentage: 75
|
|
|
|
podDisruptionBudget:
|
|
enabled: false
|
|
minPods: 1
|
|
|
|
dnsPolicy: ClusterFirst
|
|
|
|
restartPolicy: Always
|
|
|
|
schedulerName: default-scheduler
|
|
|
|
securityContext:
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
runAsUser: 10010
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
runAsGroup: 10020
|
|
runAsNonRoot: true
|
|
allowPrivilegeEscalation: false
|
|
readOnlyRootFilesystem: true
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
nodeSelector: {}
|
|
tolerations: []
|
|
affinity: {}
|
|
|
|
serviceAccount:
|
|
enabled: true
|
|
preexisting: false
|
|
annotations: {}
|
|
name: fluent-bit
|
|
|
|
terminationGracePeriodSeconds: 30
|
|
|
|
###################################
|
|
## logrotate ##
|
|
###################################
|
|
logrotate:
|
|
# Default set to false
|
|
# Enable or disable log rotation
|
|
# To be enabled only if fluent-bit is used as a DaemonSet to manage all logs
|
|
enabled: false
|
|
# Schedule for running the log rotation job
|
|
schedule: "*/5 * * * *" # runs every 5 minutes
|
|
# Retention policy for rotated logs
|
|
retention: -1 # keep all logs
|
|
# Minimum size of the log file before it gets rotated
|
|
size: 10M # rotate when log file reaches 10MB
|
|
# file suffix for rotated log files in date format
|
|
fileSuffix: .%Y-%m-%d-%H-%M # suffix for rotated log files
|
|
|
|
# service account config
|
|
serviceAccount:
|
|
enabled: true
|
|
preexisting: false
|
|
annotations: {}
|
|
name: logrotate
|
|
|
|
calicoNetpol:
|
|
enabled: false
|
|
|
|
securityContext:
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
runAsUser: 10010
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
runAsGroup: 10020
|
|
runAsNonRoot: true
|
|
allowPrivilegeEscalation: false
|
|
readOnlyRootFilesystem: true
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
podSecurityContextEnabled: true
|
|
|
|
podSecurityContext:
|
|
supplementalGroups:
|
|
- 1001
|
|
# @schema
|
|
# type: [string, integer, null]
|
|
# @schema
|
|
fsGroup: 1001
|
|
|
|
#Number of seconds a job should live after it is finished.
|
|
job_ttl: "3600"
|
|
|
|
######################################
|
|
## Valkey ##
|
|
######################################
|
|
|
|
valkey:
|
|
enabled: true
|
|
|
|
client:
|
|
#deployment_model - clustered, standalone or primary_replica
deployment_model: clustered
#subscription_mode - primary_only or replica_only; subscribing to primary nodes only is the default
subscription_mode: primary_only
#read_mode - replica_first, primary_only or primary_and_replica; reading from primary nodes only is the default
read_mode: primary_only
use_secure_connection: false
#security_mode - NONE, ONLY_CA or STRICT
security_mode: STRICT
|
|
|
|
datagrid_username: default
|
|
datagrid_primary_connection_pool_min: 12 # Minimum number of connections maintained in the primary Valkey connection pool
|
|
datagrid_primary_connection_pool_max: 64 # Maximum number of connections allowed in the primary Valkey connection pool
|
|
datagrid_replica_connection_pool_min: 12 # Minimum number of connections maintained in the replica Valkey connection pool
|
|
datagrid_replica_connection_pool_max: 64 # Maximum number of connections allowed in the replica Valkey connection pool
|
|
datagrid_worker_threads: 32 # Number of worker threads configured in the datagrid client
|
|
datagrid_idle_connection_timeout_millis: 30000 # Timeout in milliseconds before idle connections are closed (30 seconds)
datagrid_cluster_connect_timeout_millis: 10000 # Timeout in milliseconds for establishing cluster connections (10 seconds)
datagrid_response_timeout_millis: 20000 # Timeout in milliseconds for waiting on command responses (20 seconds)
|
|
datagrid_number_of_retries: 3 # Number of retry attempts for failed operations before giving up
|
|
datagrid_retry_interval_millis: 5000 # Delay in milliseconds between retry attempts (5 seconds)
|
|
|
|
datagrid_subscriptions_per_connection: 5 # Number of subscriptions allowed per connection in the Valkey client
|
|
datagrid_subscription_connection_pool_size: 50 # Size of the subscription connection pool in the Valkey client
|
|
datagrid_subscription_connection_min_idle_size: 5 # Minimum idle connection pool size maintained in the subscription connections pool
|
|
datagrid_subscription_connection_timeout_millis: 7500 # Timeout in milliseconds for subscription connections (i.e 7.5 seconds)
|
|
|
|
hosts:
|
|
- hostname: valkey-headless
|
|
port: "6379"
|
|
|
|
external:
|
|
enabled: false
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
# Use the following block if you have Calico and want to use its network policies
|
|
# You will need the Calico API server to be installed as well
|
|
calicoNetpol:
|
|
enabled: false
|
|
outboundRule:
|
|
# use this to reference a (preexisting) networkset
|
|
selector: role == 'any-address'
|
|
# or uncomment this to directly reference IP cidr instead
|
|
#nets:
|
|
# - 0.0.0.0/0 # put your subnet cidr here
|
|
|
|
internal:
|
|
enabled: true
|
|
######################################
|
|
## Network Policies ##
|
|
######################################
|
|
|
|
# Use the following block if you have Calico and want to use its network policies
|
|
# You will need the Calico API server to be installed as well
|
|
calicoNetpol:
|
|
enabled: false
|
|
|
|
######################################
|
|
## Cluster Configuration ##
|
|
######################################
|
|
clusterSize: 1 # Total nodes in cluster (masters + replicas)
|
|
replicasPerMaster: 0 # Number of replicas per master
|
|
|
|
######################################
|
|
## Image ##
|
|
######################################
|
|
|
|
image:
|
|
name: 1.12.2/valkey
|
|
buildTag: 8.1.4-alpine3.22-axway-1
|
|
|
|
######################################
|
|
## Environment ##
|
|
######################################
|
|
#name overrides
|
|
nameOverride: ""
|
|
fullnameOverride: ""
|
|
#20 seconds (time before marking a node as failed)
|
|
clusterNodeTimeout: 20000
|
|
#Persistence storage folder
|
|
valkeyDir: /data
|
|
#Will return errors when memory limit reached instead of evicting keys
|
|
maxMemoryPolicy: noeviction
|
|
#Process
|
|
#logfile: log file name
|
|
#(empty string means logs are sent to stdout)
|
|
#(e.g. valkey.log means logs are stored in <valkeyDir>/logs/valkey.log). Note: log rotation is recommended when using file logging.
|
|
logFile: ""
|
|
logLevel: notice
|
|
|
|
######################################
|
|
## Persistence ##
|
|
######################################
|
|
persistence:
|
|
#claimTemplateRequestStorage storage request size for the claim template
|
|
claimTemplateRequestStorage: 5Gi
|
|
#claimTemplateStorageClass class that will dynamically create storage on desired cloud platform
|
|
#For AWS set this value to gp3
|
|
#For Azure set this value to managed-csi
|
|
claimTemplateStorageClass: "nfs-csi"
|
|
#claimTemplateName name of the claim for valkey storage
|
|
claimTemplateName: valkey-data
|
|
#claimTemplateAccessModes access mode for claim template
|
|
claimTemplateAccessModes: ReadWriteMany
|
|
#Append-only file logging is on for durability - set to "no" to disable
|
|
appendonly: "yes"
|
|
# RDB snapshotting configuration
|
|
# To disable RDB snapshots entirely, leave rdbSnapshots undefined or set it to an empty array []
|
|
# Each entry defines: save after X seconds if at least Y keys changed
|
|
rdbSnapshots:
|
|
- seconds: 900
|
|
keyChanges: 1
|
|
- seconds: 300
|
|
keyChanges: 10
|
|
- seconds: 60
|
|
keyChanges: 10000
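# Example (illustrative): the entries above correspond to classic Valkey/Redis "save" directives, roughly:
#   save 900 1
#   save 300 10
#   save 60 10000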
|
|
|
|
######################################
|
|
## Probes ##
|
|
######################################
|
|
startupProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
successThreshold: 1
|
|
failureThreshold: 30
|
|
|
|
livenessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 30
|
|
periodSeconds: 30
|
|
timeoutSeconds: 5
|
|
successThreshold: 1
|
|
failureThreshold: 3
|
|
|
|
readinessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 5
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
successThreshold: 1
|
|
failureThreshold: 3
|
|
|
|
terminationGracePeriodSeconds: 60
|
|
|
|
######################################
|
|
## Security ##
|
|
######################################
|
|
|
|
podSecurityContextEnabled: false
|
|
podSecurityContext:
|
|
supplementalGroups: []
|
|
securityContext:
|
|
readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: null
|
|
runAsGroup: null
|
|
seccompProfile:
|
|
type: RuntimeDefault
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
#serviceaccount for valkey
|
|
serviceAccount:
|
|
enabled: true
|
|
# Use preexisting: true if you want to use a preexisting service account (e.g. created by the infra)
|
|
preexisting: false
|
|
annotations: {}
|
|
name: "valkey"
|
|
automountServiceAccountToken: true
|
|
|
|
podAnnotations: {}
|
|
|
|
podDisruptionBudget:
|
|
enabled: true
|
|
maxUnavailable: 1
|
|
|
|
resources:
|
|
limits:
|
|
cpu: 1000m
|
|
memory: 3Gi
|
|
requests:
|
|
cpu: 1000m
|
|
memory: 3Gi
|
|
|
|
nodeSelector: {}
|
|
tolerations: []
|
|
affinity: {}
|