first commit
This commit is contained in:
config-backups.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: daily-config
  namespace: openshift-adp
spec:
  # Run at 02:00 daily
  schedule: "0 2 * * *"

  # Make backups readable, sortable, unique
  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"

  template:
    includedNamespaces:
      - "*"

    # No PV data — just manifests
    snapshotVolumes: false
    defaultVolumesToFsBackup: false

    # Retain for one week (adjust later)
    ttl: 168h
dpa.yaml (new file, 55 lines)
@@ -0,0 +1,55 @@
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-minio
  namespace: openshift-adp
spec:
  # 1) Don't bother with internal image backup for your lab
  backupImages: false

  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        credential:
          name: cloud-credentials
          key: cloud
        objectStorage:
          bucket: oadp
          # 2) REQUIRED if bucket not dedicated to Velero, and also avoids that
          # "must have velero prefix when backupImages is not set to false" error.
          prefix: velero
        config:
          profile: default
          region: ap-southeast-2
          s3ForcePathStyle: "true"
          s3Url: http://minio.minio.svc.cluster.local:9000
          insecureSkipTLSVerify: "true"

  configuration:
    # 3) New-style File System Backup: nodeAgent instead of `restic:` block
    nodeAgent:
      enable: true
      # Keep behaviour close to what you had before
      uploaderType: restic
      # If you want to pin node-agent pods, it MUST be nested like this:
      # podConfig:
      #   nodeSelector:
      #     node-role.kubernetes.io/worker: ""
      #   # other pod options (tolerations, resources, etc.)
    velero:
      defaultPlugins:
        - openshift
        - aws
      disableFsBackup: false
      podConfig:
        resourceAllocations:
          limits:
            cpu: "500m"
            memory: "512Mi"
          requests:
            cpu: "100m"
            memory: "256Mi"

      logFormat: text
stateful-backups.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: daily-stateful
  namespace: openshift-adp
spec:
  # Run at 03:00 (after config-only)
  schedule: "0 3 * * *"

  # Timestamped names for long-term retention
  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"

  template:
    includedNamespaces:
      - gitea
      - gitea-actions
      - authentik
      # add others here as needed

    # No CSI snapshots; use nodeAgent/restic only
    snapshotVolumes: false
    defaultVolumesToFsBackup: true

    # Retain a bit longer
    ttl: 720h # 30 days
test-backup.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: 2mins
  namespace: openshift-adp
spec:
  schedule: "*/2 * * * *"
  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"
  template:
    hooks: {}
    includedNamespaces:
      - mailhog
    storageLocation: default
    defaultVolumesToFsBackup: true
    ttl: 0h1m0s
weekly-full-backup.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: weekly-broader-data
  namespace: openshift-adp
spec:
  # Every Sunday at 04:00
  schedule: "0 4 * * 0"

  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"

  template:
    includedNamespaces:
      - "*"
    excludedNamespaces:
      - peanutflix # Plex-like massive data namespaces
      - openshift-logging # if present
      - openshift-monitoring # noisy, large TSDB volumes

    snapshotVolumes: false
    defaultVolumesToFsBackup: true

    ttl: 672h # 28 days (672/24); was mislabelled "60 days"
Reference in New Issue
Block a user