commit 5b578e8543ba1cb9662aced714ca0fd821ba769e Author: Conan Scott Date: Tue Dec 9 15:30:35 2025 +1100 first commit diff --git a/config-backups.yaml b/config-backups.yaml new file mode 100644 index 0000000..13dccbf --- /dev/null +++ b/config-backups.yaml @@ -0,0 +1,22 @@ +apiVersion: velero.io/v1 +kind: Schedule +metadata: + name: daily-config + namespace: openshift-adp +spec: + # Run at 02:00 daily + schedule: "0 2 * * *" + + # Make backups readable, sortable, unique + nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}" + + template: + includedNamespaces: + - "*" + + # No PV data — just manifests + snapshotVolumes: false + defaultVolumesToFsBackup: false + + # Retain for one week (adjust later) + ttl: 168h diff --git a/dpa.yaml b/dpa.yaml new file mode 100644 index 0000000..7e61565 --- /dev/null +++ b/dpa.yaml @@ -0,0 +1,55 @@ +apiVersion: oadp.openshift.io/v1alpha1 +kind: DataProtectionApplication +metadata: + name: dpa-minio + namespace: openshift-adp +spec: + # 1) Don’t bother with internal image backup for your lab + backupImages: false + + backupLocations: + - name: default + velero: + provider: aws + default: true + credential: + name: cloud-credentials + key: cloud + objectStorage: + bucket: oadp + # 2) REQUIRED if bucket not dedicated to Velero, and also avoids that + # "must have velero prefix when backupImages is not set to false" error. + prefix: velero + config: + profile: default + region: ap-southeast-2 + s3ForcePathStyle: "true" + s3Url: http://minio.minio.svc.cluster.local:9000 + insecureSkipTLSVerify: "true" + + configuration: + # 3) New-style File System Backup: nodeAgent instead of `restic:` block + nodeAgent: + enable: true + # Keep behaviour close to what you had before + uploaderType: restic + # If you want to pin node-agent pods, it MUST be nested like this: + # podConfig: + # nodeSelector: + # node-role.kubernetes.io/worker: "" + # # other pod options (tolerations, resources, etc.) 
+ velero: + defaultPlugins: + - openshift + - aws + disableFsBackup: false + podConfig: + resourceAllocations: + limits: + cpu: "500m" + memory: "512Mi" + requests: + cpu: "100m" + memory: "256Mi" + + logFormat: text diff --git a/stateful-backups.yaml b/stateful-backups.yaml new file mode 100644 index 0000000..a6da91e --- /dev/null +++ b/stateful-backups.yaml @@ -0,0 +1,25 @@ +apiVersion: velero.io/v1 +kind: Schedule +metadata: + name: daily-stateful + namespace: openshift-adp +spec: + # Run at 03:00 (after config-only) + schedule: "0 3 * * *" + + # Timestamped names for long-term retention + nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}" + + template: + includedNamespaces: + - gitea + - gitea-actions + - authentik + # add others here as needed + + # No CSI snapshots; use nodeAgent/restic only + snapshotVolumes: false + defaultVolumesToFsBackup: true + + # Retain a bit longer + ttl: 720h # 30 days diff --git a/test-backup.yaml b/test-backup.yaml new file mode 100644 index 0000000..2c964fc --- /dev/null +++ b/test-backup.yaml @@ -0,0 +1,15 @@ +apiVersion: velero.io/v1 +kind: Schedule +metadata: + name: 2mins + namespace: openshift-adp +spec: + schedule: "*/2 * * * *" + nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}" + template: + hooks: {} + includedNamespaces: + - mailhog + storageLocation: default + defaultVolumesToFsBackup: true + ttl: 0h1m0s diff --git a/weekly-full-backup.yaml b/weekly-full-backup.yaml new file mode 100644 index 0000000..1dccf2b --- /dev/null +++ b/weekly-full-backup.yaml @@ -0,0 +1,23 @@ +apiVersion: velero.io/v1 +kind: Schedule +metadata: + name: weekly-broader-data + namespace: openshift-adp +spec: + # Every Sunday at 04:00 + schedule: "0 4 * * 0" + + nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}" + + template: + includedNamespaces: + - "*" + excludedNamespaces: + - peanutflix # Plex-like massive data namespaces + - openshift-logging # if present + - openshift-monitoring # noisy, large TSDB volumes + + snapshotVolumes: false 
+ defaultVolumesToFsBackup: true + + ttl: 672h # 28 days (4 weeks)