2025-12-24 21:01:32 +11:00
parent f570587766
commit b4ef1c861d
5 changed files with 1 addition and 259 deletions


@@ -1,7 +1,6 @@
<H1>YAML Config for OADP</H1>
- Data Protection Application
- Config backup
- Stateful backup of selected namespaces
- Test backup yaml


@@ -1,20 +0,0 @@
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: dm-smoke
  namespace: openshift-adp
spec:
  includedNamespaces:
    - gitea
  # Use your default BSL (you set default: true), but being explicit is fine:
  storageLocation: default
  # Take CSI snapshots (not filesystem copy)…
  snapshotVolumes: true
  # …and then move the snapshot data into object storage (Data Mover).
  # This is the YAML equivalent of `--snapshot-move-data`.
  snapshotMoveData: true
  ttl: 168h0m0s
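
For reference, the CLI equivalent of the manifest above (a sketch; assumes the velero client is installed and pointed at this cluster):

# Same smoke-test backup via the velero CLI instead of YAML:
velero backup create dm-smoke \
  -n openshift-adp \
  --include-namespaces gitea \
  --storage-location default \
  --snapshot-volumes=true \
  --snapshot-move-data \
  --ttl 168h0m0s

# Follow progress, including the Data Mover upload:
velero backup describe dm-smoke -n openshift-adp --details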


@@ -1,199 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SC="${SC:-local-nvme-retain}"
DO_REMOVE="${DO_REMOVE:-0}" # 0=dry-run, 1=delete
FORCE="${FORCE:-0}" # 0=prompt, 1=no prompt (only if DO_REMOVE=1)
VG_PIN="${VG_PIN:-}" # optionally pin expected VG (recommended)
AUDIT_DIR="${AUDIT_DIR:-/var/tmp/openebs-localpv-lvm-cleanup}"
usage() {
  cat <<EOF
Usage:
  $0 [--sc <storageclass>] [--remove] [--force] [--vg <vgname>]
Examples:
  # Dry-run (auto-detect VG from PVs; refuses if multiple VGs)
  $0
  # Dry-run with pinned VG expectation
  $0 --vg vg_nvme_local
  # Remove delta with prompt
  $0 --vg vg_nvme_local --remove
  # Remove delta without prompt
  $0 --vg vg_nvme_local --remove --force
Env:
  SC=local-nvme-retain
  DO_REMOVE=0|1
  FORCE=0|1
  VG_PIN=vg_nvme_local
  AUDIT_DIR=/var/tmp/...
Requires: oc, jq, lvs, lvremove
EOF
}
while [[ $# -gt 0 ]]; do
  case "$1" in
    --sc) SC="$2"; shift 2;;
    --remove) DO_REMOVE=1; shift;;
    --force) FORCE=1; shift;;
    --vg) VG_PIN="$2"; shift 2;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown arg: $1" >&2; usage; exit 2;;
  esac
done
need() { command -v "$1" >/dev/null 2>&1 || { echo "Missing required command: $1" >&2; exit 1; }; }
need oc
need jq
need lvs
need lvremove
ts="$(date +%Y%m%d-%H%M%S)"
run_dir="${AUDIT_DIR}/${ts}"
mkdir -p "$run_dir"
log() { printf '%s %s\n' "[$(date +%H:%M:%S)]" "$*" | tee -a "$run_dir/run.log" >&2; }
log "Start: SC=${SC} DO_REMOVE=${DO_REMOVE} FORCE=${FORCE} VG_PIN=${VG_PIN:-<none>}"
log "Audit: ${run_dir}"
# 1) Bound PVCs for SC
log "Collecting Bound PVCs for storage class '${SC}'"
oc get pvc -A -o json > "$run_dir/pvcs.json"
jq -r --arg sc "$SC" '
  .items[]
  | select(.spec.storageClassName==$sc)
  | select(.status.phase=="Bound")
  | [.metadata.namespace, .metadata.name, .spec.volumeName]
  | @tsv
' "$run_dir/pvcs.json" > "$run_dir/pvcs.tsv"
pvc_count="$(wc -l < "$run_dir/pvcs.tsv" | awk '{print $1}')"
if [[ "$pvc_count" -eq 0 ]]; then
log "No Bound PVCs found for SC=${SC}. Nothing to do."
exit 0
fi
log "Bound PVCs found: ${pvc_count}"
cut -f3 "$run_dir/pvcs.tsv" | sort -u > "$run_dir/inuse.pvs"
pv_count="$(wc -l < "$run_dir/inuse.pvs" | awk '{print $1}')"
log "Unique PVs referenced: ${pv_count}"
# 2) Fetch PVs
log "Fetching PV objects"
: > "$run_dir/pvs.jsonl"
while read -r pv; do
  oc get pv "$pv" -o json >> "$run_dir/pvs.jsonl"
  echo >> "$run_dir/pvs.jsonl"
done < "$run_dir/inuse.pvs"
# 3) Filter to OpenEBS localpv-lvm PVs and extract (vg, lv)
log "Extracting (vg, lv) for OpenEBS LocalPV-LVM PVs"
jq -r '
  select(.spec.csi.driver=="local.csi.openebs.io")
  | select(.spec.csi.volumeAttributes["openebs.io/cas-type"]=="localpv-lvm")
  | [
      (.spec.csi.volumeAttributes["openebs.io/volgroup"] // ""),
      (.spec.csi.volumeHandle // "")
    ]
  | @tsv
' "$run_dir/pvs.jsonl" \
  | awk 'NF==2 && $1!="" && $2!="" {print}' \
  | sort -u \
  > "$run_dir/inuse.vg_lv.tsv"
inuse_pairs="$(wc -l < "$run_dir/inuse.vg_lv.tsv" | awk '{print $1}')"
if [[ "$inuse_pairs" -eq 0 ]]; then
log "ERROR: Found no PVs matching OpenEBS localpv-lvm in the PV set for SC=${SC}."
log "Either SC doesn't correspond to localpv-lvm, or PVs are encoded differently."
exit 1
fi
log "Found referenced OpenEBS localpv-lvm volumes: ${inuse_pairs}"
cut -f1 "$run_dir/inuse.vg_lv.tsv" | sort -u > "$run_dir/inuse.vgs"
vg_count="$(wc -l < "$run_dir/inuse.vgs" | awk '{print $1}')"
if [[ "$vg_count" -ne 1 ]]; then
log "ERROR: Referenced PVs span multiple VGs:"
cat "$run_dir/inuse.vgs" | tee -a "$run_dir/run.log" >&2
log "Refusing to proceed without an explicit --vg to scope deletion."
exit 1
fi
VG="$(cat "$run_dir/inuse.vgs")"
if [[ -n "$VG_PIN" && "$VG_PIN" != "$VG" ]]; then
log "ERROR: VG pin mismatch. PVs indicate VG='${VG}', but you pinned '${VG_PIN}'."
exit 1
fi
log "Target VG: ${VG}"
cut -f2 "$run_dir/inuse.vg_lv.tsv" | sort -u > "$run_dir/inuse.lvs"
inuse_lv_count="$(wc -l < "$run_dir/inuse.lvs" | awk '{print $1}')"
log "Unique referenced LV names: ${inuse_lv_count}"
# 4) Actual LVs in VG
log "Listing actual LVs in VG=${VG}"
lvs --noheadings -o lv_name "$VG" | awk '{$1=$1};1' | sort > "$run_dir/all.lvs"
all_lv_count="$(wc -l < "$run_dir/all.lvs" | awk '{print $1}')"
log "Total LVs in VG=${VG}: ${all_lv_count}"
# 5) Delta
log "Computing delta (LVs in VG not referenced by Bound PVCs/PVs)"
comm -23 "$run_dir/all.lvs" "$run_dir/inuse.lvs" > "$run_dir/delta.lvs"
delta_count="$(wc -l < "$run_dir/delta.lvs" | awk '{print $1}')"
log "Delta candidates: ${delta_count}"
echo
echo "=== SUMMARY (audit: $run_dir) ==="
echo "SC=${SC}"
echo "VG=${VG}"
echo "Bound PVCs in SC: ${pvc_count}"
echo "Unique PVs referenced: ${pv_count}"
echo "Referenced LVs (OpenEBS localpv-lvm): ${inuse_lv_count}"
echo "Total LVs in VG: ${all_lv_count}"
echo "Delta candidates: ${delta_count}"
echo
if [[ "$delta_count" -eq 0 ]]; then
log "No delta found. Exiting."
exit 0
fi
echo "Delta list:"
cat "$run_dir/delta.lvs"
echo
# 6) Remove if enabled
if [[ "$DO_REMOVE" -ne 1 ]]; then
log "Dry-run only. Re-run with --remove to delete delta LVs."
exit 0
fi
if [[ "$FORCE" -ne 1 ]]; then
echo "About to run lvremove on ${delta_count} LVs in VG=${VG}."
echo "Type EXACTLY: DELETE to proceed:"
read -r confirm
if [[ "$confirm" != "DELETE" ]]; then
log "Confirmation not received. Aborting."
exit 1
fi
fi
log "Removing delta LVs"
while read -r lv; do
  [[ -z "$lv" ]] && continue
  log "lvremove -y /dev/${VG}/${lv}"
  lvremove -y "/dev/${VG}/${lv}"
done < "$run_dir/delta.lvs"
log "Completed."
echo "Done. Audit: $run_dir"


@@ -1,15 +0,0 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: 2mins
  namespace: openshift-adp
spec:
  schedule: "*/2 * * * *"
  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"
  template:
    hooks: {}
    includedNamespaces:
      - gitea
    storageLocation: default
    defaultVolumesToFsBackup: true
    ttl: 0h1m0s
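
To watch the smoke-test schedule fire (a sketch; assumes the oc client and access to the openshift-adp namespace):

# Backups stamped out every 2 minutes, named per the nameTemplate above:
oc -n openshift-adp get schedules.velero.io 2mins
oc -n openshift-adp get backups.velero.io --sort-by=.metadata.creationTimestamp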


@@ -1,23 +0,0 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: weekly-broader-data
  namespace: openshift-adp
spec:
  # Sundays at 17:00 UTC (early Monday morning in Australian Eastern time)
  schedule: "0 17 * * 0"
  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"
  template:
    includedNamespaces:
      - "*"
    excludedNamespaces:
      - peanutflix            # Plex-like massive data namespaces
      - openshift-logging     # if present
      - openshift-monitoring  # noisy, large TSDB volumes
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    ttl: 672h # 28 days
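
To exercise this schedule without waiting for Sunday, an ad-hoc run can be created from its template (a sketch using the velero CLI's --from-schedule flag):

# Immediate backup built from the schedule's template; the name is generated:
velero backup create -n openshift-adp --from-schedule weekly-broader-data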