From b4ef1c861dcff328a1b194992112ba9096059f97 Mon Sep 17 00:00:00 2001
From: Conan Scott
Date: Wed, 24 Dec 2025 21:01:32 +1100
Subject: [PATCH] Cleanup

---
 README.md                             |   3 +-
 dm-smoke.yaml                         |  20 ---
 openebs-localpb-lvm-retain-cleanup.sh | 199 --------------------------
 test-backup.yaml                      |  15 --
 weekly-full-backup.yaml               |  23 ---
 5 files changed, 1 insertion(+), 259 deletions(-)
 delete mode 100644 dm-smoke.yaml
 delete mode 100755 openebs-localpb-lvm-retain-cleanup.sh
 delete mode 100644 test-backup.yaml
 delete mode 100644 weekly-full-backup.yaml

diff --git a/README.md b/README.md
index b829171..b6024a1 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,6 @@
-
YAML Config for OADP

 - Data Protection Application
 - Config backup
 - Stateful backup of selected namespaces
-- Test backup yaml
\ No newline at end of file
+
diff --git a/dm-smoke.yaml b/dm-smoke.yaml
deleted file mode 100644
index ba6ae2b..0000000
--- a/dm-smoke.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-apiVersion: velero.io/v1
-kind: Backup
-metadata:
-  name: dm-smoke
-  namespace: openshift-adp
-spec:
-  includedNamespaces:
-  - gitea
-
-  # Use your default BSL (you set default: true), but being explicit is fine:
-  storageLocation: default
-
-  # Take CSI snapshots (not filesystem copy)…
-  snapshotVolumes: true
-
-  # …and then move the snapshot data into object storage (Data Mover).
-  # This is the YAML equivalent of `--snapshot-move-data`.
-  snapshotMoveData: true
-
-  ttl: 168h0m0s
diff --git a/openebs-localpb-lvm-retain-cleanup.sh b/openebs-localpb-lvm-retain-cleanup.sh
deleted file mode 100755
index 8e82764..0000000
--- a/openebs-localpb-lvm-retain-cleanup.sh
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-SC="${SC:-local-nvme-retain}"
-DO_REMOVE="${DO_REMOVE:-0}"   # 0=dry-run, 1=delete
-FORCE="${FORCE:-0}"           # 0=prompt, 1=no prompt (only if DO_REMOVE=1)
-VG_PIN="${VG_PIN:-}"          # optionally pin expected VG (recommended)
-
-AUDIT_DIR="${AUDIT_DIR:-/var/tmp/openebs-localpv-lvm-cleanup}"
-
-usage() {
-  cat <<EOF
-Usage:
-  $0 [--sc <storageclass>] [--remove] [--force] [--vg <vg_name>]
-
-Examples:
-  # Dry-run (auto-detect VG from PVs; refuses if multiple VGs)
-  $0
-
-  # Dry-run with pinned VG expectation
-  $0 --vg vg_nvme_local
-
-  # Remove delta with prompt
-  $0 --vg vg_nvme_local --remove
-
-  # Remove delta without prompt
-  $0 --vg vg_nvme_local --remove --force
-
-Env:
-  SC=local-nvme-retain
-  DO_REMOVE=0|1
-  FORCE=0|1
-  VG_PIN=vg_nvme_local
-  AUDIT_DIR=/var/tmp/...
-
-Requires: oc, jq, lvs, lvremove
-EOF
-}
-
-while [[ $# -gt 0 ]]; do
-  case "$1" in
-    --sc) SC="$2"; shift 2;;
-    --remove) DO_REMOVE=1; shift;;
-    --force) FORCE=1; shift;;
-    --vg) VG_PIN="$2"; shift 2;;
-    -h|--help) usage; exit 0;;
-    *) echo "Unknown arg: $1" >&2; usage; exit 2;;
-  esac
-done
-
-need() { command -v "$1" >/dev/null 2>&1 || { echo "Missing required command: $1" >&2; exit 1; }; }
-need oc
-need jq
-need lvs
-need lvremove
-
-ts="$(date +%Y%m%d-%H%M%S)"
-run_dir="${AUDIT_DIR}/${ts}"
-mkdir -p "$run_dir"
-
-log() { printf '%s %s\n' "[$(date +%H:%M:%S)]" "$*" | tee -a "$run_dir/run.log" >&2; }
-
-log "Start: SC=${SC} DO_REMOVE=${DO_REMOVE} FORCE=${FORCE} VG_PIN=${VG_PIN:-}"
-log "Audit: ${run_dir}"
-
-# 1) Bound PVCs for SC
-log "Collecting Bound PVCs for storage class '${SC}'"
-oc get pvc -A -o json > "$run_dir/pvcs.json"
-
-jq -r --arg sc "$SC" '
-  .items[]
-  | select(.spec.storageClassName==$sc)
-  | select(.status.phase=="Bound")
-  | [.metadata.namespace, .metadata.name, .spec.volumeName]
-  | @tsv
-' "$run_dir/pvcs.json" > "$run_dir/pvcs.tsv"
-
-pvc_count="$(wc -l < "$run_dir/pvcs.tsv" | awk '{print $1}')"
-if [[ "$pvc_count" -eq 0 ]]; then
-  log "No Bound PVCs found for SC=${SC}. Nothing to do."
-  exit 0
-fi
-log "Bound PVCs found: ${pvc_count}"
-
-cut -f3 "$run_dir/pvcs.tsv" | sort -u > "$run_dir/inuse.pvs"
-pv_count="$(wc -l < "$run_dir/inuse.pvs" | awk '{print $1}')"
-log "Unique PVs referenced: ${pv_count}"
-
-# 2) Fetch PVs
-log "Fetching PV objects"
-: > "$run_dir/pvs.jsonl"
-while read -r pv; do
-  oc get pv "$pv" -o json >> "$run_dir/pvs.jsonl"
-  echo >> "$run_dir/pvs.jsonl"
-done < "$run_dir/inuse.pvs"
-
-# 3) Filter to OpenEBS localpv-lvm PVs and extract (vg, lv)
-log "Extracting (vg, lv) for OpenEBS LocalPV-LVM PVs"
-jq -r '
-  select(.spec.csi.driver=="local.csi.openebs.io")
-  | select(.spec.csi.volumeAttributes["openebs.io/cas-type"]=="localpv-lvm")
-  | [
-      (.spec.csi.volumeAttributes["openebs.io/volgroup"] // ""),
-      (.spec.csi.volumeHandle // "")
-    ]
-  | @tsv
-' "$run_dir/pvs.jsonl" \
-| awk 'NF==2 && $1!="" && $2!="" {print}' \
-| sort -u \
-> "$run_dir/inuse.vg_lv.tsv"
-
-inuse_pairs="$(wc -l < "$run_dir/inuse.vg_lv.tsv" | awk '{print $1}')"
-if [[ "$inuse_pairs" -eq 0 ]]; then
-  log "ERROR: Found no PVs matching OpenEBS localpv-lvm in the PV set for SC=${SC}."
-  log "Either SC doesn't correspond to localpv-lvm, or PVs are encoded differently."
-  exit 1
-fi
-log "Found referenced OpenEBS localpv-lvm volumes: ${inuse_pairs}"
-
-cut -f1 "$run_dir/inuse.vg_lv.tsv" | sort -u > "$run_dir/inuse.vgs"
-vg_count="$(wc -l < "$run_dir/inuse.vgs" | awk '{print $1}')"
-
-if [[ "$vg_count" -ne 1 ]]; then
-  log "ERROR: Referenced PVs span multiple VGs:"
-  cat "$run_dir/inuse.vgs" | tee -a "$run_dir/run.log" >&2
-  log "Refusing to proceed without an explicit --vg to scope deletion."
-  exit 1
-fi
-
-VG="$(cat "$run_dir/inuse.vgs")"
-
-if [[ -n "$VG_PIN" && "$VG_PIN" != "$VG" ]]; then
-  log "ERROR: VG pin mismatch. PVs indicate VG='${VG}', but you pinned '${VG_PIN}'."
-  exit 1
-fi
-
-log "Target VG: ${VG}"
-
-cut -f2 "$run_dir/inuse.vg_lv.tsv" | sort -u > "$run_dir/inuse.lvs"
-inuse_lv_count="$(wc -l < "$run_dir/inuse.lvs" | awk '{print $1}')"
-log "Unique referenced LV names: ${inuse_lv_count}"
-
-# 4) Actual LVs in VG
-log "Listing actual LVs in VG=${VG}"
-lvs --noheadings -o lv_name "$VG" | awk '{$1=$1};1' | sort > "$run_dir/all.lvs"
-all_lv_count="$(wc -l < "$run_dir/all.lvs" | awk '{print $1}')"
-log "Total LVs in VG=${VG}: ${all_lv_count}"
-
-# 5) Delta
-log "Computing delta (LVs in VG not referenced by Bound PVCs/PVs)"
-comm -23 "$run_dir/all.lvs" "$run_dir/inuse.lvs" > "$run_dir/delta.lvs"
-delta_count="$(wc -l < "$run_dir/delta.lvs" | awk '{print $1}')"
-log "Delta candidates: ${delta_count}"
-
-echo
-echo "=== SUMMARY (audit: $run_dir) ==="
-echo "SC=${SC}"
-echo "VG=${VG}"
-echo "Bound PVCs in SC: ${pvc_count}"
-echo "Unique PVs referenced: ${pv_count}"
-echo "Referenced LVs (OpenEBS localpv-lvm): ${inuse_lv_count}"
-echo "Total LVs in VG: ${all_lv_count}"
-echo "Delta candidates: ${delta_count}"
-echo
-
-if [[ "$delta_count" -eq 0 ]]; then
-  log "No delta found. Exiting."
-  exit 0
-fi
-
-echo "Delta list:"
-cat "$run_dir/delta.lvs"
-echo
-
-# 6) Remove if enabled
-if [[ "$DO_REMOVE" -ne 1 ]]; then
-  log "Dry-run only. Re-run with --remove to delete delta LVs."
-  exit 0
-fi
-
-if [[ "$FORCE" -ne 1 ]]; then
-  echo "About to run lvremove on ${delta_count} LVs in VG=${VG}."
-  echo "Type EXACTLY: DELETE to proceed:"
-  read -r confirm
-  if [[ "$confirm" != "DELETE" ]]; then
-    log "Confirmation not received. Aborting."
-    exit 1
-  fi
-fi
-
-log "Removing delta LVs"
-while read -r lv; do
-  [[ -z "$lv" ]] && continue
-  log "lvremove -y /dev/${VG}/${lv}"
-  lvremove -y "/dev/${VG}/${lv}"
-done < "$run_dir/delta.lvs"
-
-log "Completed."
-echo "Done. Audit: $run_dir"
diff --git a/test-backup.yaml b/test-backup.yaml
deleted file mode 100644
index 3f819f4..0000000
--- a/test-backup.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: velero.io/v1
-kind: Schedule
-metadata:
-  name: 2mins
-  namespace: openshift-adp
-spec:
-  schedule: "*/2 * * * *"
-  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"
-  template:
-    hooks: {}
-    includedNamespaces:
-    - gitea
-    storageLocation: default
-    defaultVolumesToFsBackup: true
-    ttl: 0h1m0s
diff --git a/weekly-full-backup.yaml b/weekly-full-backup.yaml
deleted file mode 100644
index 126e14b..0000000
--- a/weekly-full-backup.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-apiVersion: velero.io/v1
-kind: Schedule
-metadata:
-  name: weekly-broader-data
-  namespace: openshift-adp
-spec:
-  # Every Sunday at 04:00 AEST
-  schedule: "0 17 * * 0"
-
-  nameTemplate: "{{ .ScheduleName }}-{{ .Timestamp }}"
-
-  template:
-    includedNamespaces:
-    - "*"
-    excludedNamespaces:
-    - peanutflix            # Plex-like massive data namespaces
-    - openshift-logging     # if present
-    - openshift-monitoring  # noisy, large TSDB volumes
-
-    snapshotVolumes: false
-    defaultVolumesToFsBackup: true
-
-    ttl: 672h # 60 days