#!/bin/bash
# =============================================================================
# backup-myapps.sh — run on the MAIN SERVER
#
# Backs up: Frappe, Nextcloud, Mautic, n8n, Odoo
#
# Storage tiers:
#   1. Local → /root/backups/
#   2. VM    → SSH tunnel → /backups/main-server/
#   3. Cloud → Cloudflare R2 (S3-compatible)
#
# Usage: ./backup-myapps.sh
# =============================================================================

# Abort on any error, on unset variables, and on failures anywhere in a pipeline.
set -euo pipefail
# Timestamped names for this backup run.
BACKUP_DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_NAME="myapps-backup-${BACKUP_DATE}"
BACKUP_DIR="/root/backups/${BACKUP_NAME}"
BACKUP_ARCHIVE="/root/backups/${BACKUP_NAME}.tar.gz"

# SSH settings for reaching the VM (tier-2 backup destination).
VM_USER="root"
VM_HOST="localhost"
VM_PORT="2223"
VM_KEY="/root/.ssh/contabo-key"
VM_DEST="/backups/main-server/"
# ── Cloudflare R2 config ─────────────────────────────────────────────────────
# Override via the environment, or via /root/.r2-credentials (sourced below).
R2_ACCOUNT_ID="${R2_ACCOUNT_ID:-35e00c230cc8066252a2d9890b69aea2}"
R2_BUCKET="${R2_BUCKET_NAME:-navitrends-backups}"
R2_ENDPOINT="https://${R2_ACCOUNT_ID}.r2.cloudflarestorage.com"
# AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY must be exported in the env
# or set in /root/.r2-credentials (sourced below).

CREDENTIALS_FILE="/root/.r2-credentials"
if [[ -f "$CREDENTIALS_FILE" ]]; then
  # shellcheck source=/dev/null
  source "$CREDENTIALS_FILE"
fi

# Status log consumed by the boot-check script.
BACKUP_LOG_FILE="/root/backups/backup-status.log"
MAX_BACKUPS=10
# ── Write status to log ──────────────────────────────────────────────────────
# Append one "timestamp | STATUS | name | message" line to $BACKUP_LOG_FILE.
#   $1 - status (SUCCESS or FAILED)
#   $2 - backup name
#   $3 - optional free-form message
log_status() {
  local when
  when=$(date '+%Y-%m-%d %H:%M:%S')
  printf '%s | %s | %s | %s\n' "$when" "$1" "$2" "${3:-}" >> "$BACKUP_LOG_FILE"
}
echo "========================================="
echo "📦 Starting Backup: $BACKUP_NAME"
echo " Apps: Frappe, Nextcloud, Mautic, n8n, Odoo"
echo " Tiers: Local → VM → ☁ Cloudflare R2"
echo "========================================="

# Create the staging directory; mkdir -p also creates /root/backups itself,
# so the previous separate `mkdir -p /root/backups` was redundant.
mkdir -p "$BACKUP_DIR"
cd "$BACKUP_DIR"
# --------------------------------------------------
# 1. Docker container list (filtered to your apps)
# --------------------------------------------------
echo ""
echo "📋 [1/7] Saving Docker container list..."
# Best-effort: `|| true` keeps the script alive if the docker daemon is down.
docker ps -a \
  --filter "name=frappe" \
  --filter "name=nextcloud" \
  --filter "name=mautic" \
  --filter "name=n8n" \
  --filter "name=odoo" \
  --format "table {{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}" \
  > docker-containers.txt 2>/dev/null || true
echo " ✅ Done"
# --------------------------------------------------
# 2. docker-compose files
# --------------------------------------------------
echo ""
echo "📄 [2/7] Saving docker-compose files..."
mkdir -p compose-files

# Copy each known app directory from $HOME if it exists. Paths are quoted
# (the old unquoted `~/$app` would word-split), and the cp result is checked
# with a real if/else instead of the misfire-prone `cmd && ok || fail` chain.
for app in frappe-setup odoo-clean nextcloud-setup mautic-setup n8n-setup; do
  if [ -d "$HOME/$app" ]; then
    if cp -r "$HOME/$app" compose-files/; then
      echo " ✅ $app"
    else
      echo " ⚠️ $app copy failed"
    fi
  else
    echo " ⏭️ $app not found — skipping"
  fi
done
# --------------------------------------------------
# 3. Docker volumes
# --------------------------------------------------
echo ""
echo "💾 [3/7] Backing up Docker volumes..."
mkdir -p volumes

# Named volumes belonging to the five apps; any that don't exist are skipped.
VOLUMES=(
  "frappe-setup_frappe-sites"
  "frappe-setup_mariadb-data"
  "nextcloud-setup_nextcloud-data"
  "nextcloud-setup_nextcloud-db-data"
  "mautic-setup_mautic-data"
  "mautic-setup_mautic-db-data"
  "n8n-setup_n8n-data"
  "n8n-setup_n8n-db-data"
  "odoo-clean_db-data"
  "odoo-clean_odoo-etc"
)

for vol in "${VOLUMES[@]}"; do
  if ! docker volume inspect "$vol" &>/dev/null; then
    echo " ⏭️ $vol — not found, skipping"
    continue
  fi
  echo -n " 📁 $vol ... "
  # Mount the volume read-only into a scratch alpine container and tar it.
  if docker run --rm \
      -v "${vol}:/source:ro" \
      -v "$(pwd)/volumes:/backup" \
      alpine \
      tar czf "/backup/${vol}.tar.gz" -C /source .; then
    echo "✅"
  else
    echo "⚠️ FAILED"
  fi
done
# --------------------------------------------------
# 4. Container inspect configs
# --------------------------------------------------
echo ""
echo "🔧 [4/7] Saving container inspect configs..."
mkdir -p container-configs
COUNT=0

# Dump `docker inspect` JSON for every container belonging to the five apps.
# Process substitution (not a pipe) keeps COUNT updates in the current shell.
while IFS= read -r cid; do
  name=$(docker inspect --format='{{.Name}}' "$cid" | sed 's/\///')
  if docker inspect "$cid" > "container-configs/${name}.json" 2>/dev/null; then
    COUNT=$((COUNT + 1))
  fi
done < <(docker ps -a \
  --filter "name=frappe" \
  --filter "name=nextcloud" \
  --filter "name=mautic" \
  --filter "name=n8n" \
  --filter "name=odoo" \
  --format "{{.ID}}")
echo " ✅ Saved $COUNT container configs"
# --------------------------------------------------
# 5. Extract important app config files
# --------------------------------------------------
echo ""
echo "⚙️ [5/7] Extracting app config files..."
mkdir -p configs

# Nextcloud's config.php lives inside the data volume. The output redirect
# creates the file before docker runs, so on failure we must delete the
# empty leftover (the old code left zero-byte junk files behind).
if docker run --rm \
    -v nextcloud-setup_nextcloud-data:/source:ro \
    alpine cat /source/config/config.php > configs/nextcloud-config.php 2>/dev/null; then
  echo " ✅ Nextcloud config.php"
else
  rm -f configs/nextcloud-config.php
  echo " ⏭️ Nextcloud config not found"
fi

# Frappe's site config is read straight out of the running container.
if docker exec frappe-erpnext \
    cat /home/frappe/frappe-bench/sites/erpnext.navitrends.ovh/site_config.json \
    > configs/frappe-site_config.json 2>/dev/null; then
  echo " ✅ Frappe site_config.json"
else
  rm -f configs/frappe-site_config.json
  echo " ⏭️ Frappe config not found"
fi
# --------------------------------------------------
# 6. Backup metadata + checksum
# --------------------------------------------------
echo ""
echo "📝 [6/7] Writing backup metadata..."

# Collect volume archives with a nullglob array instead of parsing `ls`.
# The old `$(ls ... | wc -l)` aborted the whole script under
# `set -eo pipefail` whenever zero volumes had been backed up.
shopt -s nullglob
VOLUME_FILES=(volumes/*.tar.gz)
shopt -u nullglob
VOLUME_COUNT=${#VOLUME_FILES[@]}

VOLUME_LIST="none"
if [ "$VOLUME_COUNT" -gt 0 ]; then
  VOLUME_LIST=$(printf '%s\n' "${VOLUME_FILES[@]##*/}")
fi

cat > backup-info.txt << EOF
Backup Name: $BACKUP_NAME
Backup Date: $(date)
Hostname: $(hostname)
Apps: Frappe, Nextcloud, Mautic, n8n, Odoo
Volumes: $VOLUME_COUNT volume(s) backed up
Docker info: $(docker --version)
Storage Tiers:
- Local: /root/backups/
- VM: ${VM_HOST}:${VM_PORT} → ${VM_DEST}
- Cloud: Cloudflare R2 → s3://${R2_BUCKET}/backups/

Volumes included:
${VOLUME_LIST}
EOF

echo "" >> backup-info.txt
echo "Volume SHA256 checksums:" >> backup-info.txt
# Keep sha256sum's native "hash  path" format so the file can be fed
# back to `sha256sum -c` (the old awk rewrite broke that).
for f in "${VOLUME_FILES[@]}"; do
  sha256sum "$f" >> backup-info.txt
done

echo " ✅ Done"
# --------------------------------------------------
# 7. Compress the backup
# --------------------------------------------------
echo ""
echo "🗜️ [7/7] Compressing backup..."
cd /root/backups
tar -czf "${BACKUP_NAME}.tar.gz" "${BACKUP_NAME}/"
COMPRESSED_SIZE=$(du -h "${BACKUP_NAME}.tar.gz" | cut -f1)
echo " ✅ Compressed size: $COMPRESSED_SIZE → $BACKUP_ARCHIVE"

sha256sum "${BACKUP_NAME}.tar.gz" > "${BACKUP_NAME}.tar.gz.sha256"
echo " ✅ Checksum written: ${BACKUP_NAME}.tar.gz.sha256"

# Remove the uncompressed staging dir. ${VAR:?} aborts if the variable is
# ever empty/unset, so this can never degenerate into `rm -rf` of the cwd.
rm -rf -- "${BACKUP_DIR:?}"
# --------------------------------------------------
# 8. Retention — keep only the latest MAX_BACKUPS
# --------------------------------------------------
echo ""
echo "🧹 [Retention] Keeping latest ${MAX_BACKUPS} backups..."

# Newest-first list of archives as an array. The old string-based version
# parsed `ls` output and counted with `grep -c '.tar.gz'` (unescaped dot);
# archive names are timestamp-generated, so line-splitting here is safe.
mapfile -t ARCHIVES < <(ls -t /root/backups/myapps-backup-*.tar.gz 2>/dev/null || true)
ARCHIVE_COUNT=${#ARCHIVES[@]}

if [ "$ARCHIVE_COUNT" -gt "$MAX_BACKUPS" ]; then
  # Everything past the first MAX_BACKUPS entries is pruned, plus its checksum.
  for old_file in "${ARCHIVES[@]:MAX_BACKUPS}"; do
    rm -f -- "$old_file" "${old_file}.sha256"
    echo " 🗑️ Deleted old backup: $(basename "$old_file")"
  done
else
  echo " ✅ ${ARCHIVE_COUNT}/${MAX_BACKUPS} backups — nothing to prune"
fi
# --------------------------------------------------
# 9. Send to VM over SSH [TIER 2]
# --------------------------------------------------
echo ""
echo "📤 [TIER 2] Sending backup to VM (${VM_HOST}:${VM_PORT})..."
# Run scp as the `if` condition: with `set -e`, a bare scp failure used to
# exit the script immediately, making the `[ $? -eq 0 ]` check dead code
# and skipping the "transfer failed" message entirely.
if scp -i "$VM_KEY" \
    -P "$VM_PORT" \
    -o StrictHostKeyChecking=no \
    -o ConnectTimeout=15 \
    "${BACKUP_NAME}.tar.gz" \
    "${VM_USER}@${VM_HOST}:${VM_DEST}"; then
  echo " ✅ Backup sent to VM successfully!"
  # Checksum transfer is best-effort.
  scp -i "$VM_KEY" -P "$VM_PORT" -o StrictHostKeyChecking=no \
    "${BACKUP_NAME}.tar.gz.sha256" "${VM_USER}@${VM_HOST}:${VM_DEST}" 2>/dev/null || true
else
  echo " ❌ VM transfer failed. Local backup still at: $BACKUP_ARCHIVE"
fi
# --------------------------------------------------
# 10. Upload to Cloudflare R2 [TIER 3 — CLOUD]
# --------------------------------------------------
echo ""
echo "☁️ [TIER 3] Uploading to Cloudflare R2..."
echo " Bucket: ${R2_BUCKET}"
echo " Endpoint: ${R2_ENDPOINT}"

# Prefer aws-cli for S3-compatible uploads; fall back to python3 + boto3.
# NOTE(review): this section was corrupted in the previous revision
# (truncated `--no-prth`, missing elif/heredoc opener) — reconstructed here.
# R2_STATUS is now set on EVERY path; previously the aws-cli path left it
# unset and the later `log_status ... r2=${R2_STATUS}` died under `set -u`.
if command -v aws &>/dev/null && [ -n "${AWS_ACCESS_KEY_ID:-}" ] && [ -n "${AWS_SECRET_ACCESS_KEY:-}" ]; then
  # Upload main archive
  if aws s3 cp "${BACKUP_NAME}.tar.gz" \
      "s3://${R2_BUCKET}/backups/${BACKUP_NAME}.tar.gz" \
      --endpoint-url "${R2_ENDPOINT}" \
      --no-progress; then
    echo " ✅ R2 upload complete: s3://${R2_BUCKET}/backups/${BACKUP_NAME}.tar.gz"
    R2_STATUS="uploaded via aws-cli"
  else
    echo " ❌ R2 upload failed"
    R2_STATUS="failed (aws-cli)"
  fi

  # Upload checksum (best-effort)
  aws s3 cp "${BACKUP_NAME}.tar.gz.sha256" \
    "s3://${R2_BUCKET}/backups/${BACKUP_NAME}.tar.gz.sha256" \
    --endpoint-url "${R2_ENDPOINT}" \
    --no-progress || true
elif command -v python3 &>/dev/null; then
  # boto3 fallback. The unquoted heredoc delimiter lets the shell expand
  # ${...} values into the Python source before it runs.
  if python3 - << PYEOF
import os, sys
os.environ.setdefault('R2_ACCOUNT_ID', '${R2_ACCOUNT_ID}')
os.environ.setdefault('R2_BUCKET_NAME', '${R2_BUCKET}')
try:
    import boto3
    client = boto3.client(
        's3',
        endpoint_url='${R2_ENDPOINT}',
        aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID',''),
        aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY',''),
        region_name='auto'
    )
    archive = '/root/backups/${BACKUP_NAME}.tar.gz'
    key = 'backups/${BACKUP_NAME}.tar.gz'
    size = os.path.getsize(archive)
    print(f' Uploading {size/1024/1024:.1f} MB...')
    client.upload_file(archive, '${R2_BUCKET}', key)
    print(' ✅ R2 upload complete (boto3 fallback)')
except Exception as e:
    print(f' ❌ R2 upload failed: {e}')
    sys.exit(1)
PYEOF
  then
    R2_STATUS="uploaded via boto3"
  else
    R2_STATUS="failed (boto3)"
  fi
else
  echo " ⚠️ Skipping R2 upload: neither aws-cli nor python3/boto3 available"
  echo " 💡 Install: pip install boto3 OR apt install awscli"
  echo " 💡 Then set: export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=..."
  R2_STATUS="skipped (no cli available)"
fi
# --------------------------------------------------
# 11. Write final status to log
# --------------------------------------------------
log_status "SUCCESS" "$BACKUP_NAME" "size=${COMPRESSED_SIZE} | r2=${R2_STATUS}"

# Human-readable summary of all three storage tiers.
printf '%s\n' \
  "" \
  "=========================================" \
  "✅ BACKUP COMPLETE — 3-tier storage" \
  "" \
  " 📦 Name: $BACKUP_NAME" \
  " 💾 Local: $BACKUP_ARCHIVE ($COMPRESSED_SIZE)" \
  " 🖥️ VM: ${VM_HOST}:${VM_DEST}${BACKUP_NAME}.tar.gz" \
  " ☁️ R2: s3://${R2_BUCKET}/backups/${BACKUP_NAME}.tar.gz" \
  "========================================="
# ── AES-256 encryption (optional, call manually) ─────────────────────────────
# Encrypt $BACKUP_ARCHIVE (→ .enc) and delete the plaintext archive.
# The passphrase comes from $BACKUP_ENC_PASSPHRASE when set; the historical
# hard-coded value remains the default for backward compatibility. It is
# handed to openssl via `-pass env:` so it never appears in `ps` output
# (the old `-pass pass:...` exposed it in the process list).
# SECURITY(review): rotate this default and move it into a credentials file.
encrypt_backup() {
  local pass="${BACKUP_ENC_PASSPHRASE:-Navitrends2024!}"
  echo "🔐 Chiffrement AES-256..."
  ENC_PASS="$pass" openssl enc -aes-256-cbc -pbkdf2 -pass env:ENC_PASS \
    -in "$BACKUP_ARCHIVE" \
    -out "${BACKUP_ARCHIVE}.enc"
  rm -f -- "$BACKUP_ARCHIVE"
  echo "✅ Archive chiffrée : ${BACKUP_ARCHIVE}.enc"
}
# Email a failure notice for this run. Requires the `mail` command; degrades
# to a stderr warning when it is absent so the failure-notification path
# cannot itself crash the script under `set -e`.
notify_failure() {
  echo "📧 Envoi notification échec..."
  if command -v mail > /dev/null 2>&1; then
    echo "Backup FAILED: $BACKUP_NAME" | \
      mail -s "[Navitrends] BACKUP FAILED - $(date)" ameniboukottaya@gmail.com
  else
    echo " ⚠️ 'mail' not installed — cannot send failure notification" >&2
  fi
}