Initial commit: CloudOps infrastructure platform
This commit is contained in:
401
scripts/restore-myapps.sh
Executable file
401
scripts/restore-myapps.sh
Executable file
@@ -0,0 +1,401 @@
|
||||
#!/bin/bash
# ==================================================================
# restore-myapps.sh — Smart Restore Script
#
# Usage:
#   Local (directly on the server/VM):
#     ./restore-myapps.sh
#   Remote (from the VM targeting the main server, or vice versa):
#     ./restore-myapps.sh --remote <IP> <USER> [--key /path/to/key | --password]
#
# Features:
#   * Skips containers that are already healthy/running
#   * Applies all known post-restore fixes per app
#   * Detects target IP automatically or uses provided one
#   * Works whether run locally or proxied over SSH
# ==================================================================

# NOTE: '-e' is deliberately omitted — the restore is best-effort and
# should keep going past individual failures.
set -uo pipefail
|
||||
|
||||
# --------------------------------------------------
# Argument parsing
#
# Recognized flags:
#   --remote <IP> [USER]   run the restore on a remote host (USER defaults to root)
#   --key <path>           SSH private key for the remote connection
#   --password             prompt for an SSH password (requires sshpass)
# Unknown arguments are silently ignored.
# Results are exposed through the REMOTE_* / SSH_* / USE_PASSWORD globals.
# --------------------------------------------------
parse_args() {
  REMOTE_MODE=false
  REMOTE_IP=""
  REMOTE_USER="root"
  SSH_KEY=""
  SSH_PASSWORD=""
  USE_PASSWORD=false

  while [[ $# -gt 0 ]]; do
    case "$1" in
      --remote)
        REMOTE_MODE=true
        REMOTE_IP="${2:-}"
        # USER is optional: only consume a third argument when it exists
        # and is not another flag. The original unconditional 'shift 3'
        # either swallowed the next flag as USER, or — when fewer args
        # remained — failed to shift at all and looped forever.
        if [[ $# -ge 3 && "$3" != --* ]]; then
          REMOTE_USER="$3"
          shift 3
        elif [[ $# -ge 2 ]]; then
          shift 2
        else
          shift
        fi
        ;;
      --key)
        SSH_KEY="${2:-}"
        if [[ $# -ge 2 ]]; then
          shift 2
        else
          shift
        fi
        ;;
      --password)
        USE_PASSWORD=true
        shift
        ;;
      *)
        shift
        ;;
    esac
  done
}

parse_args "$@"
|
||||
|
||||
# --------------------------------------------------
# Remote mode: copy this script + backup to the target host and run it there.
#
# Fixes over the original version:
#   * the sshpass-based SSH/SCP command strings were built and then
#     immediately overwritten by plain ssh/scp, so --password never
#     actually authenticated;
#   * commands are now built from arrays, so a key path or password
#     containing spaces survives quoting;
#   * the remote mkdir and the scp copy are explicitly error-checked.
# --------------------------------------------------
if [ "$REMOTE_MODE" = true ]; then
  if [ -z "$REMOTE_IP" ]; then
    echo "❌ --remote requires an IP address."
    echo "   Usage: $0 --remote <IP> [USER] [--key /path/key]"
    exit 1
  fi

  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  REMOTE_DEST="/tmp/restore-session-$(date +%s)"

  # Common SSH/SCP options.
  SSH_OPTS=(-o StrictHostKeyChecking=no -o ConnectTimeout=15)
  if [ -n "$SSH_KEY" ]; then
    SSH_OPTS+=(-i "$SSH_KEY")
  fi

  # Optional sshpass prefix for password authentication.
  AUTH_PREFIX=()
  if [ "$USE_PASSWORD" = true ]; then
    if ! command -v sshpass &>/dev/null; then
      echo "❌ sshpass not installed. Install it with: apt install sshpass"
      exit 1
    fi
    read -r -s -p "SSH password for ${REMOTE_USER}@${REMOTE_IP}: " SSH_PASSWORD
    echo ""
    AUTH_PREFIX=(sshpass -p "$SSH_PASSWORD")
  fi

  # Helpers. The ${arr[@]+...} guard keeps 'set -u' happy when the
  # prefix array is empty (pre-4.4 bash treats it as unbound).
  remote_ssh() {
    ${AUTH_PREFIX[@]+"${AUTH_PREFIX[@]}"} ssh "${SSH_OPTS[@]}" \
      "${REMOTE_USER}@${REMOTE_IP}" "$@"
  }
  remote_scp() {
    ${AUTH_PREFIX[@]+"${AUTH_PREFIX[@]}"} scp "${SSH_OPTS[@]}" "$@"
  }

  echo "========================================="
  echo "📡 REMOTE RESTORE MODE"
  echo " Target: ${REMOTE_USER}@${REMOTE_IP}"
  echo " Backup: $SCRIPT_DIR"
  echo "========================================="

  echo ""
  echo "📤 Copying backup to remote server..."
  if ! remote_ssh "mkdir -p $REMOTE_DEST"; then
    echo "❌ Failed to create staging directory on remote server."
    exit 1
  fi
  if ! remote_scp -r "$SCRIPT_DIR/." "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_DEST}/"; then
    echo "❌ Failed to copy backup to remote server."
    exit 1
  fi

  echo "✅ Backup copied."
  echo ""
  echo "🚀 Running restore on remote server..."
  remote_ssh "chmod +x $REMOTE_DEST/restore-myapps.sh && cd $REMOTE_DEST && bash restore-myapps.sh"

  echo ""
  echo "🧹 Cleaning up remote temp files..."
  remote_ssh "rm -rf $REMOTE_DEST"

  echo "========================================="
  echo "✅ Remote restore complete on $REMOTE_IP"
  echo "========================================="
  exit 0
fi
|
||||
|
||||
# ===================================================
# LOCAL RESTORE (runs directly on the target machine)
# ===================================================

# Resolve the backup directory (where this script lives) and work from it;
# all later relative paths (volumes/, compose-files/) hang off SCRIPT_DIR.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Detect this machine's IP: first non-loopback IPv4 address reported by
# 'ip addr'. NOTE(review): 'grep -P' requires GNU grep — confirm target
# hosts are Linux with coreutils/iproute2.
VM_IP=$(ip -4 addr show | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | grep -v 127.0.0.1 | head -1)

echo "========================================="
echo "🔄 Smart Restore — LOCAL MODE"
echo " Machine IP: $VM_IP"
echo " Backup dir: $SCRIPT_DIR"
echo "========================================="
|
||||
|
||||
# --------------------------------------------------
# container_is_healthy NAME
# Succeeds (exit 0 → "skip this container") when NAME is running and its
# Docker health check — if it defines one — reports healthy.
# Fails (exit 1 → "restore it") when the container is missing, stopped,
# or unhealthy.
# --------------------------------------------------
container_is_healthy() {
  local ctr="$1"
  local run_state health_state

  # A missing container makes 'docker inspect' fail; map that to sentinels.
  run_state=$(docker inspect --format='{{.State.Status}}' "$ctr" 2>/dev/null || echo "missing")
  health_state=$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}' "$ctr" 2>/dev/null || echo "none")

  # Not running at all → needs a restore.
  [ "$run_state" = "running" ] || return 1

  case "$health_state" in
    healthy|none) return 0 ;;  # running and (healthy or no healthcheck) → skip
    *)            return 1 ;;  # running but unhealthy → restore
  esac
}
|
||||
|
||||
# --------------------------------------------------
# STEP 1 — Restore volumes.
# A volume is skipped when the container that owns it is already healthy.
# --------------------------------------------------
echo ""
echo "========================================="
echo "📦 STEP 1 — Restoring Volumes"
echo "========================================="

# Map: volume name (tarball basename) → container that owns it,
# used for the health check above.
declare -A VOLUME_OWNERS=(
  ["frappe-setup_frappe-sites"]="frappe-erpnext"
  ["frappe-setup_mariadb-data"]="frappe-mariadb"
  ["nextcloud-setup_nextcloud-data"]="nextcloud-app"
  ["nextcloud-setup_nextcloud-db-data"]="nextcloud-db"
  ["mautic-setup_mautic-data"]="mautic-app"
  ["mautic-setup_mautic-db-data"]="mautic-mariadb"
  ["n8n-setup_n8n-data"]="n8n-app"
  ["n8n-setup_n8n-db-data"]="n8n-db"
  ["odoo-clean_db-data"]="odoo-clean-db-1"
  ["odoo-clean_odoo-etc"]="odoo-clean-odoo-1"
)

# Guard the cd: the original kept going in the wrong directory when the
# backup contained no volumes/ folder.
if cd "$SCRIPT_DIR/volumes" 2>/dev/null; then
  for backup in *.tar.gz; do
    [ -f "$backup" ] || continue
    volume=$(basename "$backup" .tar.gz)
    owner="${VOLUME_OWNERS[$volume]:-}"

    # If the owning container is healthy, leave its data alone.
    if [ -n "$owner" ] && container_is_healthy "$owner"; then
      echo " ⏭️ $volume — container '$owner' is healthy, skipping"
      continue
    fi

    echo -n " 📁 Restoring $volume ... "
    docker volume create "$volume" &>/dev/null || true
    # Unpack the tarball into the (possibly fresh) volume via a
    # throwaway Alpine container.
    docker run --rm \
      -v "${volume}:/target" \
      -v "$(pwd):/backup" \
      alpine \
      sh -c "cd /target && tar xzf /backup/$backup" \
      && echo "✅" || echo "⚠️ FAILED"
  done
  cd "$SCRIPT_DIR"
else
  echo " ⚠️ No volumes directory at $SCRIPT_DIR/volumes — skipping volume restore"
fi
|
||||
|
||||
# --------------------------------------------------
# STEP 2 — Bring up each application's compose stack,
# skipping apps whose main container is already healthy.
# --------------------------------------------------
echo ""
echo "========================================="
echo "🚀 STEP 2 — Starting Containers"
echo "========================================="

# App label → compose directory (under compose-files/).
declare -A APP_DIRS=(
  ["Frappe"]="frappe-setup"
  ["Odoo"]="odoo-clean"
  ["Nextcloud"]="nextcloud-setup"
  ["Mautic"]="mautic-setup"
  ["n8n"]="n8n-setup"
)

# App label → container whose health decides whether the app is skipped.
declare -A APP_MAIN_CONTAINER=(
  ["Frappe"]="frappe-erpnext"
  ["Odoo"]="odoo-clean-odoo-1"
  ["Nextcloud"]="nextcloud-app"
  ["Mautic"]="mautic-app"
  ["n8n"]="n8n-app"
)

cd "$SCRIPT_DIR/compose-files"
for app in Frappe Odoo Nextcloud Mautic n8n; do
  compose_dir="${APP_DIRS[$app]}"
  probe_ctr="${APP_MAIN_CONTAINER[$app]}"

  if container_is_healthy "$probe_ctr"; then
    echo " ⏭️ $app — already running and healthy, skipping"
  elif [ -d "$compose_dir" ]; then
    echo " 🚀 Starting $app..."
    # Subshell keeps the working directory stable instead of cd / cd ..
    ( cd "$compose_dir" && docker-compose up -d 2>&1 | tail -3 )
  else
    echo " ⏭️ $app compose dir '$compose_dir' not found, skipping"
  fi
done
cd "$SCRIPT_DIR"

echo ""
echo "⏳ Waiting 60s for containers to initialize..."
sleep 60
|
||||
|
||||
# --------------------------------------------------
# STEP 3 — Per-app post-restore fixes
# --------------------------------------------------
echo ""
echo "========================================="
echo "🔧 STEP 3 — Applying Post-Restore Fixes"
echo "========================================="

# ---- NEXTCLOUD ----
# Rewrite the trusted_domains list so the restored instance answers on
# this machine's IP and on localhost (with and without the :8082 port).
echo ""
echo "📌 Nextcloud — Trusted domains..."
if container_is_healthy nextcloud-app; then
  echo " ⏭️ Nextcloud is healthy, skipping fix"
elif docker ps | grep -q nextcloud-app; then
  # Small wrapper so each occ call reads cleanly.
  occ() { docker exec nextcloud-app php /var/www/html/occ "$@"; }
  # Drop stale entries carried over from the backed-up host.
  occ config:system:delete trusted_domains 1 2>/dev/null || true
  occ config:system:delete trusted_domains 2 2>/dev/null || true
  occ config:system:set trusted_domains 0 --value="localhost"
  occ config:system:set trusted_domains 1 --value="$VM_IP"
  occ config:system:set trusted_domains 2 --value="${VM_IP}:8082"
  occ config:system:set trusted_domains 3 --value="localhost:8082"
  unset -f occ
  docker restart nextcloud-app
  echo " ✅ Nextcloud trusted domains fixed"
else
  echo " ⚠️ nextcloud-app not running"
fi
|
||||
|
||||
# ---- MAUTIC ----
# Rewrites Mautic's local.php (DB credentials + site_url for this host),
# marks the install as complete so the web installer is skipped, and
# resets the admin password directly in the database.
echo ""
echo "📌 Mautic — Config + admin password..."
if container_is_healthy mautic-app; then
  echo " ⏭️ Mautic is healthy, skipping fix"
else
  if docker ps | grep -q mautic-app; then
    # Unquoted EOF so ${VM_IP} expands here; \$parameters stays a literal
    # dollar for PHP. NOTE(review): DB credentials are hardcoded and must
    # match the compose file — consider sourcing them from env vars.
    cat > /tmp/mautic-local.php << EOF
<?php
\$parameters = array(
'db_driver' => 'pdo_mysql',
'db_host' => 'mautic-mariadb',
'db_port' => '3306',
'db_name' => 'mautic',
'db_user' => 'mautic',
'db_password' => 'mautic123',
'db_table_prefix' => '',
'site_url' => 'http://${VM_IP}:8081'
);
EOF
    docker cp /tmp/mautic-local.php mautic-app:/var/www/html/config/local.php
    # The .installed flag stops Mautic from re-running its installer.
    docker exec mautic-app touch /var/www/html/var/.installed 2>/dev/null || true
    docker exec mautic-app chown -R www-data:www-data /var/www/html/var /var/www/html/config 2>/dev/null || true
    docker restart mautic-app
    sleep 10

    # Reset the admin password by writing a fresh bcrypt hash straight
    # into the users table. NOTE(review): root DB password and the admin
    # password are hardcoded here.
    HASH=$(docker exec mautic-app php -r "echo password_hash('Admin!Password123', PASSWORD_BCRYPT);" 2>/dev/null || true)
    if [ -n "$HASH" ]; then
      docker exec mautic-mariadb mysql -uroot -pmautic_root_password \
        -e "USE mautic; UPDATE users SET password = '$HASH' WHERE username = 'admin';" 2>/dev/null || true
      echo " ✅ Admin password → Admin!Password123"
    fi
    docker restart mautic-app
    echo " ✅ Mautic fixed → http://${VM_IP}:8081/s/login"
  else
    echo " ⚠️ mautic-app not running"
  fi
fi
|
||||
|
||||
# ---- ODOO ----
# Clears cached web assets and normalizes the DB user's password so the
# restored instance can reach Postgres and regenerate its assets.
echo ""
echo "📌 Odoo — Assets + DB user..."
if container_is_healthy odoo-clean-odoo-1; then
  echo " ⏭️ Odoo is healthy, skipping fix"
elif docker ps | grep -q odoo-clean-db-1; then
  # Stale compiled assets from the old host break the UI after restore —
  # drop them so Odoo rebuilds everything on first request.
  docker exec odoo-clean-db-1 psql -U odoo -d odoo \
    -c "DELETE FROM ir_attachment WHERE url LIKE '/web/assets/%';" 2>/dev/null || true
  docker exec odoo-clean-db-1 psql -U odoo \
    -c "ALTER USER odoo WITH PASSWORD 'odoo';" 2>/dev/null || true
  # Don't let Odoo choke on filestore entries missing from the backup.
  docker exec odoo-clean-odoo-1 bash -c \
    "grep -q 'filestore_check_missing' /etc/odoo/odoo.conf || echo 'filestore_check_missing = False' >> /etc/odoo/odoo.conf" 2>/dev/null || true
  docker restart odoo-clean-odoo-1
  echo " ✅ Odoo fixed → http://${VM_IP}:8069/web"
else
  echo " ⚠️ odoo-clean-db-1 not running"
fi
|
||||
|
||||
# ---- FRAPPE ----
# Re-grants DB privileges (the restored MariaDB loses the site user's
# grants), points the site at the local redis containers, sets the site
# URL for this host, then migrates and clears caches.
echo ""
echo "📌 Frappe — DB permissions + cache + URL..."
if container_is_healthy frappe-erpnext; then
  echo " ⏭️ Frappe is healthy, skipping fix"
else
  if docker ps | grep -q frappe-erpnext && docker ps | grep -q frappe-mariadb; then
    # NOTE(review): the site name is hardcoded — must match the restored bench.
    SITE_CONFIG="/home/frappe/frappe-bench/sites/erpnext.navitrends.ovh/site_config.json"
    # Pull db_name / db_password out of site_config.json with grep/cut
    # (jq would be more robust but may not exist inside the container).
    DB_NAME=$(docker exec frappe-erpnext cat "$SITE_CONFIG" 2>/dev/null \
      | grep -o '"db_name": *"[^"]*"' | cut -d'"' -f4)
    DB_PASS=$(docker exec frappe-erpnext cat "$SITE_CONFIG" 2>/dev/null \
      | grep -o '"db_password": *"[^"]*"' | cut -d'"' -f4)

    if [ -n "$DB_NAME" ] && [ -n "$DB_PASS" ]; then
      echo " DB: $DB_NAME"

      # Re-create grants for the site's DB user from anywhere (%) and
      # from the docker bridge range (172.%).
      # NOTE(review): MariaDB root password '123' is hardcoded — confirm
      # against the compose file.
      docker exec frappe-mariadb mysql -uroot -p123 -e "
GRANT ALL PRIVILEGES ON *.* TO '${DB_NAME}'@'%' IDENTIFIED BY '${DB_PASS}' WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON *.* TO '${DB_NAME}'@'172.%' IDENTIFIED BY '${DB_PASS}' WITH GRANT OPTION;
FLUSH PRIVILEGES;
" 2>/dev/null && echo " ✅ DB permissions fixed" || echo " ⚠️ DB grant failed"

      # Point the site at local redis, enable the scheduler, fix the URL,
      # then migrate and clear caches inside the container.
      # ${VM_IP} is expanded by the *host* shell before bash -c runs.
      docker exec frappe-erpnext bash -c "
cd /home/frappe/frappe-bench
bench --site erpnext.navitrends.ovh set-config redis_cache 'redis://frappe-redis:6379'
bench --site erpnext.navitrends.ovh set-config redis_queue 'redis://frappe-redis:6379'
bench --site erpnext.navitrends.ovh set-config enable_scheduler 1
bench --site erpnext.navitrends.ovh set-config site_url 'http://${VM_IP}:8080'
bench --site erpnext.navitrends.ovh migrate
bench --site erpnext.navitrends.ovh clear-cache
" 2>/dev/null && echo " ✅ Frappe configured" || echo " ⚠️ Frappe config step had errors"

      docker restart frappe-erpnext
      echo " ✅ Frappe fixed → http://${VM_IP}:8080"
    else
      echo " ⚠️ Could not read Frappe DB credentials"
    fi
  else
    echo " ⚠️ Frappe containers not running"
  fi
fi
|
||||
|
||||
# ---- N8N ----
# Ensures the shared docker network exists and the n8n stack is up.
echo ""
echo "📌 n8n — Network check..."
if container_is_healthy n8n-app; then
  echo " ⏭️ n8n is healthy, skipping fix"
else
  # The original 'inspect || create && echo "Created"' chain parsed as
  # '(inspect || create) && echo', so it claimed the network was created
  # even when it already existed. An explicit if reports creation only
  # when it actually happened.
  if ! docker network inspect integration-network &>/dev/null; then
    docker network create integration-network && echo " ✅ Created integration-network"
  fi

  if ! docker ps | grep -q n8n-app; then
    cd "$SCRIPT_DIR/compose-files/n8n-setup" && docker-compose up -d && cd "$SCRIPT_DIR"
  fi
  echo " ✅ n8n → http://${VM_IP}:5678"
fi
|
||||
|
||||
# --------------------------------------------------
# Final summary of service endpoints
# --------------------------------------------------
echo ""
echo "========================================="
echo "✅ RESTORE COMPLETE"
echo "========================================="
for endpoint_line in \
  " Nextcloud → http://${VM_IP}:8082" \
  " Mautic → http://${VM_IP}:8081/s/login (admin / Admin!Password123)" \
  " Odoo → http://${VM_IP}:8069/web" \
  " n8n → http://${VM_IP}:5678" \
  " Frappe → http://${VM_IP}:8080"
do
  echo "$endpoint_line"
done
echo "========================================="
|
||||
Reference in New Issue
Block a user