Sync from main server - 2026-05-05 00:20:15

This commit is contained in:
root
2026-05-05 00:20:15 +02:00
parent a8db6b5fa2
commit 09bbe0403c
5 changed files with 560 additions and 336 deletions

View File

@@ -30,8 +30,11 @@ from modules.users import (
app = Flask(__name__) app = Flask(__name__)
app.secret_key = 'navitrends-secret-key-2025' app.secret_key = 'navitrends-secret-key-2025'
# Increase default timeout for slow VM→main-server SSH calls
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
restore_jobs = {} restore_jobs = {}
backup_jobs = {} # for manual backup runs backup_jobs = {}
def _stream_restore(job_id, cmd): def _stream_restore(job_id, cmd):
@@ -53,7 +56,6 @@ def _stream_restore(job_id, cmd):
def _stream_backup(job_id, script_path): def _stream_backup(job_id, script_path):
"""Run the backup script and stream its output into backup_jobs."""
backup_jobs[job_id] = {'status': 'running', 'log': [], 'started': time.time()} backup_jobs[job_id] = {'status': 'running', 'log': [], 'started': time.time()}
try: try:
proc = subprocess.Popen( proc = subprocess.Popen(
@@ -73,16 +75,27 @@ def _stream_backup(job_id, script_path):
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
# DASHBOARD # DASHBOARD
# Loads instantly — all heavy data fetched async via JS after page renders
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
@app.route('/') @app.route('/')
@login_required @login_required
def dashboard(): def dashboard():
containers = get_containers() # On the VM: skip slow SSH calls at page load — JS fetches them async via /api/dashboard
running_count = sum(1 for c in containers if 'Up' in c.get('status', '')) # On the main server: fetch everything normally (local calls, no SSH delay)
backups = get_local_backups() backups = get_local_backups()
vm_backups = get_vm_backups() vm_backups = get_vm_backups()
if RUNNING_ON_MAIN_SERVER:
containers = get_containers()
running_count = sum(1 for c in containers if 'Up' in c.get('status', ''))
system = get_system_info() system = get_system_info()
users = get_all_users() users = get_all_users()
else:
containers = [] # loaded async by JS via /api/dashboard
running_count = 0
system = {}
users = []
return render_template('pages/dashboard.html', return render_template('pages/dashboard.html',
containers=containers, containers=containers,
running_count=running_count, running_count=running_count,
@@ -145,9 +158,12 @@ def restore_page():
@app.route('/users') @app.route('/users')
@login_required @login_required
def users_page(): def users_page():
# On VM: skip slow SSH call — JS loads users async via /api/users
# On main server: fetch normally (local, fast)
users = get_all_users() if RUNNING_ON_MAIN_SERVER else []
return render_template( return render_template(
'pages/users.html', 'pages/users.html',
users=get_all_users(), users=users,
main_server=MAIN_SERVER_IP, main_server=MAIN_SERVER_IP,
active_page='users', active_page='users',
page_title='User Management', page_title='User Management',
@@ -158,10 +174,13 @@ def users_page():
@app.route('/settings') @app.route('/settings')
@login_required @login_required
def settings_page(): def settings_page():
# On VM: skip slow SSH call — JS loads system info async via /api/system
# On main server: fetch normally (local, fast)
system = get_system_info() if RUNNING_ON_MAIN_SERVER else {}
return render_template( return render_template(
'pages/settings.html', 'pages/settings.html',
main_server=MAIN_SERVER_IP, main_server=MAIN_SERVER_IP,
system=get_system_info(), system=system,
running_on_main=RUNNING_ON_MAIN_SERVER, running_on_main=RUNNING_ON_MAIN_SERVER,
active_page='settings', active_page='settings',
page_title='Settings', page_title='Settings',
@@ -206,7 +225,7 @@ def api_containers_all():
@app.route('/api/nav-summary') @app.route('/api/nav-summary')
@login_required @login_required
def api_nav_summary(): def api_nav_summary():
"""Lightweight counts for sidebar badges on every page (one round trip).""" """Lightweight counts for sidebar badges (one round trip)."""
root_ctrs = get_all_root_containers() root_ctrs = get_all_root_containers()
user_ctrs = get_rootless_user_containers_remote() user_ctrs = get_rootless_user_containers_remote()
all_ctrs = root_ctrs + user_ctrs all_ctrs = root_ctrs + user_ctrs
@@ -217,17 +236,39 @@ def api_nav_summary():
}) })
# ─────────────────────────────────────────────
# API — dashboard summary (fast async load)
# ─────────────────────────────────────────────
@app.route('/api/dashboard')
@login_required
def api_dashboard():
"""
Single endpoint the dashboard JS calls after page render.
Returns system info + container summary + user count in one shot.
"""
system = get_system_info()
root_ctrs = get_all_root_containers()
user_ctrs = get_rootless_user_containers_remote()
all_ctrs = root_ctrs + user_ctrs
users = get_all_users()
running = sum(1 for c in all_ctrs if 'Up' in c.get('status', ''))
return jsonify({
'system': system,
'containers': all_ctrs,
'running_count': running,
'user_count': len(users),
'local_backups': len(get_local_backups()),
'vm_backups': len(get_vm_backups()),
})
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
# API — container actions # API — container actions
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
@app.route('/api/container/action', methods=['POST']) @app.route('/api/container/action', methods=['POST'])
@login_required @login_required
def api_container_action(): def api_container_action():
"""
POST JSON: { "name": "container-name", "action": "start|stop|restart" }
Runs the action, then immediately returns the NEW container status so the
UI can update without waiting for the next 15-second refresh cycle.
"""
data = request.get_json() or {} data = request.get_json() or {}
name = data.get('name', '').strip() name = data.get('name', '').strip()
action = data.get('action', '').strip() action = data.get('action', '').strip()
@@ -236,26 +277,20 @@ def api_container_action():
return jsonify({'success': False, 'message': 'name and action required'}), 400 return jsonify({'success': False, 'message': 'name and action required'}), 400
success, output = container_action(name, action) success, output = container_action(name, action)
# Give Docker a moment to settle, then fetch the real status
time.sleep(1.5) time.sleep(1.5)
status_info = get_container_status(name) status_info = get_container_status(name)
return jsonify({ return jsonify({
'success': success, 'success': success,
'output': output, 'output': output,
'new_status': status_info['status'], # 'running' | 'stopped' | 'unknown' 'new_status': status_info['status'],
'new_status_raw': status_info['raw'], 'new_status_raw': status_info['raw'],
}) })
# ─────────────────────────────────────────────
# API — single container status (for polling)
# ─────────────────────────────────────────────
@app.route('/api/container/status/<name>') @app.route('/api/container/status/<name>')
@login_required @login_required
def api_container_status(name): def api_container_status(name):
"""Quick single-container status check."""
status_info = get_container_status(name) status_info = get_container_status(name)
return jsonify(status_info) return jsonify(status_info)
@@ -272,22 +307,14 @@ def api_backups():
@app.route('/api/backups/log') @app.route('/api/backups/log')
@login_required @login_required
def api_backup_log(): def api_backup_log():
"""Return the last N backup log entries."""
limit = int(request.args.get('limit', 20)) limit = int(request.args.get('limit', 20))
entries = get_backup_log_entries(limit) entries = get_backup_log_entries(limit)
return jsonify({'entries': entries}) return jsonify({'entries': entries})
# ─────────────────────────────────────────────
# API — backup health audit
# ─────────────────────────────────────────────
@app.route('/api/backups/audit', methods=['POST']) @app.route('/api/backups/audit', methods=['POST'])
@login_required @login_required
def api_backup_audit(): def api_backup_audit():
"""
POST JSON: { "backup_file": "myapps-backup-…tar.gz", "source": "local"|"vm" }
Returns full audit report.
"""
data = request.get_json() or {} data = request.get_json() or {}
bfile = data.get('backup_file', '').strip() bfile = data.get('backup_file', '').strip()
source = data.get('source', 'local').strip() source = data.get('source', 'local').strip()
@@ -299,15 +326,9 @@ def api_backup_audit():
return jsonify(result) return jsonify(result)
# ─────────────────────────────────────────────
# API — delete backup
# ─────────────────────────────────────────────
@app.route('/api/backups/delete', methods=['POST']) @app.route('/api/backups/delete', methods=['POST'])
@login_required @login_required
def api_backup_delete(): def api_backup_delete():
"""
POST JSON: { "backup_file": "myapps-backup-…tar.gz", "source": "local"|"vm" }
"""
data = request.get_json() or {} data = request.get_json() or {}
bfile = data.get('backup_file', '').strip() bfile = data.get('backup_file', '').strip()
source = data.get('source', 'local').strip() source = data.get('source', 'local').strip()
@@ -319,17 +340,9 @@ def api_backup_delete():
return jsonify({'success': success, 'message': message}) return jsonify({'success': success, 'message': message})
# ─────────────────────────────────────────────
# API — manual backup trigger
# ─────────────────────────────────────────────
@app.route('/api/backups/run', methods=['POST']) @app.route('/api/backups/run', methods=['POST'])
@login_required @login_required
def api_backup_run(): def api_backup_run():
"""
Trigger a manual backup run on the main server.
Returns a job_id so the UI can poll /api/backups/run/status/<job_id>.
Only works when running on the main server (where the backup script lives).
"""
if not RUNNING_ON_MAIN_SERVER: if not RUNNING_ON_MAIN_SERVER:
return jsonify({ return jsonify({
'success': False, 'success': False,
@@ -352,7 +365,6 @@ def api_backup_run():
@app.route('/api/backups/run/status/<job_id>') @app.route('/api/backups/run/status/<job_id>')
@login_required @login_required
def api_backup_run_status(job_id): def api_backup_run_status(job_id):
"""Poll manual backup job status."""
job = backup_jobs.get(job_id) job = backup_jobs.get(job_id)
if not job: if not job:
return jsonify({'error': 'Job not found'}), 404 return jsonify({'error': 'Job not found'}), 404
@@ -364,7 +376,7 @@ def api_backup_run_status(job_id):
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
# API — users (LOCAL — users on this host) # API — users
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
@app.route('/api/users') @app.route('/api/users')
@login_required @login_required
@@ -442,7 +454,6 @@ def restore_start():
if not backup_file: if not backup_file:
return jsonify({'error': 'No backup file specified'}), 400 return jsonify({'error': 'No backup file specified'}), 400
# ── Resolve backup archive path ──────────────────────────────────────────
if backup_source == 'local': if backup_source == 'local':
if RUNNING_ON_MAIN_SERVER: if RUNNING_ON_MAIN_SERVER:
backup_path = f"/root/backups/{backup_file}" backup_path = f"/root/backups/{backup_file}"
@@ -498,7 +509,6 @@ def restore_start():
f"cd {session_dir} && bash restore-myapps.sh ; " f"cd {session_dir} && bash restore-myapps.sh ; "
f"EXIT=$? ; rm -rf {session_dir} ; exit $EXIT" f"EXIT=$? ; rm -rf {session_dir} ; exit $EXIT"
) )
else: else:
if not remote_ip: if not remote_ip:
return jsonify({'error': 'remote_ip required'}), 400 return jsonify({'error': 'remote_ip required'}), 400

View File

@@ -13,7 +13,7 @@ from config import (
) )
def _run(cmd, timeout=20): def _run(cmd, timeout=30):
try: try:
r = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=timeout) r = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=timeout)
return r.stdout.strip(), r.stderr.strip() return r.stdout.strip(), r.stderr.strip()
@@ -22,7 +22,6 @@ def _run(cmd, timeout=20):
def _human_bytes(n): def _human_bytes(n):
"""Human-readable byte size for audit UI."""
n = int(n) n = int(n)
if n < 1024: if n < 1024:
return f'{n} B' return f'{n} B'
@@ -35,16 +34,18 @@ def _human_bytes(n):
return f'{n / (1024 ** 4):.2f} TB' return f'{n / (1024 ** 4):.2f} TB'
def _ssh_main(remote_cmd, timeout=20): def _ssh_main(remote_cmd, timeout=30):
if RUNNING_ON_MAIN_SERVER: if RUNNING_ON_MAIN_SERVER:
return _run(remote_cmd, timeout=timeout) return _run(remote_cmd, timeout=timeout)
else: else:
escaped = remote_cmd.replace("'", "'\\''")
ssh = ( ssh = (
f"ssh -i {MAIN_SERVER_KEY} -p {MAIN_SERVER_PORT} " f"ssh -i {MAIN_SERVER_KEY} -p {MAIN_SERVER_PORT} "
f"-o StrictHostKeyChecking=no -o ConnectTimeout=10 " f"-o StrictHostKeyChecking=no -o ConnectTimeout=10 "
f"-o BatchMode=yes "
f"{MAIN_SERVER_USER}@{MAIN_SERVER_IP}" f"{MAIN_SERVER_USER}@{MAIN_SERVER_IP}"
) )
return _run(f"{ssh} '{remote_cmd}'", timeout=timeout) return _run(f"{ssh} '{escaped}'", timeout=timeout)
# ──────────────────────────────────────────────────────────────── # ────────────────────────────────────────────────────────────────
@@ -71,6 +72,7 @@ def get_vm_backups():
cmd = ( cmd = (
f"ssh -i {VM_KEY} -p {VM_PORT} " f"ssh -i {VM_KEY} -p {VM_PORT} "
f"-o StrictHostKeyChecking=no -o ConnectTimeout=10 " f"-o StrictHostKeyChecking=no -o ConnectTimeout=10 "
f"-o BatchMode=yes "
f"{VM_USER}@{VM_HOST} " f"{VM_USER}@{VM_HOST} "
f"'ls -t /backups/main-server/myapps-backup-*.tar.gz 2>/dev/null | head -20'" f"'ls -t /backups/main-server/myapps-backup-*.tar.gz 2>/dev/null | head -20'"
) )
@@ -96,22 +98,6 @@ def get_vm_backups():
# ──────────────────────────────────────────────────────────────── # ────────────────────────────────────────────────────────────────
def audit_backup(backup_file, source='local'): def audit_backup(backup_file, source='local'):
"""
Perform a health and integrity audit on a backup archive.
Checks:
1. File exists
2. File size sanity
3. SHA256 checksum (if .sha256 sidecar exists)
4. tar archive integrity (gzip test only — portable, no conflicting flags)
5. Expected internal structure
6. Path traversal / suspicious paths
7. Suspicious script files at unexpected locations (scripts only, not binaries)
8. Volume count
Returns:
{ ok, score, checks, summary }
"""
checks = [] checks = []
def add(name, status, detail='', more=None): def add(name, status, detail='', more=None):
@@ -120,13 +106,11 @@ def audit_backup(backup_file, source='local'):
entry['more'] = more entry['more'] = more
checks.append(entry) checks.append(entry)
# ── Resolve archive path ─────────────────────────────────────────────────
if source == 'local': if source == 'local':
archive_path = f"/root/backups/{backup_file}" archive_path = f"/root/backups/{backup_file}"
else: else:
archive_path = f"/backups/main-server/{backup_file}" archive_path = f"/backups/main-server/{backup_file}"
# On VM auditing a "local" (main server) backup → pull to /tmp first
if not RUNNING_ON_MAIN_SERVER and source == 'local': if not RUNNING_ON_MAIN_SERVER and source == 'local':
tmp_path = f"/tmp/audit_{backup_file}" tmp_path = f"/tmp/audit_{backup_file}"
if not os.path.exists(tmp_path): if not os.path.exists(tmp_path):
@@ -151,7 +135,6 @@ def audit_backup(backup_file, source='local'):
} }
archive_path = tmp_path archive_path = tmp_path
# ── CHECK 1: File exists ─────────────────────────────────────────────────
if not os.path.exists(archive_path): if not os.path.exists(archive_path):
add('File Exists', 'fail', f'Not found: {archive_path}') add('File Exists', 'fail', f'Not found: {archive_path}')
return { return {
@@ -165,7 +148,6 @@ def audit_backup(backup_file, source='local'):
} }
add('File Exists', 'pass', archive_path) add('File Exists', 'pass', archive_path)
# ── CHECK 2: File size ───────────────────────────────────────────────────
size_bytes = os.path.getsize(archive_path) size_bytes = os.path.getsize(archive_path)
size_mb = size_bytes / (1024 * 1024) size_mb = size_bytes / (1024 * 1024)
size_human = _human_bytes(size_bytes) size_human = _human_bytes(size_bytes)
@@ -183,7 +165,6 @@ def audit_backup(backup_file, source='local'):
add('File Size', 'pass', add('File Size', 'pass',
f'{size_human} — within expected range', more=size_more) f'{size_human} — within expected range', more=size_more)
# ── CHECK 3: SHA256 checksum ─────────────────────────────────────────────
sha_file = archive_path + '.sha256' sha_file = archive_path + '.sha256'
if os.path.exists(sha_file): if os.path.exists(sha_file):
try: try:
@@ -201,8 +182,6 @@ def audit_backup(backup_file, source='local'):
add('Checksum (SHA256)', 'warn', add('Checksum (SHA256)', 'warn',
'No .sha256 sidecar found — run a new backup to get checksums') 'No .sha256 sidecar found — run a new backup to get checksums')
# ── CHECK 4: Archive integrity ───────────────────────────────────────────
# Use gzip --test which works everywhere without conflicting tar flags
try: try:
result = subprocess.run( result = subprocess.run(
['gzip', '--test', archive_path], ['gzip', '--test', archive_path],
@@ -216,11 +195,9 @@ def audit_backup(backup_file, source='local'):
add('Archive Integrity', 'fail', add('Archive Integrity', 'fail',
f'gzip test failed: {(result.stderr or result.stdout)[:200]}') f'gzip test failed: {(result.stderr or result.stdout)[:200]}')
except FileNotFoundError: except FileNotFoundError:
# gzip not available — try python gzip
try: try:
import gzip import gzip
with gzip.open(archive_path, 'rb') as f: with gzip.open(archive_path, 'rb') as f:
# Read just the first few MB to check header validity
f.read(1024 * 1024) f.read(1024 * 1024)
add('Archive Integrity', 'pass', 'gzip header valid') add('Archive Integrity', 'pass', 'gzip header valid')
except Exception as e: except Exception as e:
@@ -230,7 +207,6 @@ def audit_backup(backup_file, source='local'):
except Exception as e: except Exception as e:
add('Archive Integrity', 'warn', f'Could not test: {e}') add('Archive Integrity', 'warn', f'Could not test: {e}')
# ── Read archive member list (used by checks 5, 6, 7, 8) ─────────────────
members = [] members = []
try: try:
with tarfile.open(archive_path, 'r:gz') as tf: with tarfile.open(archive_path, 'r:gz') as tf:
@@ -238,7 +214,6 @@ def audit_backup(backup_file, source='local'):
except Exception: except Exception:
pass pass
# ── CHECK 5: Internal structure ──────────────────────────────────────────
if members: if members:
has_volumes = any('volumes/' in m for m in members) has_volumes = any('volumes/' in m for m in members)
has_info = any('backup-info.txt' in m for m in members) has_info = any('backup-info.txt' in m for m in members)
@@ -259,7 +234,6 @@ def audit_backup(backup_file, source='local'):
else: else:
add('Internal Structure', 'warn', 'Could not inspect archive members') add('Internal Structure', 'warn', 'Could not inspect archive members')
# ── CHECK 6: Path traversal / suspicious paths ────────────────────────────
SUSPICIOUS = [ SUSPICIOUS = [
(r'\.\./', 'path traversal (..)'), (r'\.\./', 'path traversal (..)'),
(r'^/', 'absolute path in archive'), (r'^/', 'absolute path in archive'),
@@ -285,10 +259,6 @@ def audit_backup(backup_file, source='local'):
'(e.g. .ssh, /etc/shadow).', '(e.g. .ssh, /etc/shadow).',
]) ])
# ── CHECK 7: Suspicious scripts (smart — scripts only, not data files) ────
# Only flag actual text script files (.sh .py .pl .rb) with execute bits
# placed outside compose-files/ and outside known vendor directories.
# .bin, .so, .exe data files are intentionally excluded (too many false positives)
SCRIPT_EXTENSIONS = ('.sh', '.py', '.pl', '.rb', '.bash', '.zsh') SCRIPT_EXTENSIONS = ('.sh', '.py', '.pl', '.rb', '.bash', '.zsh')
SAFE_PREFIXES = ( SAFE_PREFIXES = (
'compose-files/', 'compose-files/',
@@ -303,10 +273,8 @@ def audit_backup(backup_file, source='local'):
if not member.isfile(): if not member.isfile():
continue continue
name = member.name name = member.name
# Skip files in known-safe directories
if any(name.startswith(p) or f'/{p}' in name for p in SAFE_PREFIXES): if any(name.startswith(p) or f'/{p}' in name for p in SAFE_PREFIXES):
continue continue
# Only flag actual script extensions with execute bits
name_lower = name.lower() name_lower = name.lower()
has_script_ext = any(name_lower.endswith(ext) for ext in SCRIPT_EXTENSIONS) has_script_ext = any(name_lower.endswith(ext) for ext in SCRIPT_EXTENSIONS)
has_exec_bit = bool(member.mode & 0o111) has_exec_bit = bool(member.mode & 0o111)
@@ -321,7 +289,6 @@ def audit_backup(backup_file, source='local'):
else: else:
add('Executable Scripts', 'pass', 'No unexpected executable scripts found') add('Executable Scripts', 'pass', 'No unexpected executable scripts found')
# ── CHECK 8: Volume count ────────────────────────────────────────────────
vol_archives = [m for m in members if 'volumes/' in m and m.endswith('.tar.gz')] vol_archives = [m for m in members if 'volumes/' in m and m.endswith('.tar.gz')]
v = len(vol_archives) v = len(vol_archives)
if v == 0: if v == 0:
@@ -331,7 +298,6 @@ def audit_backup(backup_file, source='local'):
else: else:
add('Volume Count', 'pass', f'{v} volume archives present') add('Volume Count', 'pass', f'{v} volume archives present')
# ── Score ─────────────────────────────────────────────────────────────────
weights = {'pass': 10, 'warn': 5, 'fail': 0} weights = {'pass': 10, 'warn': 5, 'fail': 0}
total = len(checks) * 10 total = len(checks) * 10
earned = sum(weights.get(c['status'], 0) for c in checks) earned = sum(weights.get(c['status'], 0) for c in checks)
@@ -427,6 +393,7 @@ def delete_backup(backup_file, source='local'):
cmd = ( cmd = (
f"ssh -i {VM_KEY} -p {VM_PORT} " f"ssh -i {VM_KEY} -p {VM_PORT} "
f"-o StrictHostKeyChecking=no -o ConnectTimeout=10 " f"-o StrictHostKeyChecking=no -o ConnectTimeout=10 "
f"-o BatchMode=yes "
f"{VM_USER}@{VM_HOST} " f"{VM_USER}@{VM_HOST} "
f"'rm -f /backups/main-server/{backup_file} " f"'rm -f /backups/main-server/{backup_file} "
f"/backups/main-server/{backup_file}.sha256'" f"/backups/main-server/{backup_file}.sha256'"
@@ -621,10 +588,50 @@ def get_all_stats():
# ──────────────────────────────────────────────────────────────── # ────────────────────────────────────────────────────────────────
# SYSTEM INFO # SYSTEM INFO — single batched SSH call
# ──────────────────────────────────────────────────────────────── # ────────────────────────────────────────────────────────────────
def get_system_info(): def get_system_info():
"""
Collect all system metrics in a SINGLE SSH call instead of 8 separate ones.
Emits a pipe-delimited line: cpu|mem|mem_pct|disk|disk_pct|load|uptime|docker_v|hostname
"""
batch_cmd = (
"printf '%s|%s|%s|%s|%s|%s|%s|%s|%s\\n' "
"\"$(top -bn1 | grep 'Cpu(s)' | awk '{print $2+$4}')\" "
"\"$(free -m | awk 'NR==2{printf \"%s/%sMB\", $3, $2}')\" "
"\"$(free | awk 'NR==2{printf \"%.0f\", $3/$2*100}')\" "
"\"$(df -h / | awk 'NR==2{printf \"%s/%s\", $3, $2}')\" "
"\"$(df / | awk 'NR==2{print $5}' | tr -d '%')\" "
"\"$(cat /proc/loadavg | awk '{print $1, $2, $3}')\" "
"\"$(uptime -p)\" "
"\"$(docker --version 2>/dev/null | cut -d' ' -f3 | tr -d ',')\" "
"\"$(hostname -f 2>/dev/null || hostname)\""
)
stdout, stderr = _ssh_main(batch_cmd, timeout=20)
# Parse the pipe-delimited result
if stdout and '|' in stdout:
# Use the last line in case there's extra output
for line in reversed(stdout.splitlines()):
line = line.strip()
if '|' in line:
parts = line.split('|')
if len(parts) >= 9:
return {
'cpu_pct': parts[0] or '0',
'memory': parts[1] or 'N/A',
'mem_pct': parts[2] or '0',
'disk': parts[3] or 'N/A',
'disk_pct': parts[4] or '0',
'load': parts[5] or 'N/A',
'uptime': parts[6] or 'N/A',
'docker_v': parts[7] or 'N/A',
'hostname': parts[8] or 'main server',
}
# Fallback: individual calls if batch failed
cpu_out, _ = _ssh_main("top -bn1 | grep 'Cpu(s)' | awk '{print $2+$4}'") cpu_out, _ = _ssh_main("top -bn1 | grep 'Cpu(s)' | awk '{print $2+$4}'")
mem_out, _ = _ssh_main("free -m | awk 'NR==2{printf \"%s/%sMB\", $3, $2}'") mem_out, _ = _ssh_main("free -m | awk 'NR==2{printf \"%s/%sMB\", $3, $2}'")
mem_pct, _ = _ssh_main("free | awk 'NR==2{printf \"%.0f\", $3/$2*100}'") mem_pct, _ = _ssh_main("free | awk 'NR==2{printf \"%.0f\", $3/$2*100}'")

View File

@@ -1,12 +1,17 @@
# modules/users.py
import os import os
import subprocess import subprocess
import pwd import pwd
import re import re
import tempfile import json
import stat
from config import (
RUNNING_ON_MAIN_SERVER,
MAIN_SERVER_IP, MAIN_SERVER_USER, MAIN_SERVER_KEY, MAIN_SERVER_PORT,
)
def _run(cmd, timeout=20): def _run(cmd, timeout=30):
try: try:
r = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=timeout) r = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=timeout)
return r.stdout.strip(), r.stderr.strip() return r.stdout.strip(), r.stderr.strip()
@@ -16,48 +21,85 @@ def _run(cmd, timeout=20):
return '', str(e) return '', str(e)
def _ssh_main(remote_cmd, timeout=30):
"""
Run a command ON THE MAIN SERVER.
- If already on main server → run locally.
- If on VM → SSH to main server first.
"""
if RUNNING_ON_MAIN_SERVER:
return _run(remote_cmd, timeout=timeout)
else:
# Escape single quotes in remote_cmd for safe shell wrapping
escaped = remote_cmd.replace("'", "'\\''")
ssh = (
f"ssh -i {MAIN_SERVER_KEY} -p {MAIN_SERVER_PORT} "
f"-o StrictHostKeyChecking=no -o ConnectTimeout=10 "
f"-o BatchMode=yes "
f"{MAIN_SERVER_USER}@{MAIN_SERVER_IP}"
)
return _run(f"{ssh} '{escaped}'", timeout=timeout)
# ────────────────────────────────────────────────────────────────
# USER LISTING — single batched SSH call to main server
# ────────────────────────────────────────────────────────────────
def get_all_users(): def get_all_users():
"""Return list of non-system users (uid >= 1000) with info.""" """
users = [] Return list of non-system users (uid >= 1000) from the MAIN SERVER.
Uses a SINGLE SSH call with a bash one-liner that collects all data at once,
instead of making 7+ SSH calls per user.
"""
# This script runs on the main server and emits one JSON line per user
batch_script = r"""
python3 - <<'PYEOF'
import subprocess, json, os, pwd
def run(cmd):
try: try:
for pw in pwd.getpwall(): r = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=10)
if pw.pw_uid < 1000 or pw.pw_name == 'nobody': return r.stdout.strip()
except:
return ''
# Get all non-system users
passwd_lines = run("getent passwd | awk -F: '$3 >= 1000 && $1 != \"nobody\" {print}'")
users = []
for line in passwd_lines.splitlines():
parts = line.split(':')
if len(parts) < 6:
continue continue
uid = pw.pw_uid name = parts[0]
name = pw.pw_name uid = int(parts[2])
home = pw.pw_dir home = parts[5]
sock = f"/run/user/{uid}/docker.sock" sock = f"/run/user/{uid}/docker.sock"
has_docker = os.path.exists(sock) has_docker = os.path.exists(sock)
disk_used = run(f"du -sh {home} 2>/dev/null | cut -f1") or 'N/A'
disk_out, _ = _run(f"du -sh {home} 2>/dev/null | cut -f1") linger_out = run(f"loginctl show-user {name} --property=Linger 2>/dev/null")
disk_used = disk_out.strip() or 'N/A'
linger_out, _ = _run(f"loginctl show-user {name} --property=Linger 2>/dev/null")
linger = 'yes' in linger_out.lower() linger = 'yes' in linger_out.lower()
container_count = 0 container_count = 0
if has_docker: if has_docker:
cnt_out, _ = _run( cnt = run(f"DOCKER_HOST=unix://{sock} docker ps -aq 2>/dev/null | wc -l")
f"DOCKER_HOST=unix://{sock} docker ps -aq 2>/dev/null | wc -l"
)
try: try:
container_count = int(cnt_out.strip()) container_count = int(cnt)
except ValueError: except:
container_count = 0 pass
# Check if user has a dedicated virtual disk mounted img_path = f"/home/{name}.img"
disk_img = f"/home/{name}.img" has_vdisk = os.path.exists(img_path)
has_vdisk = os.path.exists(disk_img)
vdisk_mount = None vdisk_mount = None
vdisk_size = None vdisk_size = None
if has_vdisk: if has_vdisk:
# Check if it's mounted somewhere mnt = run(f"findmnt -S {img_path} -o TARGET --noheadings 2>/dev/null")
mnt_out, _ = _run(f"findmnt -S {disk_img} -o TARGET --noheadings 2>/dev/null") vdisk_mount = mnt or None
vdisk_mount = mnt_out.strip() or None sz = run(f"du -sh {img_path} 2>/dev/null | cut -f1")
# Get size of the image vdisk_size = sz or None
size_out, _ = _run(f"du -sh {disk_img} 2>/dev/null | cut -f1")
vdisk_size = size_out.strip() or None
users.append({ users.append({
'name': name, 'name': name,
@@ -72,24 +114,86 @@ def get_all_users():
'vdisk_mount': vdisk_mount, 'vdisk_mount': vdisk_mount,
'vdisk_size': vdisk_size, 'vdisk_size': vdisk_size,
}) })
except Exception as e:
print(f"[users] Error listing users: {e}") print(json.dumps(users))
PYEOF
"""
stdout, stderr = _ssh_main(batch_script, timeout=60)
if not stdout:
# Fallback: try a simpler approach if python3 one-liner fails
return _get_all_users_fallback()
# Find the JSON line (last non-empty line that starts with '[')
for line in reversed(stdout.splitlines()):
line = line.strip()
if line.startswith('['):
try:
return json.loads(line)
except json.JSONDecodeError:
break
return _get_all_users_fallback()
def _get_all_users_fallback():
"""
Simpler fallback: just get user names/UIDs, skip slow per-user checks.
Returns basic user list without docker/disk details.
"""
users = []
stdout, _ = _ssh_main(
"getent passwd | awk -F: '$3 >= 1000 && $1 != \"nobody\" {print $1\"|\"$3\"|\"$6}'"
)
if not stdout:
return users
for line in stdout.splitlines():
line = line.strip()
if not line:
continue
parts = line.split('|')
if len(parts) < 3:
continue
try:
uid = int(parts[1])
except ValueError:
continue
users.append({
'name': parts[0],
'uid': uid,
'home': parts[2],
'has_docker': False,
'docker_socket': None,
'disk_used': 'N/A',
'linger': False,
'container_count': 0,
'has_vdisk': False,
'vdisk_mount': None,
'vdisk_size': None,
})
return users return users
# ────────────────────────────────────────────────────────────────
# USER CONTAINERS — always from main server
# ────────────────────────────────────────────────────────────────
def get_user_containers(username): def get_user_containers(username):
"""Get containers running under a specific user's rootless docker.""" """Get containers running under a specific user's rootless docker on the main server."""
uid_out, _ = _ssh_main(f"id -u {username} 2>/dev/null")
try: try:
pw = pwd.getpwnam(username) uid = int(uid_out.strip())
except KeyError: except ValueError:
return [] return []
uid = pw.pw_uid
sock = f"/run/user/{uid}/docker.sock" sock = f"/run/user/{uid}/docker.sock"
if not os.path.exists(sock): sock_check, _ = _ssh_main(f"test -S {sock} && echo yes || echo no")
if sock_check.strip() != 'yes':
return [] return []
out, _ = _run( out, _ = _ssh_main(
f"DOCKER_HOST=unix://{sock} " f"DOCKER_HOST=unix://{sock} "
f"docker ps -a --format '{{{{.Names}}}}|{{{{.Status}}}}|{{{{.Image}}}}|{{{{.Ports}}}}' 2>/dev/null" f"docker ps -a --format '{{{{.Names}}}}|{{{{.Status}}}}|{{{{.Image}}}}|{{{{.Ports}}}}' 2>/dev/null"
) )
@@ -110,7 +214,7 @@ def get_user_containers(username):
def get_all_users_containers(): def get_all_users_containers():
"""Get containers from ALL users' rootless docker instances.""" """Get containers from ALL users' rootless docker instances on main server."""
all_containers = [] all_containers = []
for user in get_all_users(): for user in get_all_users():
if user['has_docker']: if user['has_docker']:
@@ -118,92 +222,159 @@ def get_all_users_containers():
return all_containers return all_containers
# ────────────────────────────────────────────────────────────────
# USER DISK USAGE — always from main server
# ────────────────────────────────────────────────────────────────
def get_user_disk_usage(username):
    """Return disk-usage info for *username*'s home on the MAIN SERVER.

    All data is gathered through _ssh_main, so this works from the VM too.

    Returns:
        {} when the user is unknown on the main server, otherwise a dict:
            home  -- home directory path (from getent passwd)
            total -- human-readable `du -sh` of the home tree ('N/A' if unknown)
            vdisk -- {} or {'size','used','available','use_pct'} when a
                     loop-device image /home/<user>.img backs the home
    """
    # NOTE(review): username is interpolated into shell commands unescaped —
    # assumes callers only pass names matching create_user's regex; confirm.
    home_out, _ = _ssh_main(
        f"getent passwd {username} | cut -d: -f6"
    )
    home = home_out.strip()
    if not home:
        # User does not exist on the main server.
        return {}
    total_out, _ = _ssh_main(f"du -sh {home} 2>/dev/null | cut -f1")
    img_path = f"/home/{username}.img"
    vdisk_out, _ = _ssh_main(f"test -f {img_path} && echo yes || echo no")
    vdisk_info = {}
    if vdisk_out.strip() == 'yes':
        # Home is backed by a loop-device image: report the mounted
        # filesystem's capacity/usage from `df -h`.
        df_out, _ = _ssh_main(f"df -h {home} 2>/dev/null | tail -1")
        if df_out:
            parts = df_out.split()
            if len(parts) >= 4:
                vdisk_info = {
                    'size': parts[1],
                    'used': parts[2],
                    'available': parts[3],
                    'use_pct': parts[4] if len(parts) > 4 else '?',
                }
    return {
        'home': home,
        # Strip the trailing newline from the SSH output, otherwise the
        # 'N/A' fallback can never trigger ('\n' is truthy).
        'total': (total_out or '').strip() or 'N/A',
        'vdisk': vdisk_info,
    }
# ────────────────────────────────────────────────────────────────
# CREATE / DELETE USER — only works on main server
# ────────────────────────────────────────────────────────────────
def create_user(username, password=None, setup_docker=True, disk_quota_mb=None): def create_user(username, password=None, setup_docker=True, disk_quota_mb=None):
""" """
Create a new system user and optionally set up rootless docker + virtual disk. Create a new system user on the MAIN SERVER.
disk_quota_mb: if set, creates a loop-device virtual disk of that size (MB) If called from VM, all commands SSH to main server.
and mounts it as the user's home directory.
Returns (success: bool, log_text: str) Returns (success: bool, log_text: str)
""" """
logs = [] logs = []
# Validate
if not re.match(r'^[a-z][a-z0-9_-]{1,30}$', username): if not re.match(r'^[a-z][a-z0-9_-]{1,30}$', username):
return False, "Invalid username. Use lowercase letters, numbers, _ or -" return False, "Invalid username. Use lowercase letters, numbers, _ or -"
# Check existence check_out, _ = _ssh_main(f"id {username} 2>/dev/null && echo exists || echo notfound")
try: if 'exists' in check_out:
pwd.getpwnam(username)
return False, f"User '{username}' already exists" return False, f"User '{username}' already exists"
except KeyError:
pass
# Create user out, err = _ssh_main(f"useradd -m -s /bin/bash {username}")
out, err = _run(f"useradd -m -s /bin/bash {username}")
if err and 'already exists' not in err: if err and 'already exists' not in err:
return False, f"useradd failed: {err}" return False, f"useradd failed: {err}"
logs.append(f"✅ User {username} created") logs.append(f"✅ User {username} created")
# Set password
if password: if password:
out, err = _run(f"echo '{username}:{password}' | chpasswd") out, err = _ssh_main(f"echo '{username}:{password}' | chpasswd")
if err: if err:
logs.append(f"⚠️ Password set failed: {err}") logs.append(f"⚠️ Password set failed: {err}")
else: else:
logs.append("✅ Password set") logs.append("✅ Password set")
# Install prerequisites _ssh_main(
_run("apt-get install -y uidmap dbus-user-session curl 2>/dev/null", timeout=60) "apt-get install -y uidmap dbus-user-session curl 2>/dev/null",
timeout=60
)
logs.append("✅ Prerequisites ready") logs.append("✅ Prerequisites ready")
# Enable linger _ssh_main(f"loginctl enable-linger {username}")
_run(f"loginctl enable-linger {username}")
logs.append("✅ Linger enabled") logs.append("✅ Linger enabled")
# Virtual disk (loop device) instead of quota
if disk_quota_mb: if disk_quota_mb:
if RUNNING_ON_MAIN_SERVER:
success, msg = _setup_virtual_disk(username, disk_quota_mb, logs) success, msg = _setup_virtual_disk(username, disk_quota_mb, logs)
if not success: if not success:
logs.append(f"⚠️ Virtual disk setup failed: {msg}") logs.append(f"⚠️ Virtual disk setup failed: {msg}")
else:
logs.append("⚠️ Virtual disk setup must be run directly on main server")
# Setup rootless docker
if setup_docker: if setup_docker:
if RUNNING_ON_MAIN_SERVER:
success, msg = _setup_rootless_docker_via_script(username, logs) success, msg = _setup_rootless_docker_via_script(username, logs)
if not success: if not success:
logs.append(f"⚠️ Docker setup incomplete: {msg}") logs.append(f"⚠️ Docker setup incomplete: {msg}")
else:
logs.append("⚠️ Rootless Docker setup must be run directly on main server")
return True, '\n'.join(logs) return True, '\n'.join(logs)
def delete_user(username, remove_home=False):
    """Remove a user from the MAIN SERVER. Returns (success, message).

    Every command runs through _ssh_main, so this works both locally on the
    main server and from the VM over SSH.

    Parameters:
        username    -- account to remove on the main server
        remove_home -- pass `userdel -r` so the home directory is deleted too
    """
    logs = []
    # Existence check: `id` succeeds only for real users; we grep the sentinel.
    check_out, _ = _ssh_main(f"id {username} 2>/dev/null && echo exists || echo notfound")
    if 'notfound' in check_out:
        return False, f"User '{username}' does not exist"
    # Best effort: stop the user's rootless docker daemon before deletion.
    uid_out, _ = _ssh_main(f"id -u {username} 2>/dev/null")
    try:
        uid = int(uid_out.strip())  # ValueError when the uid lookup returned nothing
        _ssh_main(
            f"XDG_RUNTIME_DIR=/run/user/{uid} "
            f"su --login {username} --command "
            f"'systemctl --user stop docker-rootless.service 2>/dev/null' 2>/dev/null"
        )
    except Exception:
        # Deliberately swallowed: a missing/dead docker service must not
        # block user deletion.
        pass
    _ssh_main(f"loginctl disable-linger {username} 2>/dev/null")
    # Clean up the virtual disk BEFORE userdel (userdel can fail while the
    # home directory is still a busy loop mount).
    if RUNNING_ON_MAIN_SERVER:
        _remove_virtual_disk(username, logs)
    else:
        # Remote variant of _remove_virtual_disk: unmount + delete the image.
        # NOTE(review): unlike _remove_virtual_disk, this branch does NOT
        # remove the /etc/fstab entry on the main server — a stale loop-mount
        # line may persist across reboots; confirm and clean up if needed.
        img_path = f"/home/{username}.img"
        home = f"/home/{username}"
        _ssh_main(f"umount {home} 2>/dev/null || true")
        _ssh_main(f"rm -f {img_path} 2>/dev/null || true")
    flag = '-r' if remove_home else ''
    out, err = _ssh_main(f"userdel {flag} {username}")
    # userdel emits benign notices on stderr; only treat real errors as failure.
    if err and 'does not exist' not in err and 'mail spool' not in err:
        return False, f"userdel error: {err}"
    msg = f"✅ User {username} deleted" + (" (home removed)" if remove_home else "")
    logs.append(msg)
    return True, '\n'.join(logs)
# ────────────────────────────────────────────────────────────────
# LOCAL-ONLY HELPERS (only called when RUNNING_ON_MAIN_SERVER)
# ────────────────────────────────────────────────────────────────
def _setup_virtual_disk(username, disk_mb, logs): def _setup_virtual_disk(username, disk_mb, logs):
"""
Create a loop-device virtual disk for a user and mount it as their home.
Steps:
1. Create a blank image file at /home/<username>.img
2. Format it as ext4
3. Copy existing home contents into it
4. Mount it over /home/<username>
5. Add to /etc/fstab for persistence across reboots
6. Fix ownership
Returns (success: bool, message: str)
"""
try: try:
pw = pwd.getpwnam(username) pw = pwd.getpwnam(username)
except KeyError as e: except KeyError as e:
return False, str(e) return False, str(e)
home = pw.pw_dir # e.g. /home/secuser4 home = pw.pw_dir
img_path = f"/home/{username}.img" img_path = f"/home/{username}.img"
logs.append(f"📦 Creating {disk_mb} MB virtual disk at {img_path} ...") logs.append(f"📦 Creating {disk_mb} MB virtual disk at {img_path} ...")
# ── Step 1: Create the blank image ──────────────────────────────────────
# Use fallocate (fast, instant) with dd fallback
out, err = _run(f"fallocate -l {disk_mb}M {img_path}", timeout=60) out, err = _run(f"fallocate -l {disk_mb}M {img_path}", timeout=60)
if err and 'fallocate' in err: if err and 'fallocate' in err:
logs.append(" ↳ fallocate not available, using dd (this may take a moment)...") logs.append(" ↳ fallocate not available, using dd...")
out, err = _run( out, err = _run(
f"dd if=/dev/zero of={img_path} bs=1M count={disk_mb} status=none", f"dd if=/dev/zero of={img_path} bs=1M count={disk_mb} status=none",
timeout=600 timeout=600
@@ -212,33 +383,24 @@ def _setup_virtual_disk(username, disk_mb, logs):
return False, f"Failed to create image: {err}" return False, f"Failed to create image: {err}"
logs.append(f" ✅ Image file created ({disk_mb} MB)") logs.append(f" ✅ Image file created ({disk_mb} MB)")
# ── Step 2: Format as ext4 ───────────────────────────────────────────────
out, err = _run(f"mkfs.ext4 -F {img_path}", timeout=60) out, err = _run(f"mkfs.ext4 -F {img_path}", timeout=60)
if err and 'mke2fs' not in err and 'Discarding device blocks' not in err: if err and 'error' in err.lower() and 'failed' in err.lower():
# mkfs.ext4 writes info to stderr even on success; only fail on real errors
if 'error' in err.lower() or 'failed' in err.lower():
return False, f"mkfs.ext4 failed: {err}" return False, f"mkfs.ext4 failed: {err}"
logs.append(" ✅ Formatted as ext4") logs.append(" ✅ Formatted as ext4")
# ── Step 3: Back up current home contents ────────────────────────────────
tmp_backup = f"/tmp/{username}_home_backup" tmp_backup = f"/tmp/{username}_home_backup"
_run(f"cp -a {home}/. {tmp_backup}/ 2>/dev/null") _run(f"cp -a {home}/. {tmp_backup}/ 2>/dev/null")
# ── Step 4: Mount the image over the user's home ─────────────────────────
out, err = _run(f"mount -o loop {img_path} {home}") out, err = _run(f"mount -o loop {img_path} {home}")
if err: if err:
return False, f"mount failed: {err}" return False, f"mount failed: {err}"
logs.append(f" ✅ Mounted at {home}") logs.append(f" ✅ Mounted at {home}")
# ── Step 5: Restore home contents into the new disk ──────────────────────
_run(f"cp -a {tmp_backup}/. {home}/ 2>/dev/null") _run(f"cp -a {tmp_backup}/. {home}/ 2>/dev/null")
_run(f"rm -rf {tmp_backup}") _run(f"rm -rf {tmp_backup}")
# ── Step 6: Fix ownership ─────────────────────────────────────────────────
_run(f"chown -R {username}:{username} {home}") _run(f"chown -R {username}:{username} {home}")
logs.append(" ✅ Ownership set") logs.append(" ✅ Ownership set")
# ── Step 7: Add to /etc/fstab for persistence ────────────────────────────
fstab_line = f"{img_path} {home} ext4 loop,defaults 0 0\n" fstab_line = f"{img_path} {home} ext4 loop,defaults 0 0\n"
try: try:
with open('/etc/fstab', 'r') as f: with open('/etc/fstab', 'r') as f:
@@ -246,18 +408,15 @@ def _setup_virtual_disk(username, disk_mb, logs):
if img_path not in fstab: if img_path not in fstab:
with open('/etc/fstab', 'a') as f: with open('/etc/fstab', 'a') as f:
f.write(fstab_line) f.write(fstab_line)
logs.append(" ✅ Added to /etc/fstab (persistent across reboots)") logs.append(" ✅ Added to /etc/fstab")
else:
logs.append(" Already in /etc/fstab")
except Exception as e: except Exception as e:
logs.append(f" ⚠️ Could not update /etc/fstab: {e}") logs.append(f" ⚠️ Could not update /etc/fstab: {e}")
logs.append(f"✅ Virtual disk ready: {disk_mb} MB dedicated to {username}") logs.append(f"✅ Virtual disk ready: {disk_mb} MB for {username}")
return True, "ok" return True, "ok"
def _remove_virtual_disk(username, logs): def _remove_virtual_disk(username, logs):
"""Unmount and remove the virtual disk image for a user."""
try: try:
pw = pwd.getpwnam(username) pw = pwd.getpwnam(username)
home = pw.pw_dir home = pw.pw_dir
@@ -265,17 +424,13 @@ def _remove_virtual_disk(username, logs):
return return
img_path = f"/home/{username}.img" img_path = f"/home/{username}.img"
# Unmount
_run(f"umount {home} 2>/dev/null") _run(f"umount {home} 2>/dev/null")
logs.append(f" ↳ Unmounted {home}") logs.append(f" ↳ Unmounted {home}")
# Remove image
if os.path.exists(img_path): if os.path.exists(img_path):
os.remove(img_path) os.remove(img_path)
logs.append(f" ↳ Removed {img_path}") logs.append(f" ↳ Removed {img_path}")
# Remove from fstab
try: try:
with open('/etc/fstab', 'r') as f: with open('/etc/fstab', 'r') as f:
lines = f.readlines() lines = f.readlines()
@@ -289,10 +444,6 @@ def _remove_virtual_disk(username, logs):
def _setup_rootless_docker_via_script(username, logs): def _setup_rootless_docker_via_script(username, logs):
"""
Setup rootless Docker for a user by running the official installer.
This must be done AS the user in a proper login shell.
"""
try: try:
pw = pwd.getpwnam(username) pw = pwd.getpwnam(username)
uid = pw.pw_uid uid = pw.pw_uid
@@ -300,11 +451,9 @@ def _setup_rootless_docker_via_script(username, logs):
except KeyError as e: except KeyError as e:
return False, str(e) return False, str(e)
# First, ensure the sysctl setting is applied (critical!)
_run("sysctl -w kernel.apparmor_restrict_unprivileged_userns=0") _run("sysctl -w kernel.apparmor_restrict_unprivileged_userns=0")
_run("echo 'kernel.apparmor_restrict_unprivileged_userns=0' >> /etc/sysctl.conf") _run("echo 'kernel.apparmor_restrict_unprivileged_userns=0' >> /etc/sysctl.conf")
# Ensure XDG_RUNTIME_DIR exists with correct permissions
runtime_dir = f"/run/user/{uid}" runtime_dir = f"/run/user/{uid}"
os.makedirs(runtime_dir, exist_ok=True) os.makedirs(runtime_dir, exist_ok=True)
_run(f"chown {username}:{username} {runtime_dir}") _run(f"chown {username}:{username} {runtime_dir}")
@@ -312,28 +461,20 @@ def _setup_rootless_docker_via_script(username, logs):
logs.append(f"📝 Installing rootless Docker for {username}...") logs.append(f"📝 Installing rootless Docker for {username}...")
# Create a simple installation script that runs as the user
install_cmd = f"""bash -c ' install_cmd = f"""bash -c '
export XDG_RUNTIME_DIR=/run/user/{uid} export XDG_RUNTIME_DIR=/run/user/{uid}
export PATH=$HOME/bin:$PATH export PATH=$HOME/bin:$PATH
mkdir -p $XDG_RUNTIME_DIR mkdir -p $XDG_RUNTIME_DIR
chmod 700 $XDG_RUNTIME_DIR chmod 700 $XDG_RUNTIME_DIR
# Install rootless Docker
curl -fsSL https://get.docker.com/rootless | sh curl -fsSL https://get.docker.com/rootless | sh
# Add environment variables to .bashrc
echo "export PATH=$HOME/bin:\\$PATH" >> ~/.bashrc echo "export PATH=$HOME/bin:\\$PATH" >> ~/.bashrc
echo "export DOCKER_HOST=unix:///run/user/{uid}/docker.sock" >> ~/.bashrc echo "export DOCKER_HOST=unix:///run/user/{uid}/docker.sock" >> ~/.bashrc
echo "export XDG_RUNTIME_DIR=/run/user/{uid}" >> ~/.bashrc echo "export XDG_RUNTIME_DIR=/run/user/{uid}" >> ~/.bashrc
# Create systemd service
mkdir -p ~/.config/systemd/user mkdir -p ~/.config/systemd/user
cat > ~/.config/systemd/user/docker.service << EOF cat > ~/.config/systemd/user/docker.service << EOF
[Unit] [Unit]
Description=Docker Rootless Daemon Description=Docker Rootless Daemon
After=network.target After=network.target
[Service] [Service]
Type=simple Type=simple
ExecStart=$HOME/bin/dockerd-rootless.sh ExecStart=$HOME/bin/dockerd-rootless.sh
@@ -341,30 +482,20 @@ Restart=always
RestartSec=10 RestartSec=10
Environment=PATH=$HOME/bin:/usr/local/bin:/usr/bin:/bin Environment=PATH=$HOME/bin:/usr/local/bin:/usr/bin:/bin
Environment=XDG_RUNTIME_DIR=/run/user/{uid} Environment=XDG_RUNTIME_DIR=/run/user/{uid}
[Install] [Install]
WantedBy=default.target WantedBy=default.target
EOF EOF
# Enable and start the service
systemctl --user daemon-reload systemctl --user daemon-reload
systemctl --user enable docker.service systemctl --user enable docker.service
systemctl --user start docker.service systemctl --user start docker.service
# Wait for socket
sleep 5 sleep 5
'""" '"""
try: try:
# Run the installation as the user
result = subprocess.run( result = subprocess.run(
['su', '-', username, '-c', install_cmd], ['su', '-', username, '-c', install_cmd],
capture_output=True, capture_output=True, text=True, timeout=300
text=True,
timeout=300
) )
# Log output
for line in result.stdout.split('\n'): for line in result.stdout.split('\n'):
if line.strip(): if line.strip():
logs.append(line.strip()) logs.append(line.strip())
@@ -373,7 +504,6 @@ sleep 5
if line.strip(): if line.strip():
logs.append(f" stderr: {line.strip()}") logs.append(f" stderr: {line.strip()}")
# Check if socket exists
sock = f"/run/user/{uid}/docker.sock" sock = f"/run/user/{uid}/docker.sock"
if os.path.exists(sock): if os.path.exists(sock):
logs.append(f"✅ Rootless Docker ready for {username}") logs.append(f"✅ Rootless Docker ready for {username}")
@@ -388,68 +518,3 @@ sleep 5
except Exception as e: except Exception as e:
logs.append(f"⚠️ Setup failed: {e}") logs.append(f"⚠️ Setup failed: {e}")
return False, str(e) return False, str(e)
def delete_user(username, remove_home=False):
    """Remove a local system user and their rootless-docker / virtual-disk artifacts.

    Parameters:
        username    -- system account to remove
        remove_home -- pass `userdel -r` so the home directory is deleted too

    Returns:
        (success: bool, message: str)
    """
    logs = []
    # Single passwd lookup (the original queried pwd twice); the entry is
    # reused below for the UID.
    try:
        pw = pwd.getpwnam(username)
    except KeyError:
        return False, f"User '{username}' does not exist"
    # Best effort: stop their rootless docker service before removal.
    try:
        _run(
            f"XDG_RUNTIME_DIR=/run/user/{pw.pw_uid} "
            f"su --login {username} --command 'systemctl --user stop docker-rootless.service 2>/dev/null' "
            f"2>/dev/null"
        )
    except Exception:
        # Deliberately swallowed: a dead docker service must not block deletion.
        pass
    _run(f"loginctl disable-linger {username} 2>/dev/null")
    # Clean up virtual disk BEFORE userdel (userdel might complain if home is busy)
    _remove_virtual_disk(username, logs)
    flag = '-r' if remove_home else ''
    out, err = _run(f"userdel {flag} {username}")
    # userdel emits benign notices on stderr; only fail on real errors.
    if err and 'does not exist' not in err and 'mail spool' not in err:
        return False, f"userdel error: {err}"
    msg = f"✅ User {username} deleted" + (" (home removed)" if remove_home else "")
    logs.append(msg)
    return True, '\n'.join(logs)
def get_user_disk_usage(username):
    """Return disk-usage info for a local user's home directory.

    Returns:
        {} when the user does not exist, otherwise a dict:
            home  -- home directory path
            total -- human-readable `du -sh` of the home tree ('N/A' if unknown)
            vdisk -- {} or {'size','used','available','use_pct'} when a
                     loop-device image /home/<user>.img backs the home
    """
    try:
        pw = pwd.getpwnam(username)
    except KeyError:
        return {}
    total_out, _ = _run(f"du -sh {pw.pw_dir} 2>/dev/null | cut -f1")
    # Check whether the home is backed by a virtual disk image and, if so,
    # report the mounted filesystem's capacity/usage from `df -h`.
    img_path = f"/home/{username}.img"
    vdisk_info = {}
    if os.path.exists(img_path):
        df_out, _ = _run(f"df -h {pw.pw_dir} 2>/dev/null | tail -1")
        if df_out:
            parts = df_out.split()
            if len(parts) >= 4:
                vdisk_info = {
                    'size': parts[1],
                    'used': parts[2],
                    'available': parts[3],
                    'use_pct': parts[4] if len(parts) > 4 else '?',
                }
    return {
        'home': pw.pw_dir,
        # Strip whitespace/newline from the shell output, otherwise the
        # 'N/A' fallback can never trigger ('\n' is truthy).
        'total': (total_out or '').strip() or 'N/A',
        'vdisk': vdisk_info,
    }

View File

@@ -15,22 +15,22 @@
<div class="metrics-row"> <div class="metrics-row">
<div class="metric-card cpu"> <div class="metric-card cpu">
<div class="metric-label">CPU USAGE</div> <div class="metric-label">CPU USAGE</div>
<div class="metric-value" id="m-cpu">{{ system.cpu_pct }}<span>%</span></div> <div class="metric-value" id="m-cpu">{{ system.cpu_pct or '…' }}<span>%</span></div>
<div class="gauge-bar"><div class="gauge-fill" id="g-cpu" style="width:{{ system.cpu_pct }}%"></div></div> <div class="gauge-bar"><div class="gauge-fill" id="g-cpu" style="width:{{ system.cpu_pct or 0 }}%"></div></div>
</div> </div>
<div class="metric-card mem"> <div class="metric-card mem">
<div class="metric-label">MEMORY</div> <div class="metric-label">MEMORY</div>
<div class="metric-value" id="m-mem" style="font-size:16px;">{{ system.memory }}</div> <div class="metric-value" id="m-mem" style="font-size:16px;">{{ system.memory or '…' }}</div>
<div class="gauge-bar"><div class="gauge-fill" id="g-mem" style="width:{{ system.mem_pct }}%"></div></div> <div class="gauge-bar"><div class="gauge-fill" id="g-mem" style="width:{{ system.mem_pct or 0 }}%"></div></div>
</div> </div>
<div class="metric-card disk"> <div class="metric-card disk">
<div class="metric-label">DISK /</div> <div class="metric-label">DISK /</div>
<div class="metric-value" id="m-disk" style="font-size:16px;">{{ system.disk }}</div> <div class="metric-value" id="m-disk" style="font-size:16px;">{{ system.disk or '…' }}</div>
<div class="gauge-bar"><div class="gauge-fill" id="g-disk" style="width:{{ system.disk_pct }}%"></div></div> <div class="gauge-bar"><div class="gauge-fill" id="g-disk" style="width:{{ system.disk_pct or 0 }}%"></div></div>
</div> </div>
<div class="metric-card load"> <div class="metric-card load">
<div class="metric-label">LOAD AVG</div> <div class="metric-label">LOAD AVG</div>
<div class="metric-value" id="m-load" style="font-size:16px;">{{ system.load }}</div> <div class="metric-value" id="m-load" style="font-size:16px;">{{ system.load or '…' }}</div>
<div class="gauge-bar"><div class="gauge-fill" id="g-load" style="width:10%"></div></div> <div class="gauge-bar"><div class="gauge-fill" id="g-load" style="width:10%"></div></div>
</div> </div>
</div> </div>
@@ -38,7 +38,7 @@
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header">
<div class="card-title"><i class="fas fa-chart-line"></i> Overview</div> <div class="card-title"><i class="fas fa-chart-line"></i> Overview</div>
<span class="card-meta">Docker {{ system.docker_v }} · {{ main_server }}</span> <span class="card-meta" id="overview-meta">Docker {{ system.docker_v or '…' }} · {{ main_server }}</span>
</div> </div>
<div class="stat-row"> <div class="stat-row">
<div class="stat-card"><div class="stat-number" id="stat-total">{{ containers|length }}</div><div class="stat-label">App Containers</div></div> <div class="stat-card"><div class="stat-number" id="stat-total">{{ containers|length }}</div><div class="stat-label">App Containers</div></div>
@@ -99,10 +99,135 @@
<td><div class="action-btns">{{ ctr_actions(c.name) }}</div></td> <td><div class="action-btns">{{ ctr_actions(c.name) }}</div></td>
</tr> </tr>
{% else %} {% else %}
<tr><td colspan="9"><div class="empty-state"><i class="fas fa-inbox"></i>No containers</div></td></tr> <tr id="empty-row"><td colspan="9"><div class="empty-state"><i class="fas fa-inbox"></i>No containers</div></td></tr>
{% endfor %} {% endfor %}
</tbody> </tbody>
</table> </table>
</div> </div>
</div> </div>
{% if not running_on_main %}
<script>
(function () {
  // Async dashboard loader: the server skipped the slow data at render time,
  // so this fetches metrics + containers from /api/dashboard after page load
  // and fills in the placeholder values.

  // ── Helper: build a container row identical to the Jinja template ──
  // NOTE(review): c.name / c.image / c.ports are interpolated into HTML and
  // into inline onclick handlers WITHOUT escaping — assumes container
  // metadata returned by the API is trusted; confirm upstream validation.
  function buildRow(c) {
    const isUp = c.status && c.status.includes('Up');
    const badge = isUp
      ? `<span class="badge badge-run">Running</span>`
      : `<span class="badge badge-stop">Stopped</span>`;
    return `
    <tr data-ctr="${c.name}">
      <td class="ct-name">${c.name}</td>
      <td class="ctr-status-cell" data-ctr="${c.name}">${badge}</td>
      <td><span class="stat-pct" data-ctr="${c.name}" data-stat="cpu">—</span></td>
      <td>
        <div class="stat-bar-wrap">
          <div class="stat-bar-bg">
            <div class="stat-bar-fill" data-ctr="${c.name}" data-stat="mem_bar" style="width:0%"></div>
          </div>
          <span class="stat-pct" data-ctr="${c.name}" data-stat="mem_pct">—</span>
        </div>
      </td>
      <td><span class="stat-pct" data-ctr="${c.name}" data-stat="net" style="color:var(--cyan)">—</span></td>
      <td class="col-extra app-extra" style="display:none;">
        <span class="stat-pct" data-ctr="${c.name}" data-stat="block" style="color:var(--yellow)">—</span>
      </td>
      <td class="col-extra app-extra ct-image" style="display:none;">${c.image || ''}</td>
      <td class="col-extra app-extra ct-ports" style="display:none;">${c.ports || '—'}</td>
      <td><div class="action-btns">
        <button class="ctr-action-btn restart" title="Restart" onclick="ctrAction('${c.name}','restart',this)">
          <i class="fas fa-rotate-right"></i>
        </button>
        <button class="ctr-action-btn stop" title="Stop" onclick="ctrAction('${c.name}','stop',this)">
          <i class="fas fa-stop"></i>
        </button>
        <button class="ctr-action-btn start" title="Start" onclick="ctrAction('${c.name}','start',this)">
          <i class="fas fa-play"></i>
        </button>
      </div></td>
    </tr>`;
  }

  // ── Populate metrics cards (CPU / memory / disk / load gauges) ──
  function applySystem(s) {
    if (!s) return;
    const cpu = parseFloat(s.cpu_pct) || 0;
    document.getElementById('m-cpu').innerHTML = `${cpu.toFixed(1)}<span>%</span>`;
    document.getElementById('m-mem').textContent = s.memory || '—';
    document.getElementById('m-disk').textContent = s.disk || '—';
    document.getElementById('m-load').textContent = s.load || '—';
    // Gauge widths are clamped to 100% so out-of-range data cannot overflow the bar.
    document.getElementById('g-cpu').style.width = `${Math.min(cpu, 100)}%`;
    document.getElementById('g-mem').style.width = `${Math.min(parseFloat(s.mem_pct) || 0, 100)}%`;
    document.getElementById('g-disk').style.width = `${Math.min(parseFloat(s.disk_pct) || 0, 100)}%`;
    if (s.docker_v) {
      const meta = document.getElementById('overview-meta');
      if (meta) meta.textContent = `Docker ${s.docker_v} · {{ main_server }}`;
    }
  }

  // ── Populate overview stat numbers (fields missing from the payload are skipped) ──
  function applyStats(data) {
    const set = (id, val) => {
      const el = document.getElementById(id);
      if (el && val !== undefined && val !== null) el.textContent = val;
    };
    set('stat-total', data.containers ? data.containers.length : undefined);
    set('stat-running', data.running_count);
    set('stat-users', data.user_count);
    set('stat-local-bk', data.local_backups);
    set('stat-vm-bk', data.vm_backups);
  }

  // ── Populate the containers table ──
  function applyContainers(containers) {
    // Empty/missing list: keep the server-rendered "No containers" row as-is.
    if (!containers || !containers.length) return;
    const tbody = document.getElementById('app-containers-body');
    if (!tbody) return;
    tbody.innerHTML = containers.map(buildRow).join('');
    // Re-apply column visibility in case "Show more" was already toggled
    const extras = tbody.querySelectorAll('.app-extra');
    const btn = document.getElementById('app-toggle-btn');
    if (btn && btn.dataset.expanded === 'true') {
      extras.forEach(el => el.style.display = '');
    }
    // Kick stats refresh if a global function exists (from base template)
    if (typeof refreshContainerStats === 'function') {
      refreshContainerStats();
    }
  }

  // ── Main async loader: one fetch feeds all three appliers above ──
  async function loadDashboardAsync() {
    try {
      const res = await fetch('/api/dashboard');
      if (!res.ok) throw new Error(`HTTP ${res.status}`);
      const data = await res.json();
      applySystem(data.system);
      applyStats(data);
      applyContainers(data.containers);
    } catch (err) {
      // On failure the dashboard simply keeps its placeholder values.
      console.error('[dashboard] async load failed:', err);
    }
  }

  // Run immediately on DOM ready
  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', loadDashboardAsync);
  } else {
    loadDashboardAsync();
  }
})();
</script>
{% endif %}
{% endblock %} {% endblock %}

View File

@@ -240,3 +240,20 @@ echo " Name: $BACKUP_NAME"
echo " Local: $BACKUP_ARCHIVE ($COMPRESSED_SIZE)" echo " Local: $BACKUP_ARCHIVE ($COMPRESSED_SIZE)"
echo " Remote: ${VM_HOST}:${VM_DEST}${BACKUP_NAME}.tar.gz" echo " Remote: ${VM_HOST}:${VM_DEST}${BACKUP_NAME}.tar.gz"
echo "=========================================" echo "========================================="
# ── Chiffrement AES-256 ──────────────────────────────────────────────────────
# Encrypt the finished archive with AES-256-CBC (PBKDF2 key derivation), then
# delete the plaintext. FIX: the plaintext is only removed when openssl
# succeeds — previously an encryption failure still deleted the archive,
# destroying the backup.
# SECURITY: the passphrase was hardcoded; it is now read from
# $BACKUP_ENC_PASS (default keeps the historical value for compatibility —
# rotate it and set the variable from a root-only config instead).
encrypt_backup() {
    echo "🔐 Chiffrement AES-256..."
    openssl enc -aes-256-cbc -pbkdf2 -pass "pass:${BACKUP_ENC_PASS:-Navitrends2024!}" \
        -in "$BACKUP_ARCHIVE" \
        -out "${BACKUP_ARCHIVE}.enc" || return 1
    rm -f "$BACKUP_ARCHIVE"
    echo "✅ Archive chiffrée : ${BACKUP_ARCHIVE}.enc"
}

# ── Notification email échec ─────────────────────────────────────────────────
# Send a failure notification; recipient overridable via $BACKUP_ALERT_EMAIL
# (defaults to the historical hardcoded address).
notify_failure() {
    echo "📧 Envoi notification échec..."
    echo "Backup FAILED: $BACKUP_NAME" | \
        mail -s "[Navitrends] BACKUP FAILED - $(date)" "${BACKUP_ALERT_EMAIL:-arijabidi577@gmail.com}"
}