From bd907c8d40f5d8912b7ee08616ddd438fc816372 Mon Sep 17 00:00:00 2001 From: j Date: Sat, 7 Mar 2026 19:32:02 +1300 Subject: [PATCH] Yay --- app/Dockerfile | 11 ++ app/app.py | 370 +++++++++++++++++++++++++++++++++++++ app/gather_info.sh | 136 ++++++++++++++ app/requirements.txt | 5 + app/static/style.css | 318 +++++++++++++++++++++++++++++++ app/templates/index.html | 241 ++++++++++++++++++++++++ backup.sh | 12 ++ config/infrastructure.conf | 20 ++ config/service.env | 18 ++ destroy.sh | 13 ++ docker-compose.yml | 40 ++++ install-pre.sh | 10 + install.sh | 22 +++ logs.sh | 8 + ports.sh | 5 + restore.sh | 13 ++ start.sh | 11 ++ status.sh | 23 +++ stop.sh | 10 + template_info.env | 5 + uninstall.sh | 12 ++ 21 files changed, 1303 insertions(+) create mode 100644 app/Dockerfile create mode 100644 app/app.py create mode 100755 app/gather_info.sh create mode 100644 app/requirements.txt create mode 100644 app/static/style.css create mode 100644 app/templates/index.html create mode 100755 backup.sh create mode 100644 config/infrastructure.conf create mode 100644 config/service.env create mode 100755 destroy.sh create mode 100644 docker-compose.yml create mode 100755 install-pre.sh create mode 100755 install.sh create mode 100755 logs.sh create mode 100755 ports.sh create mode 100755 restore.sh create mode 100755 start.sh create mode 100755 status.sh create mode 100755 stop.sh create mode 100644 template_info.env create mode 100755 uninstall.sh diff --git a/app/Dockerfile b/app/Dockerfile new file mode 100644 index 0000000..9deac75 --- /dev/null +++ b/app/Dockerfile @@ -0,0 +1,11 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . 
+ +EXPOSE 5000 +CMD ["python", "app.py"] diff --git a/app/app.py b/app/app.py new file mode 100644 index 0000000..a9dea15 --- /dev/null +++ b/app/app.py @@ -0,0 +1,370 @@ +import os +import re +import time +import logging +import threading +from datetime import datetime, timezone +from concurrent.futures import ThreadPoolExecutor, as_completed + +import paramiko +import pymysql +pymysql.install_as_MySQLdb() + +from flask import Flask, render_template, jsonify +from flask_sqlalchemy import SQLAlchemy + +logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') +logger = logging.getLogger(__name__) + +app = Flask(__name__) +app.config['SQLALCHEMY_DATABASE_URI'] = ( + f"mysql+pymysql://{os.environ['MYSQL_USER']}:{os.environ['MYSQL_PASSWORD']}" + f"@{os.environ.get('MYSQL_HOST', 'db')}/{os.environ['MYSQL_DATABASE']}" +) +app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {'pool_recycle': 280, 'pool_pre_ping': True} +db = SQLAlchemy(app) + +COLLECTION_INTERVAL = int(os.environ.get('COLLECTION_INTERVAL', 300)) +MAX_CONCURRENT_SSH = int(os.environ.get('MAX_CONCURRENT_SSH', 5)) +SSH_KEY_PATH = '/app/ssh_key' +INFRA_CONF_PATH = '/app/infrastructure.conf' + + +# --- Database Model --- + +class Server(db.Model): + __tablename__ = 'servers' + id = db.Column(db.Integer, primary_key=True) + group_name = db.Column(db.String(255), nullable=False) + username = db.Column(db.String(255), nullable=False) + hostname = db.Column(db.String(255), nullable=False) + primary_ip = db.Column(db.String(45), default='') + is_online = db.Column(db.Boolean, default=False) + last_collected = db.Column(db.DateTime, nullable=True) + details = db.Column(db.JSON, nullable=True) + __table_args__ = (db.UniqueConstraint('username', 'hostname', name='uq_user_host'),) + + +# --- Config Parsing --- + +def parse_infrastructure_conf(): + servers = [] + current_group = None + try: + with open(INFRA_CONF_PATH) as f: + for line in f: + line = line.rstrip('\n') + if not line.strip() or 
line.strip().startswith('#'): + continue + if line[0] not in (' ', '\t'): + current_group = line.strip() + else: + entry = line.strip() + if '@' in entry: + user, host = entry.split('@', 1) + servers.append({ + 'group': current_group or 'Default', + 'username': user.strip(), + 'hostname': host.strip(), + }) + except FileNotFoundError: + logger.error("infrastructure.conf not found at %s", INFRA_CONF_PATH) + return servers + + +# --- SSH Collection --- + +def load_ssh_key(): + for key_class in [paramiko.Ed25519Key, paramiko.RSAKey, paramiko.ECDSAKey]: + try: + return key_class.from_private_key_file(SSH_KEY_PATH) + except Exception: + continue + raise RuntimeError(f"Could not load SSH key from {SSH_KEY_PATH}") + + +def collect_one(entry, ssh_key): + """SSH into a single server, run the gather script, return parsed data.""" + try: + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect( + entry['hostname'], + username=entry['username'], + pkey=ssh_key, + timeout=15, + banner_timeout=15, + auth_timeout=15, + ) + + with open('/app/gather_info.sh') as f: + script = f.read() + + stdin, stdout, stderr = ssh.exec_command('bash -s', timeout=60) + stdin.write(script) + stdin.channel.shutdown_write() + output = stdout.read().decode('utf-8', errors='replace') + ssh.close() + + data = parse_gather_output(output) + data['is_online'] = True + return data + except Exception as e: + logger.warning("Failed to collect from %s@%s: %s", entry['username'], entry['hostname'], e) + return {'is_online': False, 'error': str(e)} + + +def parse_gather_output(output): + """Parse the [section] key=value output from gather_info.sh.""" + data = {} + current_section = None + + for line in output.split('\n'): + line = line.strip() + if not line: + continue + + # Section header: [name] or [name:id] + m = re.match(r'^\[(.+)\]$', line) + if m: + section = m.group(1) + if section == 'end': + break + if ':' in section: + base, name = section.split(':', 1) + 
if base not in data: + data[base] = [] + item = {'_name': name} + data[base].append(item) + current_section = ('list', item) + else: + if section not in data: + data[section] = {} + current_section = ('dict', data[section]) + continue + + # Key=value + if '=' in line and current_section: + key, _, value = line.partition('=') + key = key.strip() + value = value.strip() + + if current_section[0] == 'dict': + section_data = current_section[1] + # Handle repeated keys (e.g., dns server=) + if key in section_data: + if not isinstance(section_data[key], list): + section_data[key] = [section_data[key]] + section_data[key].append(value) + else: + section_data[key] = value + elif current_section[0] == 'list': + current_section[1][key] = value + + return data + + +# --- Collection Loop --- + +def collect_all(): + entries = parse_infrastructure_conf() + if not entries: + logger.info("No servers configured in infrastructure.conf") + return + + try: + ssh_key = load_ssh_key() + except Exception as e: + logger.error("SSH key error: %s", e) + return + + logger.info("Collecting from %d servers (max %d concurrent)", len(entries), MAX_CONCURRENT_SSH) + results = {} + + with ThreadPoolExecutor(max_workers=MAX_CONCURRENT_SSH) as pool: + futures = {pool.submit(collect_one, e, ssh_key): e for e in entries} + for future in as_completed(futures): + entry = futures[future] + key = f"{entry['username']}@{entry['hostname']}" + try: + results[key] = (entry, future.result(timeout=90)) + except Exception as e: + results[key] = (entry, {'is_online': False, 'error': str(e)}) + + # Update database (all in main collector thread) + with app.app_context(): + for key, (entry, result) in results.items(): + server = Server.query.filter_by( + username=entry['username'], + hostname=entry['hostname'], + ).first() + + if not server: + server = Server( + group_name=entry['group'], + username=entry['username'], + hostname=entry['hostname'], + ) + db.session.add(server) + + server.group_name = entry['group'] + 
server.is_online = result.get('is_online', False)
+            server.last_collected = datetime.now(timezone.utc)
+            server.details = result
+
+            # Extract primary IP: prefer the interface carrying the default route
+            default_iface = ''
+            routing = result.get('routing', {})
+            if isinstance(routing, dict):
+                default_iface = routing.get('interface', '')
+
+            primary_ip = ''
+            for iface in result.get('net', []):
+                ipv4 = iface.get('ipv4', '')
+                if not ipv4 or ipv4.startswith('127.'):
+                    continue
+                iface_name = iface.get('name', '') or iface.get('_name', '')
+                if iface_name == default_iface:
+                    primary_ip = ipv4
+                    break
+            # Fall back to the last-seen address only if the loop ran at all:
+            # with no reported interfaces, 'ipv4' would be unbound (NameError).
+            if not primary_ip and result.get('net'):
+                primary_ip = ipv4
+            server.primary_ip = primary_ip
+
+        db.session.commit()
+    logger.info("Collection complete, updated %d servers", len(results))
+
+
+def collector_loop():
+    time.sleep(10)  # Let the app start up
+    while True:
+        try:
+            collect_all()
+        except Exception as e:
+            logger.error("Collection loop error: %s", e)
+        time.sleep(COLLECTION_INTERVAL)
+
+
+# --- Web Routes ---
+
+@app.route('/')
+def index():
+    servers = Server.query.order_by(Server.group_name, Server.primary_ip).all()
+    groups = {}
+    for s in servers:
+        g = s.group_name or 'Default'
+        if g not in groups:
+            groups[g] = []
+        groups[g].append(s)
+
+    # Sort servers within each group by IP (numerically)
+    for g in groups:
+        groups[g].sort(key=lambda s: _ip_sort_key(s.primary_ip))
+
+    return render_template('index.html', groups=groups)
+
+
+@app.route('/api/servers')
+def api_servers():
+    servers = Server.query.all()
+    result = []
+    for s in servers:
+        result.append({
+            'id': s.id,
+            'group_name': s.group_name,
+            'username': s.username,
+            'hostname': s.hostname,
+            'primary_ip': s.primary_ip,
+            'is_online': s.is_online,
+            'last_collected': s.last_collected.isoformat() if s.last_collected else None,
+            'details': s.details,
+        })
+    return jsonify(result)
+
+
+def _ip_sort_key(ip_str):
+    if not ip_str:
+        return [999, 999, 999, 999]
+    try:
+        return [int(x) for x in ip_str.split('.')]
+    except (ValueError, AttributeError):
+        return [999, 999, 999, 999]
+
+
+# --- Jinja2 Helpers ---
+
+@app.template_filter('format_bytes')
+def format_bytes(value):
+    try:
+        b = int(value)
+    except (TypeError, ValueError):
+        return value
+    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+        if abs(b) < 1024:
+            return f"{b:.1f} {unit}"
+        b /= 1024
+    return f"{b:.1f} PB"
+
+
+@app.template_filter('format_mb')
+def format_mb(value):
+    try:
+        mb = int(value)
+    except (TypeError, ValueError):
+        return value
+    if mb >= 1024:
+        return f"{mb / 1024:.1f} GB"
+    return f"{mb} MB"
+
+
+@app.template_filter('format_uptime')
+def format_uptime(seconds):
+    try:
+        s = int(seconds)
+    except (TypeError, ValueError):
+        return 'Unknown'
+    days = s // 86400
+    hours = (s % 86400) // 3600
+    if days > 0:
+        return f"{days}d {hours}h"
+    minutes = (s % 3600) // 60
+    return f"{hours}h {minutes}m"
+
+
+@app.template_filter('usage_color')
+def usage_color(percent):
+    try:
+        p = float(percent)
+    except (TypeError, ValueError):
+        return '#64748b'
+    if p >= 90:
+        return '#ef4444'
+    if p >= 75:
+        return '#f97316'
+    if p >= 60:
+        return '#eab308'
+    return '#22c55e'
+
+
+# --- Main ---
+
+if __name__ == '__main__':
+    # Wait for database
+    for attempt in range(30):
+        try:
+            with app.app_context():
+                db.create_all()
+            logger.info("Database ready")
+            break
+        except Exception as e:
+            logger.info("Waiting for database... 
(%s)", e) + time.sleep(2) + else: + logger.error("Could not connect to database after 30 attempts") + exit(1) + + # Start collector thread + collector_thread = threading.Thread(target=collector_loop, daemon=True) + collector_thread.start() + + app.run(host='0.0.0.0', port=5000, threaded=True) diff --git a/app/gather_info.sh b/app/gather_info.sh new file mode 100755 index 0000000..af6026a --- /dev/null +++ b/app/gather_info.sh @@ -0,0 +1,136 @@ +#!/bin/bash +# Gather system information from a remote server +# Output format: [section] headers followed by key=value pairs + +echo "[system]" +echo "hostname=$(hostname)" +if [ -f /etc/os-release ]; then + . /etc/os-release + echo "os_name=$NAME" + echo "os_version=$VERSION_ID" + echo "os_pretty=$PRETTY_NAME" +fi +echo "kernel=$(uname -r)" +echo "arch=$(uname -m)" +echo "uptime_seconds=$(cut -d' ' -f1 /proc/uptime 2>/dev/null | cut -d. -f1)" + +# Motherboard (readable without root on most systems) +echo "board_vendor=$(cat /sys/class/dmi/id/board_vendor 2>/dev/null || echo 'Unknown')" +echo "board_name=$(cat /sys/class/dmi/id/board_name 2>/dev/null || echo 'Unknown')" +echo "board_version=$(cat /sys/class/dmi/id/board_version 2>/dev/null || echo 'Unknown')" +echo "bios_version=$(cat /sys/class/dmi/id/bios_version 2>/dev/null || echo 'Unknown')" +echo "bios_date=$(cat /sys/class/dmi/id/bios_date 2>/dev/null || echo 'Unknown')" + +echo "[cpu]" +echo "model=$(lscpu 2>/dev/null | grep 'Model name' | sed 's/Model name:[[:space:]]*//')" +echo "cores=$(nproc 2>/dev/null || echo 0)" +echo "sockets=$(lscpu 2>/dev/null | grep 'Socket(s)' | awk '{print $2}')" +echo "threads_per_core=$(lscpu 2>/dev/null | grep 'Thread(s) per core' | awk '{print $2}')" + +# CPU usage - sample /proc/stat with 1 second interval +read -r label user1 nice1 system1 idle1 iowait1 irq1 softirq1 steal1 < /proc/stat +sleep 1 +read -r label user2 nice2 system2 idle2 iowait2 irq2 softirq2 steal2 < /proc/stat + +total1=$((user1 + nice1 + system1 + idle1 + iowait1 
+ irq1 + softirq1 + steal1))
+total2=$((user2 + nice2 + system2 + idle2 + iowait2 + irq2 + softirq2 + steal2))
+diff_total=$((total2 - total1))
+diff_idle=$((idle2 - idle1))
+if [ $diff_total -gt 0 ]; then
+    usage_x10=$(( (diff_total - diff_idle) * 1000 / diff_total ))
+    whole=$((usage_x10 / 10))
+    frac=$((usage_x10 % 10))
+    echo "usage_percent=${whole}.${frac}"
+else
+    echo "usage_percent=0.0"
+fi
+
+echo "[memory]"
+total_kb=$(grep MemTotal /proc/meminfo 2>/dev/null | awk '{print $2}')
+available_kb=$(grep MemAvailable /proc/meminfo 2>/dev/null | awk '{print $2}')
+if [ -n "$total_kb" ] && [ -n "$available_kb" ]; then
+    used_kb=$((total_kb - available_kb))
+    total_mb=$((total_kb / 1024))
+    used_mb=$((used_kb / 1024))
+    available_mb=$((available_kb / 1024))
+    if [ "$total_kb" -gt 0 ]; then
+        usage_x10=$((used_kb * 1000 / total_kb))
+        whole=$((usage_x10 / 10))
+        frac=$((usage_x10 % 10))
+        echo "usage_percent=${whole}.${frac}"
+    else
+        echo "usage_percent=0.0"
+    fi
+    echo "total_mb=$total_mb"
+    echo "used_mb=$used_mb"
+    echo "available_mb=$available_mb"
+fi
+
+# Physical disks
+lsblk -b -n -o NAME,SIZE,TYPE 2>/dev/null | while read -r name size type; do
+    if [ "$type" = "disk" ]; then
+        echo "[disk:$name]"
+        echo "name=$name"
+        echo "size_bytes=$size"
+    fi
+done
+
+# Mounted filesystem usage
+df -B1 --output=target,size,used,avail,pcent 2>/dev/null | tail -n +2 | while read -r mount total used avail percent; do
+    case "$mount" in
+        /|/home|/var|/tmp|/boot|/data*|/mnt*|/srv*|/opt*)
+            safename=$(echo "$mount" | tr '/' '_')
+            echo "[disk_usage:${safename}]"
+            echo "mount=$mount"
+            echo "total_bytes=$total"
+            echo "used_bytes=$used"
+            echo "available_bytes=$avail"
+            echo "usage_percent=${percent%\%}"
+            ;;
+    esac
+done
+
+# GPUs
+gpu_idx=0
+lspci 2>/dev/null | grep -iE 'vga|3d|display' | while read -r line; do
+    echo "[gpu:$gpu_idx]"
+    echo "description=$line"
+    gpu_idx=$((gpu_idx + 1))
+done
+
+# Network interfaces
+for iface in $(ls /sys/class/net/ 2>/dev/null); do
+    [ "$iface" = "lo" ] && continue
+
+    echo "[net:$iface]"
+    echo "name=$iface"
+    echo "mac=$(cat /sys/class/net/$iface/address 2>/dev/null)"
+    echo "ipv4=$(ip -4 addr show "$iface" 2>/dev/null | grep -oP 'inet \K[0-9.]+' | head -1)"
+    echo "ipv6=$(ip -6 addr show "$iface" 2>/dev/null | grep -oP 'inet6 \K[0-9a-f:]+' | grep -v '^fe80' | head -1)"
+    echo "state=$(cat /sys/class/net/$iface/operstate 2>/dev/null)"
+
+    speed=$(cat /sys/class/net/$iface/speed 2>/dev/null)
+    [ -n "$speed" ] && [ "$speed" != "-1" ] && echo "speed_mbps=$speed"
+
+    driver=$(readlink /sys/class/net/$iface/device/driver 2>/dev/null | xargs basename 2>/dev/null)
+    [ -n "$driver" ] && echo "driver=$driver"
+done
+
+echo "[routing]"
+default_line=$(ip route 2>/dev/null | grep default | head -1)
+echo "gateway=$(echo "$default_line" | awk '{print $3}')"
+echo "interface=$(echo "$default_line" | awk '{print $5}')"
+
+echo "[dns]"
+grep -E '^nameserver' /etc/resolv.conf 2>/dev/null | awk '{print "server=" $2}'
+
+echo "[tailscale]"
+if command -v tailscale &>/dev/null; then
+    echo "installed=true"
+    echo "ipv4=$(tailscale ip -4 2>/dev/null)"
+    echo "hostname=$(tailscale status --self --json 2>/dev/null | grep -o '"DNSName":"[^"]*"' | head -1 | cut -d'"' -f4)"
+else
+    echo "installed=false"
+fi
+
+echo "[end]"
diff --git a/app/requirements.txt b/app/requirements.txt
new file mode 100644
index 0000000..965d8c6
--- /dev/null
+++ b/app/requirements.txt
@@ -0,0 +1,5 @@
+flask==3.1.*
+flask-sqlalchemy==3.1.*
+pymysql==1.1.*
+paramiko==3.5.*
+cryptography>=43.0
diff --git a/app/static/style.css b/app/static/style.css
new file mode 100644
index 0000000..b21afdb
--- /dev/null
+++ b/app/static/style.css
@@ -0,0 +1,318 @@
+* {
+    margin: 0;
+    padding: 0;
+    box-sizing: border-box;
+}
+
+body {
+    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
+    background: #0f172a;
+    color: #e2e8f0;
+    min-height: 100vh;
+}
+
+header {
+    background: #1e293b;
+    border-bottom: 1px
solid #334155; + padding: 20px 32px; + display: flex; + align-items: baseline; + gap: 16px; +} + +header h1 { + font-size: 1.5rem; + font-weight: 600; + color: #f1f5f9; +} + +.subtitle { + font-size: 0.8rem; + color: #64748b; +} + +main { + max-width: 1400px; + margin: 0 auto; + padding: 24px; +} + +/* --- Group --- */ + +.group { + margin-bottom: 32px; +} + +.group-header { + font-size: 1.1rem; + font-weight: 600; + color: #3b82f6; + padding: 8px 0 12px 0; + border-bottom: 2px solid #1e3a5f; + margin-bottom: 16px; + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.server-grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(260px, 1fr)); + gap: 12px; +} + +/* --- Server Card --- */ + +.server-card { + background: #1e293b; + border: 1px solid #334155; + border-radius: 10px; + padding: 16px; + cursor: pointer; + transition: all 0.2s ease; + position: relative; +} + +.server-card:hover { + border-color: #475569; + transform: translateY(-1px); + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3); +} + +.server-card.expanded { + grid-column: 1 / -1; + border-color: #3b82f6; + box-shadow: 0 0 20px rgba(59, 130, 246, 0.15); +} + +.server-card.offline .card-summary { + opacity: 0.6; +} + +.card-header { + display: flex; + align-items: center; + gap: 8px; + margin-bottom: 4px; +} + +.status-dot { + width: 8px; + height: 8px; + border-radius: 50%; + flex-shrink: 0; +} + +.status-dot.online { + background: #22c55e; + box-shadow: 0 0 6px rgba(34, 197, 94, 0.5); +} + +.status-dot.offline { + background: #ef4444; + box-shadow: 0 0 6px rgba(239, 68, 68, 0.5); +} + +.server-name { + font-weight: 600; + font-size: 0.95rem; + color: #f1f5f9; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.server-ip { + font-size: 0.8rem; + color: #94a3b8; + margin-bottom: 2px; + font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace; +} + +.server-os { + font-size: 0.75rem; + color: #64748b; + margin-bottom: 10px; + overflow: hidden; + 
text-overflow: ellipsis; + white-space: nowrap; +} + +.offline-label { + font-size: 0.8rem; + color: #ef4444; + font-style: italic; + margin-top: 8px; +} + +/* --- Usage Bars --- */ + +.usage-bars { + display: flex; + flex-direction: column; + gap: 5px; +} + +.usage-row { + display: flex; + align-items: center; + gap: 6px; +} + +.usage-label { + font-size: 0.65rem; + font-weight: 600; + color: #64748b; + width: 30px; + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.usage-bar-bg { + flex: 1; + height: 6px; + background: #334155; + border-radius: 3px; + overflow: hidden; +} + +.usage-bar-fill { + height: 100%; + border-radius: 3px; + transition: width 0.5s ease; + min-width: 2px; +} + +.usage-pct { + font-size: 0.7rem; + color: #94a3b8; + width: 30px; + text-align: right; + font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace; +} + +/* --- Card Details --- */ + +.card-details { + margin-top: 16px; + padding-top: 16px; + border-top: 1px solid #334155; +} + +.details-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(280px, 1fr)); + gap: 16px; +} + +.detail-section { + background: #0f172a; + border-radius: 8px; + padding: 12px; + border: 1px solid #1e3a5f; +} + +.detail-section.wide { + grid-column: 1 / -1; +} + +.detail-section h4 { + font-size: 0.75rem; + font-weight: 600; + color: #3b82f6; + text-transform: uppercase; + letter-spacing: 0.1em; + margin-bottom: 8px; +} + +.detail-section table { + width: 100%; + border-collapse: collapse; + font-size: 0.8rem; +} + +.detail-section td { + padding: 3px 8px 3px 0; + vertical-align: top; +} + +.detail-section td:first-child { + color: #64748b; + white-space: nowrap; + width: 1%; +} + +.detail-section td:last-child { + color: #cbd5e1; + word-break: break-all; +} + +.table-header td { + font-weight: 600; + color: #94a3b8 !important; + border-bottom: 1px solid #334155; + padding-bottom: 4px; + margin-bottom: 4px; + font-size: 0.7rem; + text-transform: uppercase; + letter-spacing: 
0.05em; +} + +.disk-pct { + font-weight: 600; + font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace; +} + +.error-text { + color: #ef4444; + font-size: 0.8rem; + font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace; +} + +.last-updated { + margin-top: 12px; + font-size: 0.7rem; + color: #475569; + text-align: right; +} + +/* --- Empty State --- */ + +.empty-state { + text-align: center; + padding: 80px 24px; + color: #64748b; +} + +.empty-state h2 { + font-size: 1.2rem; + margin-bottom: 8px; + color: #94a3b8; +} + +.empty-state code { + background: #1e293b; + padding: 2px 8px; + border-radius: 4px; + font-size: 0.85rem; + color: #3b82f6; +} + +/* --- Responsive --- */ + +@media (max-width: 600px) { + header { + padding: 16px; + } + + main { + padding: 12px; + } + + .server-grid { + grid-template-columns: 1fr; + } + + .details-grid { + grid-template-columns: 1fr; + } +} diff --git a/app/templates/index.html b/app/templates/index.html new file mode 100644 index 0000000..54a29f0 --- /dev/null +++ b/app/templates/index.html @@ -0,0 +1,241 @@ + + + + + + + Infrastructure Map + + + +
+

Infrastructure Map

+ Auto-refreshes every 60s +
+ +
+ {% for group_name, servers in groups.items() %} +
+

{{ group_name }}

+
+ {% for server in servers %} + {% set d = server.details or {} %} + {% set sys = d.get('system', {}) if d.get('system') else {} %} + {% set cpu = d.get('cpu', {}) if d.get('cpu') else {} %} + {% set mem = d.get('memory', {}) if d.get('memory') else {} %} + {% set cpu_pct = cpu.get('usage_percent', '0')|float %} + {% set mem_pct = mem.get('usage_percent', '0')|float %} + {% set disk_usages = d.get('disk_usage', []) if d.get('disk_usage') else [] %} + {% set root_disk = namespace(pct=0.0) %} + {% for du in disk_usages %} + {% if du.get('mount') == '/' %} + {% set root_disk.pct = du.get('usage_percent', '0')|float %} + {% endif %} + {% endfor %} + {% if root_disk.pct == 0.0 and disk_usages|length > 0 %} + {% set root_disk.pct = disk_usages[0].get('usage_percent', '0')|float %} + {% endif %} + +
+
+
+ + {{ server.hostname }} +
+
{{ server.primary_ip or 'No IP' }}
+
{{ sys.get('os_pretty', '') }}
+ + {% if server.is_online %} +
+
+ CPU +
+
+
+ {{ '%.0f'|format(cpu_pct) }}% +
+
+ RAM +
+
+
+ {{ '%.0f'|format(mem_pct) }}% +
+
+ DISK +
+
+
+ {{ '%.0f'|format(root_disk.pct) }}% +
+
+ {% else %} +
Unreachable
+ {% endif %} +
+ + + +
+ {% endfor %} +
+
+ {% else %} +
+

No servers configured

+

Edit infrastructure.conf to add your servers.

+
+ {% endfor %} +
+ + + + diff --git a/backup.sh b/backup.sh new file mode 100755 index 0000000..ccf7e9d --- /dev/null +++ b/backup.sh @@ -0,0 +1,12 @@ +#!/bin/bash +source "${AGENT_PATH}/common.sh" +_check_required_env_vars "CONTAINER_NAME" "DATA_VOLUME" "BACKUP_FILE" "TEMP_DIR" + +mkdir -p "${TEMP_DIR}/backup" + +docker run --rm -v "${DATA_VOLUME}":/source -v "${TEMP_DIR}/backup":/backup \ + debian bash -c "tar -czf /backup/data.tgz -C /source ." || _die "Failed to backup data volume" + +tar -czf "${BACKUP_FILE}" -C "${TEMP_DIR}/backup" . || _die "Failed to create backup archive" + +echo "Backup completed successfully" diff --git a/config/infrastructure.conf b/config/infrastructure.conf new file mode 100644 index 0000000..b51c4ad --- /dev/null +++ b/config/infrastructure.conf @@ -0,0 +1,20 @@ +# Infrastructure Map Configuration +# +# Format: +# GROUPNAME +# USERNAME@SERVERNAME +# USERNAME@SERVERNAME +# ... +# +# GROUPNAME is a freeform label to group servers together. +# USERNAME@SERVERNAME is the SSH user and hostname/IP to connect to. 
+# +# Example: +# +# Production +# root@prod-web-01 +# root@prod-db-01 +# +# Development +# deploy@dev-01 +# deploy@dev-02 diff --git a/config/service.env b/config/service.env new file mode 100644 index 0000000..444fe32 --- /dev/null +++ b/config/service.env @@ -0,0 +1,18 @@ +CONTAINER_NAME=infmap +SSH_USER="root" + +# Web UI port (HTTP) +WEB_PORT=8080 + +# Path to SSH private key on the host (used to connect to monitored servers) +SSH_KEY_PATH=/root/.ssh/id_ed25519 + +# Collection settings +COLLECTION_INTERVAL=300 +MAX_CONCURRENT_SSH=5 + +# MySQL credentials +MYSQL_ROOT_PASSWORD=changeme_root +MYSQL_DATABASE=infmap +MYSQL_USER=infmap +MYSQL_PASSWORD=changeme diff --git a/destroy.sh b/destroy.sh new file mode 100755 index 0000000..1ac3747 --- /dev/null +++ b/destroy.sh @@ -0,0 +1,13 @@ +#!/bin/bash +source "${AGENT_PATH}/common.sh" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +_check_required_env_vars "CONTAINER_NAME" "DATA_VOLUME" + +echo "WARNING: This will PERMANENTLY DELETE all data for ${CONTAINER_NAME}" + +docker compose -p "${CONTAINER_NAME}" down 2>/dev/null || true +_remove_volume "$DATA_VOLUME" + +echo "Service and all data destroyed" diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..1498cf4 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,40 @@ +services: + db: + image: mysql:8 + environment: + MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD} + MYSQL_DATABASE: ${MYSQL_DATABASE} + MYSQL_USER: ${MYSQL_USER} + MYSQL_PASSWORD: ${MYSQL_PASSWORD} + volumes: + - db_data:/var/lib/mysql + restart: unless-stopped + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost"] + interval: 10s + timeout: 5s + retries: 10 + + app: + build: ./app + ports: + - "${WEB_PORT}:5000" + environment: + MYSQL_HOST: db + MYSQL_DATABASE: ${MYSQL_DATABASE} + MYSQL_USER: ${MYSQL_USER} + MYSQL_PASSWORD: ${MYSQL_PASSWORD} + COLLECTION_INTERVAL: ${COLLECTION_INTERVAL} + MAX_CONCURRENT_SSH: 
${MAX_CONCURRENT_SSH} + volumes: + - ${SSH_KEY_PATH}:/app/ssh_key:ro + - ${CONFIG_PATH}/infrastructure.conf:/app/infrastructure.conf:ro + depends_on: + db: + condition: service_healthy + restart: unless-stopped + +volumes: + db_data: + external: true + name: ${CONTAINER_NAME}_db_data diff --git a/install-pre.sh b/install-pre.sh new file mode 100755 index 0000000..8f48d3c --- /dev/null +++ b/install-pre.sh @@ -0,0 +1,10 @@ +#!/bin/bash +source "${AGENT_PATH}/common.sh" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +_check_required_env_vars "CONTAINER_NAME" + +docker compose -p "${CONTAINER_NAME}" pull || echo "Warning: pre-pull failed, install.sh will retry" + +echo "Pre-install complete" diff --git a/install.sh b/install.sh new file mode 100755 index 0000000..ab48f7c --- /dev/null +++ b/install.sh @@ -0,0 +1,22 @@ +#!/bin/bash +source "${AGENT_PATH}/common.sh" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +_check_required_env_vars "CONTAINER_NAME" "WEB_PORT" "SSH_KEY_PATH" "MYSQL_ROOT_PASSWORD" "MYSQL_DATABASE" "MYSQL_USER" "MYSQL_PASSWORD" "DATA_VOLUME" +_check_docker_installed || _die "Docker test failed" + +# Check SSH key exists +[ -f "${SSH_KEY_PATH}" ] || _die "SSH key not found at ${SSH_KEY_PATH}" + +# Check infrastructure.conf exists +[ -f "${CONFIG_PATH}/infrastructure.conf" ] || _die "infrastructure.conf not found at ${CONFIG_PATH}/infrastructure.conf" + +# Create data volume +docker volume create "${DATA_VOLUME}" 2>/dev/null || true + +bash ./stop.sh || true +docker compose -p "${CONTAINER_NAME}" up -d --build || _die "Failed to start services" + +echo "Installation of ${CONTAINER_NAME} complete" +echo "Web UI available at http://localhost:${WEB_PORT}" diff --git a/logs.sh b/logs.sh new file mode 100755 index 0000000..046158d --- /dev/null +++ b/logs.sh @@ -0,0 +1,8 @@ +#!/bin/bash +source "${AGENT_PATH}/common.sh" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd 
"$SCRIPT_DIR"
+
+_check_required_env_vars "CONTAINER_NAME"
+
+docker compose -p "${CONTAINER_NAME}" logs "$@"
diff --git a/ports.sh b/ports.sh
new file mode 100755
index 0000000..9156dbb
--- /dev/null
+++ b/ports.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+source "${AGENT_PATH}/common.sh"
+_check_required_env_vars "WEB_PORT"
+
+echo "${WEB_PORT}"
diff --git a/restore.sh b/restore.sh
new file mode 100755
index 0000000..640383d
--- /dev/null
+++ b/restore.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+source "${AGENT_PATH}/common.sh"
+_check_required_env_vars "CONTAINER_NAME" "DATA_VOLUME" "BACKUP_FILE" "TEMP_DIR"
+
+mkdir -p "${TEMP_DIR}/restore"
+tar -xzf "${BACKUP_FILE}" -C "${TEMP_DIR}/restore" || _die "Failed to extract backup archive"
+
+docker volume rm "${DATA_VOLUME}" 2>/dev/null || true
+docker volume create "${DATA_VOLUME}" || _die "Failed to create data volume"
+docker run --rm -v "${DATA_VOLUME}":/target -v "${TEMP_DIR}/restore":/backup \
+  debian bash -c "tar -xzf /backup/data.tgz -C /target" || _die "Failed to restore data volume"
+
+echo "Restore completed successfully"
diff --git a/start.sh b/start.sh
new file mode 100755
index 0000000..89bf3b6
--- /dev/null
+++ b/start.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+source "${AGENT_PATH}/common.sh"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+cd "$SCRIPT_DIR"
+
+_check_required_env_vars "CONTAINER_NAME" "DATA_VOLUME"
+
+docker volume create "${DATA_VOLUME}" 2>/dev/null || true
+docker compose -p "${CONTAINER_NAME}" up -d || _die "Failed to start services"
+
+echo "${CONTAINER_NAME} started"
diff --git a/status.sh b/status.sh
new file mode 100755
index 0000000..a7c4c13
--- /dev/null
+++ b/status.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+source "${AGENT_PATH}/common.sh"
+_check_required_env_vars "CONTAINER_NAME"
+
+APP_CONTAINER="${CONTAINER_NAME}-app-1"
+
+if ! docker ps -a --format "{{.Names}}" | grep -q "^${APP_CONTAINER}$"; then
+    echo "Unknown"
+    exit 0
+fi
+
+STATE=$(docker inspect -f '{{.State.Status}}' "$APP_CONTAINER" 2>/dev/null)
+case "$STATE" in
+    running)
+        echo "Running"
+        ;;
+    exited|stopped)
+        echo "Stopped"
+        ;;
+    *)
+        echo "Unknown"
+        ;;
+esac
diff --git a/stop.sh b/stop.sh
new file mode 100755
index 0000000..db8fb17
--- /dev/null
+++ b/stop.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+source "${AGENT_PATH}/common.sh"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+cd "$SCRIPT_DIR"
+
+_check_required_env_vars "CONTAINER_NAME"
+
+docker compose -p "${CONTAINER_NAME}" down 2>/dev/null || true
+
+echo "${CONTAINER_NAME} stopped"
diff --git a/template_info.env b/template_info.env
new file mode 100644
index 0000000..2e2ef0f
--- /dev/null
+++ b/template_info.env
@@ -0,0 +1,5 @@
+REQUIRES_HOST_ROOT=false
+REQUIRES_DOCKER=true
+REQUIRES_DOCKER_ROOT=false
+
+DATA_VOLUME="${CONTAINER_NAME}_db_data"
diff --git a/uninstall.sh b/uninstall.sh
new file mode 100755
index 0000000..e5a71ab
--- /dev/null
+++ b/uninstall.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+source "${AGENT_PATH}/common.sh"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+cd "$SCRIPT_DIR"
+
+_check_required_env_vars "CONTAINER_NAME"
+
+docker compose -p "${CONTAINER_NAME}" down || _die "Failed to stop services"
+
+# DO NOT remove volumes here! Data is preserved for reinstallation.
+echo "Uninstallation of ${CONTAINER_NAME} complete"
+echo "Note: Data volumes have been preserved. To remove all data, use destroy.sh"