#!/bin/bash

################################################################################
# Supabase Auto Deploy Script for 4C8G Server
# Author: Claude Code
# Description: Automated deployment script with optimizations for 4-core 8GB memory servers
#
# NOTE: the original file began with a UTF-8 BOM (EF BB BF) before "#!".
# A BOM prevents the kernel from recognizing the shebang, so the script
# would not run under bash when invoked directly. The BOM has been removed.
################################################################################

set -e  # Exit on error
set -o pipefail  # Fail if any command in a pipeline fails

# Color output (ANSI escape sequences; NC resets to the terminal default)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration — every value can be overridden from the environment
INSTALL_DIR="${INSTALL_DIR:-/opt/supabase}"          # deployment root
SWAP_SIZE="${SWAP_SIZE:-4G}"                         # swap file size (e.g. 4G, 2048M)
DOMAIN="${DOMAIN:-localhost}"                        # public host for URLs/credentials
EMAIL="${EMAIL:-admin@example.com}"                  # SMTP admin address written to .env
SUPABASE_ARCHIVE_PATH="${SUPABASE_ARCHIVE_PATH:-}"   # optional local source archive (offline install)

################################################################################
# Helper Functions
################################################################################

# Generic colored logger.
# Arguments: $1 - ANSI color code, $2 - level tag, $3 - message.
# Outputs:   "<color>[TAG]<reset> message" on stdout.
_log() {
    echo -e "${1}[${2}]${NC} ${3}"
}

log_info()    { _log "$BLUE" "INFO" "$1"; }
log_success() { _log "$GREEN" "SUCCESS" "$1"; }
log_warn()    { _log "$YELLOW" "WARN" "$1"; }
log_error()   { _log "$RED" "ERROR" "$1"; }

# Shallow-clone a repository, retrying on failure.
# Arguments: $1 - repo URL, $2 - target directory,
#            $3 - max attempts (default 3), $4 - delay between attempts (default 10s).
# Returns:   0 on success, 1 after all attempts fail.
#
# Fixes over the original: the partial clone is removed immediately after a
# failure (not after sleeping), and there is no pointless sleep/warn after
# the final attempt.
git_clone_with_retries() {
    local repo_url="$1"
    local target_dir="$2"
    local max_attempts="${3:-3}"
    local delay_secs="${4:-10}"

    local attempt
    for (( attempt = 1; attempt <= max_attempts; attempt++ )); do
        log_info "Cloning ${repo_url} (attempt ${attempt}/${max_attempts})..."
        if git clone --depth 1 --progress "$repo_url" "$target_dir"; then
            return 0
        fi
        # Clean up any partial clone right away so the next attempt starts fresh.
        rm -rf "$target_dir" 2>/dev/null || true
        if (( attempt < max_attempts )); then
            log_warn "Clone failed from ${repo_url} (attempt ${attempt}), retrying in ${delay_secs}s..."
            sleep "$delay_secs"
        fi
    done

    return 1
}

# Unpack a pre-downloaded Supabase source archive and install the repository
# root (the directory that contains docker/) at the requested location.
# Arguments: $1 - path to the archive, $2 - destination directory.
# Exits the script (status 1) on any failure.
extract_supabase_archive() {
    local archive_path="$1"
    local target_dir="$2"

    if [[ ! -f "$archive_path" ]]; then
        log_error "Supabase archive not found: $archive_path"
        exit 1
    fi

    log_info "Extracting Supabase archive: $archive_path"

    # Unpack into a throwaway scratch directory first.
    local scratch
    scratch=$(mktemp -d)

    if ! tar -xf "$archive_path" -C "$scratch"; then
        log_error "Failed to extract archive $archive_path"
        rm -rf "$scratch"
        exit 1
    fi

    # Locate the docker/ directory anywhere up to three levels deep; its
    # parent is the repository root we want to install.
    local docker_dir
    docker_dir=$(find "$scratch" -maxdepth 3 -type d -path "*/docker" | head -n1 || true)

    if [[ -z "$docker_dir" ]]; then
        log_error "Archive does not contain a Supabase docker directory"
        rm -rf "$scratch"
        exit 1
    fi

    # Replace any previous install atomically-ish: clear target, then move.
    rm -rf "$target_dir"
    mkdir -p "$(dirname "$target_dir")"
    mv "$(dirname "$docker_dir")" "$target_dir"

    rm -rf "$scratch"
    log_success "Supabase extracted to $target_dir"
}

# NOTE: a second, byte-identical definition of git_clone_with_retries was
# duplicated here; it silently shadowed the earlier definition. Removed —
# the canonical implementation lives earlier in this file.

# Abort unless running as root — the script writes to /etc, /opt and
# manages systemd services.
check_root() {
    if (( EUID != 0 )); then
        log_error "This script must be run as root"
        exit 1
    fi
}

# Verify the host meets the 4-core / 8 GB target before touching anything.
# Hard-fails on CPU/memory; low disk space is warn-only.
check_system_requirements() {
    log_info "Checking system requirements..."

    # CPU core count
    CPU_CORES=$(nproc)
    if (( CPU_CORES < 4 )); then
        log_error "At least 4 CPU cores required, found: $CPU_CORES"
        exit 1
    fi
    log_success "CPU cores: $CPU_CORES"

    # Total RAM in GiB. free -g rounds down, so an 8 GB host reports 7 —
    # hence the threshold of 7 rather than 8.
    TOTAL_MEM=$(free -g | awk '/^Mem:/{print $2}')
    if (( TOTAL_MEM < 7 )); then
        log_error "At least 8GB memory required, found: ${TOTAL_MEM}GB"
        exit 1
    fi
    log_success "Total memory: ${TOTAL_MEM}GB"

    # Free space on / in GiB (strip the trailing G from df's -BG output)
    DISK_FREE=$(df -BG / | awk 'NR==2 {sub(/G$/, "", $4); print $4}')
    if (( DISK_FREE < 20 )); then
        log_warn "Low disk space: ${DISK_FREE}GB free (20GB+ recommended)"
    else
        log_success "Disk space: ${DISK_FREE}GB free"
    fi
}

# Refresh package lists and install the base tooling the rest of the script
# relies on: curl/wget/git for downloads, jq for JSON, openssl for secrets.
install_dependencies() {
    log_info "Installing dependencies..."

    apt-get update -qq
    apt-get install -y \
        curl \
        git \
        wget \
        jq \
        openssl

    log_success "Dependencies installed"
}

# Install the Docker engine, trying China-local mirrors first with the
# official convenience script as a last resort. Honors SKIP_DOCKER_INSTALL=true
# for hosts that already have Docker provisioned. Exits on failure.
install_docker() {
    # Skip Docker installation if SKIP_DOCKER_INSTALL is set to true
    if [[ "${SKIP_DOCKER_INSTALL}" == "true" ]]; then
        log_info "Skipping Docker installation (SKIP_DOCKER_INSTALL=true)"
        # Verify Docker is already installed
        if ! command -v docker &> /dev/null; then
            log_error "Docker not found but SKIP_DOCKER_INSTALL=true, please install Docker manually"
            exit 1
        fi
        log_success "Docker already installed: $(docker --version)"
        return
    fi

    # Idempotency: nothing to do if Docker is already present.
    if command -v docker &> /dev/null; then
        log_info "Docker already installed: $(docker --version)"
        return
    fi

    log_info "Installing Docker..."

    # Remove old versions completely (including configs and data)
    log_info "Removing old Docker versions and residues..."

    # Uninstall old packages
    apt-get remove -y docker docker-engine docker.io containerd runc docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 2>/dev/null || true

    # Remove Docker directories and configs
    # NOTE(review): this wipes /var/lib/docker — any pre-existing images,
    # containers and volumes are destroyed. Intentional for a clean reinstall,
    # but worth confirming on hosts that ever ran Docker before.
    rm -rf /var/lib/docker /var/lib/containerd /etc/docker /usr/libexec/docker /usr/local/bin/docker* /etc/apt/sources.list.d/docker.list 2>/dev/null || true

    # Clean up apt cache
    apt-get autoremove -y 2>/dev/null || true
    apt-get clean 2>/dev/null || true
    apt-get update -qq 2>/dev/null

    # Install Docker (using China optimized mirrors + official fallback)
    DOCKER_INSTALL_SUCCESS=0

    # Try 1: Daocloud with Aliyun mirror (most reliable in China)
    log_info "Installing Docker from Daocloud with Aliyun mirror..."
    if curl -fsSL https://get.daocloud.io/docker | bash -s docker --mirror Aliyun; then
        DOCKER_INSTALL_SUCCESS=1
    else
        log_warn "Failed to install Docker from Daocloud with Aliyun mirror..."

        # Try 2: Aliyun official installation script (fallback)
        log_info "Trying Aliyun official Docker installation..."
        if curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg 2>/dev/null \
            && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \
            && apt-get update -qq 2>/dev/null \
            && apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 2>/dev/null; then
            DOCKER_INSTALL_SUCCESS=1
        else
            log_warn "Failed to install Docker from Aliyun..."

            # Try 3: Huawei Cloud mirror (last fallback)
            log_info "Trying Huawei Cloud Docker installation..."
            if curl -fsSL https://repo.huaweicloud.com/docker-ce/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg 2>/dev/null \
                && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://repo.huaweicloud.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \
                && apt-get update -qq 2>/dev/null \
                && apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 2>/dev/null; then
                DOCKER_INSTALL_SUCCESS=1
            else
                log_warn "Failed to install Docker from Huawei Cloud..."
            fi
        fi
    fi

    # Try 4 runs only if all mirror-based installs above failed.
    if [ $DOCKER_INSTALL_SUCCESS -eq 0 ]; then
        # Try 4: Official Docker convenience script
        log_info "Trying official Docker installation script..."
        if curl -fsSL https://get.docker.com | sh; then
            DOCKER_INSTALL_SUCCESS=1
        fi
    fi

    if [ $DOCKER_INSTALL_SUCCESS -eq 0 ]; then
        log_error "Docker installation failed: Network connection issue"
        log_info "Please check network connectivity or try manual installation"
        exit 1
    fi

    # An installer may "succeed" without actually delivering the binary —
    # verify the command is really available before proceeding.
    if ! command -v docker &> /dev/null; then
        log_error "Docker command still not available after installation attempts"
        exit 1
    fi

    # Start and enable Docker
    log_info "Starting Docker service..."
    systemctl daemon-reload >/dev/null 2>&1
    systemctl start docker >/dev/null 2>&1 || {
        log_error "Failed to start Docker service"
        exit 1
    }
    systemctl enable docker >/dev/null 2>&1

    log_success "Docker installed: $(docker --version)"
}

# Verify the Docker Compose V2 plugin is available (it ships with modern
# Docker packages). Exits if the plugin is missing.
#
# Fix: the original test was `command -v docker compose version`, but
# `command -v` only inspects its first argument (`docker`) — it verified that
# Docker exists, never that the Compose plugin works, so a missing plugin
# went undetected. Probe the actual subcommand instead.
install_docker_compose() {
    if docker compose version &> /dev/null; then
        log_info "Docker Compose already installed: $(docker compose version)"
        return
    fi

    log_info "Installing Docker Compose..."

    # Docker Compose V2 is included with Docker; there is nothing standalone
    # to install here, so a missing plugin is a hard error.
    if ! docker compose version &> /dev/null; then
        log_error "Docker Compose not available"
        exit 1
    fi

    log_success "Docker Compose ready"
}

# Create and enable a persistent swap file of ${SWAP_SIZE} at /swapfile.
# No-op if /swapfile is already active.
#
# Fix: the dd fallback previously hard-coded count=4096 (4 GiB) regardless of
# SWAP_SIZE; it now derives the block count from SWAP_SIZE. Also quotes the
# expansion.
setup_swap() {
    if swapon --show | grep -q "/swapfile"; then
        log_info "Swap already configured"
        return
    fi

    log_info "Setting up ${SWAP_SIZE} swap file..."

    # Translate sizes like "4G" / "2048M" into a 1 MiB block count for dd.
    local size_mb
    case "$SWAP_SIZE" in
        *G|*g) size_mb=$(( ${SWAP_SIZE%[Gg]} * 1024 )) ;;
        *M|*m) size_mb=${SWAP_SIZE%[Mm]} ;;
        *)     size_mb=4096 ;;  # unknown unit: keep the historical 4 GiB fallback
    esac

    # fallocate is fast but unsupported on some filesystems; dd is the fallback.
    fallocate -l "$SWAP_SIZE" /swapfile || dd if=/dev/zero of=/swapfile bs=1M count="$size_mb"
    chmod 600 /swapfile
    mkswap /swapfile
    swapon /swapfile

    # Persist across reboots.
    if ! grep -q "/swapfile" /etc/fstab; then
        echo '/swapfile none swap sw 0 0' >> /etc/fstab
    fi

    log_success "Swap configured: $(swapon --show)"
}

# Apply kernel tuning for a small database host and configure the Docker
# daemon (log rotation + China registry mirrors), then restart Docker so the
# daemon.json changes take effect.
optimize_system() {
    log_info "Optimizing system parameters..."

    # Kernel parameters (written to a dedicated sysctl drop-in and applied
    # immediately below; persists across reboots)
    cat > /etc/sysctl.d/99-supabase.conf <<EOF
# Memory management
vm.swappiness=10
vm.overcommit_memory=1
vm.max_map_count=262144

# Network optimization
net.core.somaxconn=1024
net.ipv4.tcp_max_syn_backlog=2048
net.ipv4.ip_local_port_range=10000 65535

# File descriptors
fs.file-max=2097152
EOF

    sysctl -p /etc/sysctl.d/99-supabase.conf

    # Docker logging and registry mirrors for China (prefers domestic mirror
    # sources). NOTE(review): this overwrites any existing daemon.json.
    mkdir -p /etc/docker
    cat > /etc/docker/daemon.json <<EOF
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn",
    "https://mirror.ccs.tencentyun.com",
    "http://hub-mirror.c.163.com",
    "https://mirror.baidubce.com",
    "https://reg-mirror.qiniu.com",
    "https://dockerhub.azk8s.cn"
  ],
  "max-concurrent-downloads": 10,
  "max-concurrent-uploads": 5
}
EOF

    # Restart so the daemon picks up daemon.json (no containers run yet).
    systemctl restart docker

    log_success "System optimized"
}

# Generate all deployment secrets and export them for later steps
# (create_env_file, save_credentials).
#
# Fix: ANON_KEY and SERVICE_ROLE_KEY were plain random base64 strings, but
# Supabase components (Kong key-auth, GoTrue, PostgREST) expect them to be
# HS256 JWTs signed with JWT_SECRET carrying a "role" claim — random strings
# are rejected and break all API access. They are now minted as real JWTs.
generate_secrets() {
    log_info "Generating secure secrets..."

    # base64url-encode stdin without padding (the JWT alphabet).
    _b64url() {
        openssl base64 -A | tr '+/' '-_' | tr -d '='
    }

    # Mint an HS256 JWT for role "$1", signed with JWT_SECRET, ~10-year expiry.
    _sign_jwt() {
        local role="$1"
        local iat exp header payload signature
        iat=$(date +%s)
        exp=$(( iat + 315360000 ))  # 10 * 365 * 24 * 3600 seconds
        header=$(printf '%s' '{"alg":"HS256","typ":"JWT"}' | _b64url)
        payload=$(printf '{"role":"%s","iss":"supabase","iat":%d,"exp":%d}' "$role" "$iat" "$exp" | _b64url)
        signature=$(printf '%s.%s' "$header" "$payload" \
            | openssl dgst -sha256 -hmac "$JWT_SECRET" -binary | _b64url)
        printf '%s.%s.%s' "$header" "$payload" "$signature"
    }

    # Generate random strings (JWT_SECRET must exist before signing below)
    JWT_SECRET=$(openssl rand -base64 32)
    ANON_KEY=$(_sign_jwt "anon")
    SERVICE_ROLE_KEY=$(_sign_jwt "service_role")
    POSTGRES_PASSWORD=$(openssl rand -base64 32 | tr -d "=+/")
    DASHBOARD_PASSWORD=$(openssl rand -base64 16 | tr -d "=+/")
    SECRET_KEY_BASE=$(openssl rand -base64 64)
    VAULT_ENC_KEY=$(openssl rand -base64 32)
    PG_META_CRYPTO_KEY=$(openssl rand -base64 32)
    LOGFLARE_PUBLIC_TOKEN=$(openssl rand -base64 32)
    LOGFLARE_PRIVATE_TOKEN=$(openssl rand -base64 32)

    # Export for later use
    export JWT_SECRET ANON_KEY SERVICE_ROLE_KEY POSTGRES_PASSWORD DASHBOARD_PASSWORD
    export SECRET_KEY_BASE VAULT_ENC_KEY PG_META_CRYPTO_KEY
    export LOGFLARE_PUBLIC_TOKEN LOGFLARE_PRIVATE_TOKEN

    log_success "Secrets generated"
}

# Obtain the Supabase sources (local archive, existing clone, or fresh clone
# via mirror fallback chain) and stage docker/ into $INSTALL_DIR/docker-config.
# SIDE EFFECT: leaves the working directory at $INSTALL_DIR/docker-config —
# the subsequent create_env_file/create_override_file/start_services steps
# rely on this.
download_supabase() {
    log_info "Downloading Supabase (可能需要数分钟)..."

    # Create directory
    mkdir -p "$INSTALL_DIR"
    cd "$INSTALL_DIR"

    # Clone or update repository
    # Prefer the Gitee mirror (for China) by default; all three URLs can be
    # overridden via environment variables.
    SUPABASE_GITEE_MIRROR="${SUPABASE_GITEE_MIRROR:-https://gitee.com/mirrors/supabase.git}"
    SUPABASE_CN_MIRROR="${SUPABASE_CN_MIRROR:-https://github.com.cnpmjs.org/supabase/supabase.git}"
    SUPABASE_REPO="${SUPABASE_REPO:-https://github.com/supabase/supabase.git}"
    # Use GitHub mirror for China if needed (set SUPABASE_REPO environment variable)
    # Example: SUPABASE_REPO=https://github.com.cnpmjs.org/supabase/supabase.git

    if [[ -n "$SUPABASE_ARCHIVE_PATH" ]]; then
        # Offline path: unpack a pre-downloaded archive instead of cloning.
        log_info "使用本地 Supabase 压缩包：$SUPABASE_ARCHIVE_PATH"
        extract_supabase_archive "$SUPABASE_ARCHIVE_PATH" "$INSTALL_DIR/supabase"
    else
        if [[ -d "supabase/.git" ]]; then
            # Existing clone: try a fast-forward pull; on failure fall back to
            # a clean re-clone through the mirror chain.
            log_info "检测到已有仓库，正在拉取最新代码（请稍候）..."
            cd supabase
            git -c core.forceCheckout=true pull --progress --ff-only || {
                log_warn "git pull 失败，尝试清理后重新克隆..."
                cd ..
                rm -rf supabase
                git_clone_with_retries "$SUPABASE_GITEE_MIRROR" supabase || \
                    git_clone_with_retries "$SUPABASE_CN_MIRROR" supabase || \
                    git_clone_with_retries "$SUPABASE_REPO" supabase || {
                        log_error "无法从任何镜像克隆 Supabase，请检查网络或手动设置 SUPABASE_GITEE_MIRROR/SUPABASE_REPO"
                        exit 1
                    }
            }
            cd "$INSTALL_DIR"
        else
            # Fresh clone: Gitee mirror → cnpmjs mirror → upstream GitHub.
            log_info "Cloning Supabase repository (优先使用国内镜像)..."
            git_clone_with_retries "$SUPABASE_GITEE_MIRROR" supabase || \
                git_clone_with_retries "$SUPABASE_CN_MIRROR" supabase || \
                git_clone_with_retries "$SUPABASE_REPO" supabase || {
                    log_error "无法从任何镜像克隆 Supabase，请检查网络或手动设置 SUPABASE_GITEE_MIRROR/SUPABASE_REPO"
                    exit 1
                }
        fi
    fi

    # Copy docker files into a fresh working directory (docker-config), so
    # local .env/override edits never dirty the git checkout.
    rm -rf docker-config
    mkdir -p docker-config
    cp -r supabase/docker/* docker-config/
    cd docker-config

    log_success "Supabase downloaded to $INSTALL_DIR"
}

# Write the docker-compose .env file from the secrets exported by
# generate_secrets() plus DOMAIN/EMAIL. Runs inside $INSTALL_DIR/docker-config
# (download_supabase leaves the working directory there).
create_env_file() {
    log_info "Creating .env configuration..."

    # Unquoted EOF delimiter: all ${...} placeholders below are expanded NOW,
    # at file-write time, baking the generated values into .env.
    cat > .env <<EOF
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############

POSTGRES_PASSWORD="${POSTGRES_PASSWORD}"
JWT_SECRET="${JWT_SECRET}"
ANON_KEY="${ANON_KEY}"
SERVICE_ROLE_KEY="${SERVICE_ROLE_KEY}"
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD="${DASHBOARD_PASSWORD}"

############
# Database
############

POSTGRES_HOST=db
POSTGRES_PORT=5432
POSTGRES_DB=postgres

############
# API
############

API_EXTERNAL_URL=http://${DOMAIN}:8000
SUPABASE_PUBLIC_URL=http://${DOMAIN}:8000

# PGRST
PGRST_DB_SCHEMAS=public,storage,graphql_public

############
# Auth
############

SITE_URL=http://${DOMAIN}:3000
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false

# Email
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL="${EMAIL}"
SMTP_HOST=
SMTP_PORT=587
SMTP_USER=
SMTP_PASS=
SMTP_SENDER_NAME="Supabase"

# Phone
ENABLE_PHONE_SIGNUP=false
ENABLE_PHONE_AUTOCONFIRM=false

ENABLE_ANONYMOUS_USERS=false

############
# Studio
############

STUDIO_DEFAULT_ORGANIZATION="Default Organization"
STUDIO_DEFAULT_PROJECT="Default Project"

# OpenAI (optional)
OPENAI_API_KEY=

############
# Functions
############

FUNCTIONS_VERIFY_JWT=false

############
# Pooler
############

POOLER_TENANT_ID=default-tenant
POOLER_DEFAULT_POOL_SIZE=10
POOLER_MAX_CLIENT_CONN=50
POOLER_DB_POOL_SIZE=10
POOLER_PROXY_PORT_TRANSACTION=6543

############
# Logflare
############

LOGFLARE_PUBLIC_ACCESS_TOKEN="${LOGFLARE_PUBLIC_TOKEN}"
LOGFLARE_PRIVATE_ACCESS_TOKEN="${LOGFLARE_PRIVATE_TOKEN}"
LOGFLARE_API_KEY="${LOGFLARE_PRIVATE_TOKEN}"

############
# Advanced
############

SECRET_KEY_BASE="${SECRET_KEY_BASE}"
VAULT_ENC_KEY="${VAULT_ENC_KEY}"
PG_META_CRYPTO_KEY="${PG_META_CRYPTO_KEY}"

############
# Ports
############

KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443

############
# Docker
############

DOCKER_SOCKET_LOCATION=/var/run/docker.sock

############
# Mailer URLs
############

MAILER_URLPATHS_INVITE=/auth/v1/verify
MAILER_URLPATHS_CONFIRMATION=/auth/v1/verify
MAILER_URLPATHS_RECOVERY=/auth/v1/verify
MAILER_URLPATHS_EMAIL_CHANGE=/auth/v1/verify

############
# Image Proxy
############

IMGPROXY_ENABLE_WEBP_DETECTION=true
EOF

    log_success ".env file created"
}

# Write a docker-compose override capping CPU/memory per service so the whole
# stack fits a 4-core / 8 GB host (Postgres gets the largest share), plus
# Postgres tuning flags sized for that memory budget.
create_override_file() {
    log_info "Creating docker-compose.override.yml for resource optimization..."

    # Quoted 'EOF' delimiter: the YAML below is written verbatim, with no
    # shell expansion.
    cat > docker-compose.override.yml <<'EOF'
services:
  studio:
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 600M
        reservations:
          memory: 400M
    environment:
      NODE_OPTIONS: "--max-old-space-size=512"

  kong:
    deploy:
      resources:
        limits:
          cpus: '0.8'
          memory: 500M
        reservations:
          memory: 300M

  auth:
    deploy:
      resources:
        limits:
          cpus: '0.8'
          memory: 500M
        reservations:
          memory: 300M

  rest:
    deploy:
      resources:
        limits:
          cpus: '0.6'
          memory: 400M
        reservations:
          memory: 250M

  realtime:
    deploy:
      resources:
        limits:
          cpus: '0.8'
          memory: 800M
        reservations:
          memory: 500M
    environment:
      ERL_MAX_PORTS: "8192"
      RLIMIT_NOFILE: "8192"

  storage:
    deploy:
      resources:
        limits:
          cpus: '0.8'
          memory: 500M
        reservations:
          memory: 300M

  imgproxy:
    deploy:
      resources:
        limits:
          cpus: '0.6'
          memory: 400M
        reservations:
          memory: 250M

  meta:
    deploy:
      resources:
        limits:
          cpus: '0.6'
          memory: 400M
        reservations:
          memory: 250M

  functions:
    deploy:
      resources:
        limits:
          cpus: '0.6'
          memory: 400M
        reservations:
          memory: 250M

  analytics:
    deploy:
      resources:
        limits:
          cpus: '0.8'
          memory: 800M
        reservations:
          memory: 500M

  db:
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 3500M
        reservations:
          memory: 2500M
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal
      - -c
      - shared_buffers=1GB
      - -c
      - effective_cache_size=3GB
      - -c
      - work_mem=16MB
      - -c
      - maintenance_work_mem=256MB
      - -c
      - max_connections=150
      - -c
      - max_wal_size=2GB
      - -c
      - checkpoint_completion_target=0.9
      - -c
      - random_page_cost=1.1
      - -c
      - effective_io_concurrency=200

  vector:
    deploy:
      resources:
        limits:
          cpus: '0.6'
          memory: 300M
        reservations:
          memory: 150M

  supavisor:
    deploy:
      resources:
        limits:
          cpus: '0.8'
          memory: 500M
        reservations:
          memory: 300M
EOF

    log_success "docker-compose.override.yml created"
}

# Pull all compose images (with retries, restarting the Docker daemon between
# attempts to refresh registry-mirror connections) and start the stack.
# Runs from $INSTALL_DIR/docker-config, where download_supabase left the
# working directory. Exits if `docker compose up -d` fails.
start_services() {
    log_info "Starting Supabase services..."

    # Pull images with retry mechanism
    log_info "Pulling Docker images (this may take a while, will retry on failure)..."
    local max_retries=3
    local retry_count=0
    local pull_success=0

    while [[ $retry_count -lt $max_retries ]]; do
        log_info "Pulling images (attempt $((retry_count + 1))/$max_retries)..."
        if docker compose pull 2>&1 | tee /tmp/docker-pull.log; then
            # Check if there were any errors in the output
            # NOTE(review): grepping the log is a heuristic — output that
            # merely contains "error"/"failed"/"timeout" also triggers a retry.
            if grep -qi "error\|failed\|timeout" /tmp/docker-pull.log; then
                retry_count=$((retry_count + 1))
                if [[ $retry_count -lt $max_retries ]]; then
                    log_warn "Docker pull encountered errors, retrying in 10 seconds..."
                    sleep 10
                    # Restart Docker daemon to refresh registry connections
                    systemctl restart docker
                    sleep 5
                fi
            else
                pull_success=1
                break
            fi
        else
            retry_count=$((retry_count + 1))
            if [[ $retry_count -lt $max_retries ]]; then
                log_warn "Docker pull failed (attempt $retry_count/$max_retries), retrying in 10 seconds..."
                sleep 10
                # Restart Docker daemon to refresh registry connections
                systemctl restart docker
                sleep 5
            fi
        fi
    done

    # Pull failure is not fatal: fall through and try to start with whatever
    # images are already cached locally.
    if [[ $pull_success -eq 0 ]]; then
        log_error "Failed to pull Docker images after $max_retries attempts"
        log_info "Last error log:"
        tail -n 20 /tmp/docker-pull.log 2>/dev/null || true
        log_warn "You can try to pull images manually later with: docker compose pull"
        log_info "Attempting to start services with existing images..."
    fi

    # Start services
    log_info "Starting containers..."
    if ! docker compose up -d; then
        log_error "Failed to start containers"
        log_info "Checking container status..."
        docker compose ps
        exit 1
    fi

    log_success "Services started"
}

# Poll `docker compose ps` until every service reports Health == "healthy",
# giving up after 5 minutes. Returns 0 when all healthy, 1 on timeout.
# NOTE: services without a healthcheck never report "healthy" and are covered
# by the timeout fallback.
#
# Fix: the original `local unhealthy=$(...)` always succeeded regardless of
# the pipeline's status, so a docker/jq failure produced an empty result,
# `wc -l` printed 0, and the function falsely reported "all services healthy".
wait_for_health() {
    log_info "Waiting for services to become healthy..."

    local max_wait=300
    local elapsed=0
    local interval=5
    local unhealthy

    while [[ $elapsed -lt $max_wait ]]; do
        # Declaration split from assignment so the pipeline's exit status is
        # observable; on failure mark the count as unknown (-1) and keep polling.
        unhealthy=$(docker compose ps --format json 2>/dev/null \
            | jq -r 'select(.Health != "healthy") | .Service' 2>/dev/null \
            | wc -l) || unhealthy=-1

        if [[ $unhealthy -eq 0 ]]; then
            log_success "All services are healthy"
            return 0
        fi

        if [[ $unhealthy -lt 0 ]]; then
            log_warn "Could not query service health, retrying... (${elapsed}s/${max_wait}s)"
        else
            log_info "Waiting for $unhealthy service(s) to become healthy... (${elapsed}s/${max_wait}s)"
        fi
        sleep $interval
        elapsed=$((elapsed + interval))
    done

    log_warn "Some services may not be healthy yet, checking status..."
    docker compose ps
    return 1
}

# Generate the operator helper scripts (status/stop/start/restart/logs/backup)
# in $INSTALL_DIR. All heredocs use quoted 'EOF' so the scripts are written
# verbatim and expand their variables at run time, not now.
#
# Fixes in the generated backup.sh:
#   - `set -o pipefail` so a failed pg_dump is no longer masked by gzip's
#     success status (the old `$?` check only saw gzip).
#   - Old-backup pruning now keeps the last 10 of EACH type; the previous
#     combined sort ordered "volumes_*" ahead of "db_*" and could delete every
#     database dump while retaining only volume archives.
create_management_scripts() {
    log_info "Creating management scripts..."

    # Status script
    cat > "$INSTALL_DIR/status.sh" <<'EOF'
#!/bin/bash
cd "$(dirname "$0")/docker-config"
echo "=== Supabase Status ==="
docker compose ps
echo ""
echo "=== Resource Usage ==="
docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}"
EOF
    chmod +x "$INSTALL_DIR/status.sh"

    # Stop script
    cat > "$INSTALL_DIR/stop.sh" <<'EOF'
#!/bin/bash
cd "$(dirname "$0")/docker-config"
echo "Stopping Supabase services..."
docker compose stop
echo "Services stopped"
EOF
    chmod +x "$INSTALL_DIR/stop.sh"

    # Start script
    cat > "$INSTALL_DIR/start.sh" <<'EOF'
#!/bin/bash
cd "$(dirname "$0")/docker-config"
echo "Starting Supabase services..."
docker compose start
echo "Services started"
EOF
    chmod +x "$INSTALL_DIR/start.sh"

    # Restart script
    cat > "$INSTALL_DIR/restart.sh" <<'EOF'
#!/bin/bash
cd "$(dirname "$0")/docker-config"
echo "Restarting Supabase services..."
docker compose down
docker compose up -d
echo "Services restarted"
EOF
    chmod +x "$INSTALL_DIR/restart.sh"

    # Logs script
    cat > "$INSTALL_DIR/logs.sh" <<'EOF'
#!/bin/bash
cd "$(dirname "$0")/docker-config"
SERVICE=${1:-}
if [ -z "$SERVICE" ]; then
    echo "Usage: $0 <service-name>"
    echo "Available services:"
    docker compose ps --services
    exit 1
fi
docker compose logs -f "$SERVICE"
EOF
    chmod +x "$INSTALL_DIR/logs.sh"

    # Backup script
    cat > "$INSTALL_DIR/backup.sh" <<'EOF'
#!/bin/bash
# Without pipefail, `pg_dump | gzip` reports gzip's exit status and a failed
# dump would be silently written as a truncated backup.
set -o pipefail
cd "$(dirname "$0")/docker-config"
BACKUP_DIR="${BACKUP_DIR:-/backup/supabase}"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)

# Create backup directory if it doesn't exist
mkdir -p "$BACKUP_DIR" || { echo "ERROR: Failed to create backup directory $BACKUP_DIR"; exit 1; }

echo "Backing up PostgreSQL database..."
docker exec supabase-db pg_dump -U postgres postgres | gzip > "$BACKUP_DIR/db_$TIMESTAMP.sql.gz"
if [ $? -ne 0 ]; then
    echo "ERROR: Database backup failed"
    exit 1
fi

echo "Backing up volumes..."
tar czf "$BACKUP_DIR/volumes_$TIMESTAMP.tar.gz" volumes/ 2>/dev/null
if [ $? -ne 0 ]; then
    echo "WARNING: Volume backup failed (may not exist yet)"
fi

# Keep only the last 10 backups of each type to save disk space
echo "Cleaning up old backups..."
find "$BACKUP_DIR" -name "db_*.sql.gz" | sort -r | tail -n +11 | xargs -r rm -f
find "$BACKUP_DIR" -name "volumes_*.tar.gz" | sort -r | tail -n +11 | xargs -r rm -f

echo "Backup completed successfully: $BACKUP_DIR"
ls -lh "$BACKUP_DIR"
EOF
    chmod +x "$INSTALL_DIR/backup.sh"

    log_success "Management scripts created in $INSTALL_DIR"
}

# Persist all generated credentials to $INSTALL_DIR/CREDENTIALS.txt,
# readable only by root (chmod 600). Uses an unquoted EOF delimiter so the
# secret variables are expanded into the file now.
save_credentials() {
    log_info "Saving credentials..."

    cat > "$INSTALL_DIR/CREDENTIALS.txt" <<EOF
################################################################################
# Supabase Credentials
# Generated: $(date)
# KEEP THIS FILE SECURE!
################################################################################

Dashboard URL: http://${DOMAIN}:8000
Dashboard Username: supabase
Dashboard Password: $DASHBOARD_PASSWORD

API URL: http://${DOMAIN}:8000

Anon Key: $ANON_KEY
Service Role Key: $SERVICE_ROLE_KEY

PostgreSQL:
  Host: ${DOMAIN}
  Port: 5432 (Session) / 6543 (Transaction)
  Database: postgres
  Username: postgres
  Password: $POSTGRES_PASSWORD

Connection Strings:
  Session:     postgresql://postgres:$POSTGRES_PASSWORD@${DOMAIN}:5432/postgres
  Transaction: postgresql://postgres:$POSTGRES_PASSWORD@${DOMAIN}:6543/postgres

JWT Secret: $JWT_SECRET

################################################################################
# Important Notes
################################################################################

1. Change Dashboard password immediately after first login
2. Configure SMTP settings in .env for email functionality
3. Update SITE_URL and API_EXTERNAL_URL for production
4. Enable SSL/TLS with reverse proxy (nginx/caddy)
5. Set up regular backups using backup.sh script

################################################################################
EOF

    # Restrict to owner (root) — this file contains every secret in plaintext.
    chmod 600 "$INSTALL_DIR/CREDENTIALS.txt"

    log_success "Credentials saved to $INSTALL_DIR/CREDENTIALS.txt"
}

# Print the end-of-deployment summary: access URLs, credentials location,
# management commands, a live resource snapshot, and follow-up steps.
print_summary() {
    echo ""
    echo "════════════════════════════════════════════════════════════════"
    log_success "Supabase deployment completed successfully!"
    echo "════════════════════════════════════════════════════════════════"
    echo ""
    echo "📍 Installation Directory: $INSTALL_DIR"
    echo "🌐 Dashboard URL: http://${DOMAIN}:8000"
    echo "👤 Username: supabase"
    echo "🔑 Password: $DASHBOARD_PASSWORD"
    echo ""
    echo "📝 Credentials saved to: $INSTALL_DIR/CREDENTIALS.txt"
    echo ""
    echo "🛠️  Management Commands:"
    echo "  Status:  $INSTALL_DIR/status.sh"
    echo "  Stop:    $INSTALL_DIR/stop.sh"
    echo "  Start:   $INSTALL_DIR/start.sh"
    echo "  Restart: $INSTALL_DIR/restart.sh"
    echo "  Logs:    $INSTALL_DIR/logs.sh <service-name>"
    echo "  Backup:  $INSTALL_DIR/backup.sh"
    echo ""
    echo "📊 Resource Usage:"
    # Snapshot (no stream); head keeps the table to the first 15 lines.
    docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" | head -n 15
    echo ""
    echo "⚠️  Important Next Steps:"
    echo "  1. Review credentials in $INSTALL_DIR/CREDENTIALS.txt"
    echo "  2. Configure SMTP in $INSTALL_DIR/docker-config/.env"
    echo "  3. Set up SSL/TLS with reverse proxy for production"
    echo "  4. Configure regular backups"
    echo "  5. Monitor resource usage: $INSTALL_DIR/status.sh"
    echo ""
    echo "════════════════════════════════════════════════════════════════"
}

################################################################################
# Main Deployment Flow
################################################################################

# Orchestrate the full deployment: pre-flight checks, system preparation,
# Supabase download/configuration, service start-up, and post-deploy
# artifacts (helper scripts + credentials file).
main() {
    log_info "Starting Supabase deployment for 4C8G server..."
    echo ""

    # Pre-flight checks
    check_root
    check_system_requirements

    # System setup
    install_dependencies
    install_docker
    install_docker_compose
    setup_swap
    optimize_system

    # Supabase setup. Order matters: generate_secrets exports the values
    # create_env_file consumes, and download_supabase leaves the working
    # directory at $INSTALL_DIR/docker-config for the steps that follow.
    generate_secrets
    download_supabase
    create_env_file
    create_override_file

    # Deploy (health timeout is non-fatal — services may warm up slowly)
    start_services
    wait_for_health || log_warn "Some services may need more time to start"

    # Post-deployment
    create_management_scripts
    save_credentials

    # Summary
    print_summary
}

# Run main function
main "$@"
