#!/bin/bash
# ArXiv Subscription Platform Deployment Script
#
# Usage: deploy.sh [environment]
#   environment  deployment target (default: development). "production"
#                enables the root check, prod compose overrides, SSL setup,
#                and ownership fixes on data directories.

set -e

# ANSI color codes used by the logging helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration constants -- readonly so later phases cannot clobber them.
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
readonly DEPLOY_ENV="${1:-development}"
readonly BACKUP_DIR="/var/backups/arxiv-platform"
readonly LOG_FILE="/var/log/arxiv-platform/deploy.log"

# Functions
log() {
    # Print a green, timestamped status line and append it to the log file.
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[${stamp}]${NC} $1" | tee -a "$LOG_FILE"
}

warn() {
    # Print a yellow, timestamped WARNING line and append it to the log file.
    # Non-fatal: callers continue after a warning.
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[${stamp}] WARNING:${NC} $1" | tee -a "$LOG_FILE"
}

error() {
    # Print a red, timestamped ERROR line, append it to the log file, and
    # abort the script with status 1.
    # Diagnostics belong on stderr so callers capturing stdout still see
    # the failure (the file copy via tee is unaffected).
    echo -e "${RED}[$(date '+%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" | tee -a "$LOG_FILE" >&2
    exit 1
}

info() {
    # Print a blue, timestamped INFO line and append it to the log file.
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[${stamp}] INFO:${NC} $1" | tee -a "$LOG_FILE"
}

check_prerequisites() {
    # Verify required tooling and host resources before touching anything.
    # Hard failures (error): non-root production run, missing docker,
    # docker compose, or curl. Soft failures (warn): missing jq, low disk
    # (<10GB) or low memory (<4GB).
    log "Checking prerequisites..."

    # Production chowns system paths, so it must run as root.
    if [[ "$DEPLOY_ENV" == "production" && $EUID -ne 0 ]]; then
        error "Production deployment must be run as root"
    fi

    if ! command -v docker &> /dev/null; then
        error "Docker is not installed"
    fi

    if ! docker compose version &> /dev/null; then
        error "Docker Compose is not installed"
    fi

    # curl drives every health check; jq parses `docker compose ps --format
    # json` during verification -- without jq that check is skipped.
    if ! command -v curl &> /dev/null; then
        error "curl is not installed"
    fi
    if ! command -v jq &> /dev/null; then
        warn "jq is not installed; container state verification will be skipped"
    fi

    # df reports 1K blocks: 10485760 KB == 10GB.
    local available_space
    available_space=$(df / | awk 'NR==2 {print $4}')
    if [[ $available_space -lt 10485760 ]]; then
        warn "Low disk space: $(($available_space / 1024 / 1024))GB available"
    fi

    # /proc/meminfo MemTotal is in KB: 4194304 KB == 4GB (Linux-only probe).
    local total_mem
    total_mem=$(grep MemTotal /proc/meminfo | awk '{print $2}')
    if [[ $total_mem -lt 4194304 ]]; then
        warn "Low memory: $(($total_mem / 1024 / 1024))GB available"
    fi

    log "Prerequisites check completed"
}

setup_directories() {
    # Create every host directory the stack mounts or writes to, and in
    # production hand ownership to the container user.
    log "Setting up directory structure..."

    local required_dirs=(
        "/var/log/arxiv-platform"
        "/var/lib/arxiv-platform"
        "/var/lib/arxiv-platform/postgres"
        "/var/lib/arxiv-platform/redis"
        "/var/lib/arxiv-platform/ai_models"
        "/var/lib/arxiv-platform/prometheus"
        "/var/lib/arxiv-platform/grafana"
        "/var/lib/arxiv-platform/alertmanager"
        "$BACKUP_DIR"
        "$PROJECT_ROOT/logs"
        "$PROJECT_ROOT/data"
    )

    local dir
    for dir in "${required_dirs[@]}"; do
        [[ -d "$dir" ]] && continue
        mkdir -p "$dir"
        info "Created directory: $dir"
    done

    # NOTE(review): 1000:1000 presumably matches the UID/GID the containers
    # run as -- confirm against the compose/service definitions.
    if [[ "$DEPLOY_ENV" == "production" ]]; then
        local path
        for path in /var/lib/arxiv-platform/ "$PROJECT_ROOT/logs" "$PROJECT_ROOT/data"; do
            chown -R 1000:1000 "$path"
        done
    fi

    log "Directory setup completed"
}

check_environment() {
    # Validate the environment file: it must exist, define the required
    # variables, and (in production) not retain known default secrets.
    log "Checking environment configuration..."

    local env_file
    if [[ "$DEPLOY_ENV" == "production" ]]; then
        env_file="$PROJECT_ROOT/.env.production"
    else
        env_file="$PROJECT_ROOT/.env"
    fi

    if [[ ! -f "$env_file" ]]; then
        error "Environment file not found: $env_file"
    fi

    # NOTE: sourcing executes the file; its variables land in this shell.
    source "$env_file"

    # Indirect expansion ${!var} reads each required variable by name.
    local var
    for var in DATABASE_URL JWT_SECRET SMTP_HOST SMTP_USER SMTP_PASSWORD; do
        if [[ -z "${!var}" ]]; then
            error "Required environment variable not set: $var"
        fi
    done

    # Refuse to ship the repository's placeholder secrets to production.
    if [[ "$DEPLOY_ENV" == "production" ]]; then
        if [[ "$JWT_SECRET" == "your-super-secret-jwt-key-change-in-production" ]]; then
            error "JWT_SECRET must be changed for production deployment"
        fi

        if [[ "$POSTGRES_PASSWORD" == "arxiv_password" ]]; then
            error "Default PostgreSQL password must be changed for production"
        fi
    fi

    log "Environment configuration check completed"
}

backup_existing() {
    # Snapshot the docker data volumes and config files into a
    # timestamped directory under $BACKUP_DIR before redeploying.
    log "Creating backup of existing deployment..."

    # Declaration split from assignment so a failing $(date) is not masked.
    local timestamp backup_path
    timestamp=$(date +%Y%m%d_%H%M%S)
    backup_path="$BACKUP_DIR/backup_$timestamp"

    mkdir -p "$backup_path"

    # Match the volume name exactly: a plain substring grep would also hit
    # similarly named volumes (e.g. arxiv_postgres_data_old).
    if docker volume ls --format '{{.Name}}' | grep -qx arxiv_postgres_data; then
        info "Backing up PostgreSQL data..."
        # Read-only mount; tar the volume contents from a throwaway container.
        docker run --rm \
            -v arxiv_postgres_data:/source:ro \
            -v "$backup_path":/backup \
            alpine tar czf /backup/postgres_data.tar.gz -C /source .
    fi

    if docker volume ls --format '{{.Name}}' | grep -qx arxiv_redis_data; then
        info "Backing up Redis data..."
        docker run --rm \
            -v arxiv_redis_data:/source:ro \
            -v "$backup_path":/backup \
            alpine tar czf /backup/redis_data.tar.gz -C /source .
    fi

    # Best effort: config files may not exist on a first deployment.
    cp "$PROJECT_ROOT/.env"* "$backup_path/" 2>/dev/null || true
    cp "$PROJECT_ROOT/docker-compose.yml" "$backup_path/" 2>/dev/null || true

    info "Backup created at: $backup_path"
    log "Backup completed"
}

pull_images() {
    # Fetch the images referenced by the compose files for this environment.
    log "Pulling Docker images..."

    cd "$PROJECT_ROOT"

    if [[ "$DEPLOY_ENV" != "production" ]]; then
        # Default file resolution (docker-compose.yml plus any override).
        docker compose pull
    else
        docker compose -f docker-compose.yml -f docker-compose.prod.yml pull
    fi

    log "Docker images pulled"
}

deploy_services() {
    # Bring the stack up detached, removing containers of deleted services.
    log "Deploying services..."

    cd "$PROJECT_ROOT"

    if [[ "$DEPLOY_ENV" == "production" ]]; then
        # Compose reads secrets from .env, so stage the production file there
        # before applying the prod override.
        cp .env.production .env
        docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d --remove-orphans
    else
        docker compose up -d --remove-orphans
    fi

    log "Services deployment completed"
}

wait_for_services() {
    # Poll the main API until healthy (hard failure on timeout), then probe
    # each auxiliary service once (soft warnings only).
    log "Waiting for services to be ready..."

    local max_attempts=60
    local attempt=1

    # Up to ~10 minutes: 60 attempts x 10s sleep.
    while [[ $attempt -le $max_attempts ]]; do
        info "Health check attempt $attempt/$max_attempts"

        if curl -f -s http://localhost:8000/health > /dev/null 2>&1; then
            log "Main API is healthy"
            break
        fi

        if [[ $attempt -eq $max_attempts ]]; then
            error "Services failed to become healthy within timeout"
        fi

        sleep 10
        ((attempt++))
    done

    # name:port pairs; a failing probe here warns but does not abort.
    local services=(
        "backend:8000"
        "email_service:8001"
    )

    local service name port
    for service in "${services[@]}"; do
        # Parameter expansion avoids two subshells per iteration (echo | cut).
        name=${service%%:*}
        port=${service##*:}

        if curl -f -s "http://localhost:$port/health" > /dev/null 2>&1; then
            info "$name service is healthy"
        else
            warn "$name service health check failed"
        fi
    done

    log "Service health checks completed"
}

run_database_migrations() {
    # Wait until PostgreSQL accepts connections, then apply Alembic
    # migrations if the backend image ships alembic.
    log "Running database migrations..."

    # Up to ~60s: 30 attempts x 2s sleep.
    local max_attempts=30
    local attempt=1

    while [[ $attempt -le $max_attempts ]]; do
        if docker compose exec -T postgres pg_isready -U arxiv_user > /dev/null 2>&1; then
            break
        fi

        if [[ $attempt -eq $max_attempts ]]; then
            error "Database failed to become ready"
        fi

        sleep 2
        ((attempt++))
    done

    # The import probe only detects whether alembic is installed in the
    # backend image -- it says nothing about pending revisions, so the
    # skip message must not claim "no migrations".
    if docker compose exec -T backend python -c "import alembic" > /dev/null 2>&1; then
        info "Running Alembic migrations..."
        docker compose exec -T backend python -m alembic upgrade head
    else
        info "Alembic not available in backend image; skipping migrations"
    fi

    log "Database migrations completed"
}

setup_monitoring() {
    # Ensure the Grafana dashboards directory exists and wait for Grafana
    # to answer its health endpoint. Grafana being down is non-fatal.
    log "Setting up monitoring..."

    local grafana_dashboards="$PROJECT_ROOT/monitoring/grafana/dashboards"
    if [[ ! -d "$grafana_dashboards" ]]; then
        mkdir -p "$grafana_dashboards"
    fi

    # Up to ~60s: 30 attempts x 2s sleep.
    local max_attempts=30
    local attempt=1

    # NOTE(review): assumes default admin:admin credentials -- in production
    # the password is likely $GRAFANA_PASSWORD instead; confirm.
    while [[ $attempt -le $max_attempts ]]; do
        if curl -f -s http://admin:admin@localhost:3001/api/health > /dev/null 2>&1; then
            break
        fi

        sleep 2
        ((attempt++))
    done

    # A timeout used to pass silently; surface it so operators notice.
    if [[ $attempt -gt $max_attempts ]]; then
        warn "Grafana did not become ready within timeout"
    fi

    info "Monitoring setup completed"
    log "Monitoring configuration completed"
}

setup_ssl() {
    # Obtain and install a Let's Encrypt certificate. Production only;
    # no-op for every other environment.
    if [[ "$DEPLOY_ENV" != "production" ]]; then
        return 0
    fi

    log "Setting up SSL certificates..."

    local domain="${DOMAIN:-localhost}"

    if [[ "$domain" != "localhost" ]] && command -v certbot &> /dev/null; then
        info "Obtaining Let's Encrypt certificate for $domain"

        # certbot --standalone binds port 80/443 itself, so stop nginx first.
        docker compose stop nginx

        # Guarded: under `set -e` an unguarded certbot failure would abort
        # the whole script with nginx still stopped.
        if certbot certonly --standalone -d "$domain" --non-interactive --agree-tos --email "admin@$domain"; then
            # Copy the issued certificate pair into nginx's mount.
            mkdir -p "$PROJECT_ROOT/nginx/ssl"
            cp "/etc/letsencrypt/live/$domain/fullchain.pem" "$PROJECT_ROOT/nginx/ssl/"
            cp "/etc/letsencrypt/live/$domain/privkey.pem" "$PROJECT_ROOT/nginx/ssl/"
            info "SSL certificate installed"
        else
            warn "certbot failed for $domain; continuing with existing certificates"
        fi

        # Always restart nginx, whether or not issuance succeeded.
        docker compose start nginx
    else
        warn "Skipping SSL setup - certbot not available or domain is localhost"
    fi

    log "SSL setup completed"
}

verify_deployment() {
    # Probe the public endpoints (fatal on failure) and confirm every
    # compose container is in the running state.
    log "Verifying deployment..."

    local endpoints=(
        "http://localhost:3000"
        "http://localhost:8000/health"
        "http://localhost:8000/docs"
    )

    local endpoint
    for endpoint in "${endpoints[@]}"; do
        if curl -f -s "$endpoint" > /dev/null; then
            info "✓ $endpoint is accessible"
        else
            error "✗ $endpoint is not accessible"
        fi
    done

    # Guard on jq: without it the old pipeline produced an empty count (0)
    # and the container check passed silently even when nothing ran.
    if command -v jq &> /dev/null; then
        local failed_containers
        failed_containers=$(docker compose ps --format json | jq -r 'select(.State != "running") | .Name' | wc -l)
        if [[ $failed_containers -gt 0 ]]; then
            error "Some containers are not running. Check with: docker compose ps"
        fi
        info "All containers are running"
    else
        warn "jq not installed; skipping container state verification"
    fi

    log "Deployment verification completed"
}

cleanup() {
    # Reclaim disk space by pruning unused docker images and volumes.
    log "Cleaning up..."

    # Removes dangling images only; -f skips the confirmation prompt.
    docker image prune -f

    # WARNING: `volume prune` is system-wide -- it deletes EVERY volume not
    # referenced by an existing container, including data volumes of other
    # stopped stacks. Announce it in the log before running.
    warn "Pruning unused Docker volumes (system-wide)"
    docker volume prune -f

    log "Cleanup completed"
}

show_summary() {
    # Print the post-deployment cheat sheet: URLs per environment plus
    # handy docker compose commands.
    log "Deployment Summary"
    echo
    info "Environment: $DEPLOY_ENV"
    info "Frontend: http://localhost:3000"
    info "API: http://localhost:8000"
    info "API Documentation: http://localhost:8000/docs"

    if [[ "$DEPLOY_ENV" != "production" ]]; then
        info "Grafana: http://localhost:3001 (admin/admin)"
        info "Prometheus: http://localhost:9090"
    else
        info "Grafana: http://localhost:8080/grafana/ (admin/\$GRAFANA_PASSWORD)"
        info "Prometheus: http://localhost:8080/prometheus/"
    fi

    echo
    log "Deployment completed successfully!"
    echo

    local tip
    for tip in \
        "To view logs: docker compose logs -f" \
        "To check status: docker compose ps" \
        "To stop services: docker compose down"; do
        info "$tip"
    done
    echo
}

# Main execution
main() {
    # Run the full deployment pipeline, one phase at a time. Each phase
    # aborts the script itself (via error/set -e) on fatal problems.
    log "Starting ArXiv Subscription Platform deployment..."
    log "Environment: $DEPLOY_ENV"

    local phase
    for phase in \
        check_prerequisites \
        setup_directories \
        check_environment \
        backup_existing \
        pull_images \
        deploy_services \
        wait_for_services \
        run_database_migrations \
        setup_monitoring \
        setup_ssl \
        verify_deployment \
        cleanup \
        show_summary; do
        "$phase"
    done
}

# Script execution
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    # Ensure log directory exists
    mkdir -p "$(dirname "$LOG_FILE")"
    
    # Run main function
    main "$@" 2>&1 | tee -a "$LOG_FILE"
fi