#!/bin/bash
# Database Production Optimization Script for ArXiv Subscription Platform

# Fail fast: abort on any error (-e), on use of unset variables (-u),
# and when any stage of a pipeline fails (-o pipefail). Every variable
# below supplies a ${VAR:-default}, so -u is safe here.
set -euo pipefail

# Connection configuration (all overridable via environment variables)
DB_HOST="${DB_HOST:-localhost}"
DB_PORT="${DB_PORT:-5432}"
DB_NAME="${DB_NAME:-arxiv_platform}"
DB_USER="${DB_USER:-postgres}"
DB_PASSWORD="${DB_PASSWORD:-}"
# psql reads the password from PGPASSWORD, avoiding interactive prompts.
# NOTE(review): exported passwords are visible in the process environment;
# a ~/.pgpass file is the safer long-term option — consider migrating.
PGPASSWORD="$DB_PASSWORD"
export PGPASSWORD

# Performance configuration (defaults suit a modest dedicated DB host)
MAX_CONNECTIONS="${MAX_CONNECTIONS:-200}"
SHARED_BUFFERS="${SHARED_BUFFERS:-256MB}"
EFFECTIVE_CACHE_SIZE="${EFFECTIVE_CACHE_SIZE:-1GB}"
WORK_MEM="${WORK_MEM:-16MB}"
MAINTENANCE_WORK_MEM="${MAINTENANCE_WORK_MEM:-256MB}"
WAL_BUFFERS="${WAL_BUFFERS:-16MB}"
CHECKPOINT_COMPLETION_TARGET="${CHECKPOINT_COMPLETION_TARGET:-0.9}"
RANDOM_PAGE_COST="${RANDOM_PAGE_COST:-1.1}"  # SSD optimized

# ANSI color codes for log output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Print a green, timestamped status line to stdout.
# %b interprets the backslash escapes in the color variables,
# matching the behavior of 'echo -e'.
log() {
    printf '%b\n' "${GREEN}[$(date '+%Y-%m-%d %H:%M:%S')]${NC} $1"
}

# Print a yellow, timestamped warning to stderr.
# Fix: warnings previously went to stdout, polluting any captured
# command output; diagnostics belong on stderr.
warn() {
    echo -e "${YELLOW}[$(date '+%Y-%m-%d %H:%M:%S')] WARNING:${NC} $1" >&2
}

# Print a red, timestamped error to stderr and abort the script with
# status 1. Fix: error text previously went to stdout; diagnostics
# belong on stderr so callers capturing stdout see clean output.
error() {
    echo -e "${RED}[$(date '+%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
    exit 1
}

# Print a blue, timestamped informational line to stdout.
# %b interprets the escapes in the color variables, like 'echo -e'.
info() {
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%b\n' "${BLUE}[${stamp}] INFO:${NC} $1"
}

# Verify the target database is reachable with the configured
# credentials; abort the whole script via error() otherwise.
check_connection() {
    log "Testing database connection..."

    # Guard clause: bail out early if a trivial query cannot run.
    if ! psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT version();" > /dev/null 2>&1; then
        error "Failed to connect to database"
    fi
    log "Database connection successful"
}

# Report the overall database size and the ten largest public-schema
# tables. Sets globals DB_SIZE and TABLE_SIZES as a side effect.
get_db_size() {
    log "Checking database size and statistics..."

    # Shared connection arguments for both queries below.
    local -a conn_args=(-h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME")

    # Total on-disk size of the database, human readable.
    DB_SIZE=$(psql "${conn_args[@]}" -t -c "
        SELECT pg_size_pretty(pg_database_size('$DB_NAME'));"
    )

    # Ten largest tables by total relation size (heap + indexes + toast).
    TABLE_SIZES=$(psql "${conn_args[@]}" -t -c "
        SELECT schemaname, tablename, 
               pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size,
               pg_total_relation_size(schemaname||'.'||tablename) as bytes
        FROM pg_tables 
        WHERE schemaname = 'public' 
        ORDER BY bytes DESC 
        LIMIT 10;"
    )

    info "Database size: $DB_SIZE"
    info "Top 10 largest tables:"
    echo "$TABLE_SIZES"
}

# Apply production server settings via ALTER SYSTEM (written to
# postgresql.auto.conf; requires superuser) and reload the config.
# Fix: psql exits 0 even when every statement fails, so 'set -e' never
# caught SQL errors — ON_ERROR_STOP=1 makes psql stop at the first
# failed statement and return a non-zero status.
# The heredoc delimiter is unquoted on purpose: the $VARS inside are
# expanded by the shell before the SQL reaches psql.
optimize_postgresql_config() {
    log "Applying PostgreSQL performance optimizations..."

    # Apply configuration changes
    psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << EOF
-- Performance optimization settings
ALTER SYSTEM SET max_connections = '$MAX_CONNECTIONS';
ALTER SYSTEM SET shared_buffers = '$SHARED_BUFFERS';
ALTER SYSTEM SET effective_cache_size = '$EFFECTIVE_CACHE_SIZE';
ALTER SYSTEM SET work_mem = '$WORK_MEM';
ALTER SYSTEM SET maintenance_work_mem = '$MAINTENANCE_WORK_MEM';
ALTER SYSTEM SET wal_buffers = '$WAL_BUFFERS';
ALTER SYSTEM SET checkpoint_completion_target = '$CHECKPOINT_COMPLETION_TARGET';
ALTER SYSTEM SET random_page_cost = '$RANDOM_PAGE_COST';
ALTER SYSTEM SET effective_io_concurrency = '200';
ALTER SYSTEM SET default_statistics_target = '100';

-- Logging configuration for monitoring
ALTER SYSTEM SET log_min_duration_statement = '1000';  -- Log queries > 1s
ALTER SYSTEM SET log_checkpoints = 'on';
ALTER SYSTEM SET log_connections = 'on';
ALTER SYSTEM SET log_disconnections = 'on';
ALTER SYSTEM SET log_lock_waits = 'on';
ALTER SYSTEM SET log_temp_files = '100000';  -- Log temp files > 100MB

-- WAL configuration for performance and durability
ALTER SYSTEM SET wal_level = 'replica';
ALTER SYSTEM SET max_wal_senders = '10';
ALTER SYSTEM SET archive_mode = 'on';
ALTER SYSTEM SET archive_command = 'cp %p /var/lib/postgresql/archive/%f';

-- Autovacuum tuning
ALTER SYSTEM SET autovacuum = 'on';
ALTER SYSTEM SET autovacuum_max_workers = '4';
ALTER SYSTEM SET autovacuum_naptime = '30s';
ALTER SYSTEM SET autovacuum_vacuum_threshold = '50';
ALTER SYSTEM SET autovacuum_analyze_threshold = '50';
ALTER SYSTEM SET autovacuum_vacuum_scale_factor = '0.1';
ALTER SYSTEM SET autovacuum_analyze_scale_factor = '0.05';

-- Reload configuration
SELECT pg_reload_conf();
EOF

    log "PostgreSQL configuration optimized"
    # shared_buffers, wal_level, max_connections etc. need a restart;
    # pg_reload_conf() only picks up reloadable settings.
    warn "Note: Some settings require a database restart to take effect"
}

# Create the application's performance indexes. CONCURRENTLY avoids
# exclusive locks at the cost of slower builds; psql runs each
# statement in autocommit mode, which CONCURRENTLY requires (it cannot
# run inside a transaction block).
# Fix: the gin_trgm_ops indexes below need the pg_trgm extension,
# which was never created — without it those CREATE INDEX statements fail.
# The quoted 'EOF' delimiter passes the SQL to psql verbatim.
create_indexes() {
    log "Creating performance indexes..."
    
    psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'EOF'
-- Create indexes for better performance
-- Note: These will be created CONCURRENTLY to avoid blocking

-- The gin_trgm_ops operator class used below is provided by the
-- pg_trgm extension; install it first (no-op if already present).
CREATE EXTENSION IF NOT EXISTS pg_trgm;

-- Users table indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email_active 
    ON users(email, is_active) WHERE is_active = true;
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_created_at 
    ON users(created_at);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_last_login 
    ON users(last_login_at) WHERE last_login_at IS NOT NULL;

-- Papers table indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_papers_submission_date 
    ON papers(submission_date DESC);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_papers_primary_category 
    ON papers(primary_category);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_papers_title_trgm 
    ON papers USING gin(title gin_trgm_ops);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_papers_abstract_trgm 
    ON papers USING gin(abstract gin_trgm_ops);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_papers_authors_search 
    ON papers USING gin(to_tsvector('english', title || ' ' || abstract));

-- Authors table indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_authors_name_trgm 
    ON authors USING gin(name gin_trgm_ops);

-- Paper-author relationship indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_paper_authors_paper_id 
    ON paper_authors(paper_id);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_paper_authors_author_id 
    ON paper_authors(author_id);

-- User interactions indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_paper_interactions_user_id_date 
    ON user_paper_interactions(user_id, interaction_date DESC);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_paper_interactions_paper_id 
    ON user_paper_interactions(paper_id);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_paper_interactions_type_date 
    ON user_paper_interactions(interaction_type, interaction_date DESC);

-- Subscriptions indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_subscriptions_user_id_active 
    ON user_subscriptions(user_id, is_active) WHERE is_active = true;
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_subscriptions_type_target 
    ON user_subscriptions(subscription_type, subscription_target) WHERE is_active = true;

-- Email system indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_email_digests_user_id_date 
    ON email_digests(user_id, created_at DESC);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_email_delivery_logs_status_date 
    ON email_delivery_logs(delivery_status, created_at DESC);

-- Recommendations indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_recommendations_user_id_expires 
    ON user_recommendations(user_id, expires_at) WHERE expires_at > CURRENT_TIMESTAMP;
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_recommendations_score 
    ON user_recommendations(recommendation_score DESC) WHERE expires_at > CURRENT_TIMESTAMP;

-- Analytics indexes
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_analytics_date 
    ON user_analytics(date DESC);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_paper_analytics_date 
    ON paper_analytics(date DESC);

-- Audit logs indexes (for GDPR compliance)
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_logs_timestamp 
    ON audit_logs(timestamp DESC);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_logs_user_id_timestamp 
    ON audit_logs(user_id, timestamp DESC);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_logs_table_name 
    ON audit_logs(table_name, timestamp DESC);
EOF

    log "Performance indexes created"
}

# Refresh the query planner's statistics with a database-wide ANALYZE,
# then print per-table activity counters (inserts/updates/deletes),
# live/dead row counts, and last (auto)vacuum / (auto)analyze
# timestamps for the public schema, ordered by live row count.
analyze_tables() {
    log "Analyzing tables to update statistics..."
    
    # Quoted 'EOF' delimiter: the SQL is passed to psql verbatim,
    # with no shell expansion.
    psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'EOF'
-- Analyze all tables to update query planner statistics
ANALYZE;

-- Show statistics for key tables
SELECT 
    schemaname,
    tablename,
    n_tup_ins as "Inserts",
    n_tup_upd as "Updates", 
    n_tup_del as "Deletes",
    n_live_tup as "Live Rows",
    n_dead_tup as "Dead Rows",
    last_vacuum,
    last_autovacuum,
    last_analyze,
    last_autoanalyze
FROM pg_stat_user_tables 
WHERE schemaname = 'public'
ORDER BY n_live_tup DESC;
EOF

    log "Table analysis completed"
}

# Create monitoring infrastructure: enable pg_stat_statements and
# define three views — db_performance_summary (size/connections/cache
# hit ratio), slow_queries (statements averaging > 100ms), and
# table_bloat_check (dead-tuple ratio per table). Later steps
# (check_performance, the generated maintenance scripts) query these.
# NOTE(review): CREATE EXTENSION pg_stat_statements only works if the
# module is also listed in shared_preload_libraries — confirm server
# config. The slow_queries view uses total_time/mean_time, which were
# renamed total_exec_time/mean_exec_time in PostgreSQL 13 — confirm
# the target server version.
setup_monitoring() {
    log "Setting up database monitoring..."
    
    # Quoted 'EOF' delimiter: SQL passed to psql verbatim, no shell expansion.
    psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'EOF'
-- Enable query statistics collection
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;

-- Create monitoring views
CREATE OR REPLACE VIEW db_performance_summary AS
SELECT 
    'Database Size' as metric,
    pg_size_pretty(pg_database_size(current_database())) as value
UNION ALL
SELECT 
    'Active Connections',
    count(*)::text
FROM pg_stat_activity 
WHERE state = 'active'
UNION ALL
SELECT 
    'Idle Connections',
    count(*)::text
FROM pg_stat_activity 
WHERE state = 'idle'
UNION ALL
SELECT 
    'Cache Hit Ratio',
    round(
        sum(blks_hit) * 100.0 / 
        NULLIF(sum(blks_hit) + sum(blks_read), 0), 2
    )::text || '%'
FROM pg_stat_database
WHERE datname = current_database();

-- View for slow queries
CREATE OR REPLACE VIEW slow_queries AS
SELECT 
    query,
    calls,
    total_time,
    mean_time,
    rows,
    100.0 * shared_blks_hit / NULLIF(shared_blks_hit + shared_blks_read, 0) AS hit_percent
FROM pg_stat_statements 
WHERE mean_time > 100  -- queries slower than 100ms
ORDER BY mean_time DESC 
LIMIT 20;

-- View for table bloat estimation
CREATE OR REPLACE VIEW table_bloat_check AS
SELECT 
    schemaname,
    tablename,
    n_dead_tup,
    n_live_tup,
    CASE 
        WHEN n_live_tup > 0 
        THEN round(100.0 * n_dead_tup / n_live_tup, 2) 
        ELSE 0 
    END as bloat_percent,
    last_vacuum,
    last_autovacuum
FROM pg_stat_user_tables
WHERE n_dead_tup > 1000
ORDER BY bloat_percent DESC;
EOF

    log "Database monitoring views created"
}

# One-off maintenance pass: VACUUM (ANALYZE, VERBOSE) the hot tables,
# then refresh statistics database-wide. The REINDEX statements are
# deliberately left commented out as a manual escape hatch for index
# bloat (REINDEX is heavier and should be run with care).
run_maintenance() {
    log "Running database maintenance tasks..."
    
    # Vacuum and analyze critical tables
    psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'EOF'
-- Vacuum critical tables
VACUUM (ANALYZE, VERBOSE) users;
VACUUM (ANALYZE, VERBOSE) papers;
VACUUM (ANALYZE, VERBOSE) user_paper_interactions;
VACUUM (ANALYZE, VERBOSE) user_subscriptions;
VACUUM (ANALYZE, VERBOSE) email_digests;
VACUUM (ANALYZE, VERBOSE) user_recommendations;

-- Reindex if necessary (check for index bloat)
-- REINDEX INDEX CONCURRENTLY idx_papers_submission_date;
-- REINDEX INDEX CONCURRENTLY idx_users_email_active;

-- Update table statistics
ANALYZE;
EOF

    log "Database maintenance completed"
}

# Print a performance snapshot: the db_performance_summary view,
# connection counts by state, the ten slowest statements, and
# size/activity figures for the ten largest public tables.
# NOTE(review): depends on the db_performance_summary view created by
# setup_monitoring(), and on pg_stat_statements columns
# total_time/mean_time (renamed *_exec_time in PostgreSQL 13) —
# confirm run order and server version.
check_performance() {
    log "Checking database performance metrics..."
    
    # Quoted 'EOF' delimiter: SQL passed to psql verbatim, no shell expansion.
    psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'EOF'
-- Performance summary
SELECT * FROM db_performance_summary;

-- Connection stats
SELECT 
    state,
    count(*) as connections
FROM pg_stat_activity 
GROUP BY state;

-- Top 10 slowest queries
SELECT 
    left(query, 50) as query_snippet,
    calls,
    round(total_time::numeric, 2) as total_time_ms,
    round(mean_time::numeric, 2) as avg_time_ms
FROM pg_stat_statements 
ORDER BY mean_time DESC 
LIMIT 10;

-- Table sizes and activity
SELECT 
    t.tablename,
    pg_size_pretty(pg_total_relation_size(t.schemaname||'.'||t.tablename)) as size,
    s.n_tup_ins as inserts,
    s.n_tup_upd as updates,
    s.n_tup_del as deletes,
    s.n_live_tup as live_rows
FROM pg_tables t
LEFT JOIN pg_stat_user_tables s ON t.tablename = s.relname
WHERE t.schemaname = 'public'
ORDER BY pg_total_relation_size(t.schemaname||'.'||t.tablename) DESC
LIMIT 10;
EOF

    log "Performance check completed"
}

# Install daily and weekly maintenance scripts under /usr/local/bin and
# register them in the invoking user's crontab. The generated scripts
# read their DB_* / DB_PASSWORD settings from their own environment at
# run time (quoted 'EOF' heredocs — nothing is expanded here).
# Fixes: (1) crontab entries were appended unconditionally, duplicating
# them on every run; (2) under 'set -e', '(crontab -l; echo ...)'
# aborted the subshell before the echo whenever no crontab existed yet;
# (3) the log directory the generated scripts tee into was never created.
create_maintenance_scripts() {
    log "Creating automated maintenance scripts..."
    
    # The generated scripts log via tee into this directory; create it
    # up front so the cron jobs do not fail on a missing path.
    mkdir -p /var/log/arxiv-platform

    # Daily maintenance script
    cat > /usr/local/bin/daily-db-maintenance.sh << 'EOF'
#!/bin/bash
# Daily database maintenance script

set -e

# Configuration
DB_HOST="${DB_HOST:-localhost}"
DB_PORT="${DB_PORT:-5432}"
DB_NAME="${DB_NAME:-arxiv_platform}"
DB_USER="${DB_USER:-postgres}"
export PGPASSWORD="${DB_PASSWORD}"

LOG_FILE="/var/log/arxiv-platform/db-maintenance.log"

exec > >(tee -a "$LOG_FILE")
exec 2>&1

echo "[$(date)] Starting daily database maintenance..."

# Update statistics for query planner
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "ANALYZE;"

# Check for tables that need vacuuming
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'SQL'
-- Vacuum tables with high dead tuple ratio
DO $$
DECLARE
    r RECORD;
BEGIN
    FOR r IN 
        SELECT schemaname, tablename 
        FROM pg_stat_user_tables 
        WHERE n_dead_tup > 1000 
          AND (n_dead_tup::float / GREATEST(n_live_tup, 1) > 0.1)
    LOOP
        EXECUTE 'VACUUM ANALYZE ' || quote_ident(r.schemaname) || '.' || quote_ident(r.tablename);
        RAISE NOTICE 'Vacuumed table %.%', r.schemaname, r.tablename;
    END LOOP;
END $$;
SQL

# Log performance metrics
echo "Performance metrics:"
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT * FROM db_performance_summary;"

# Check for long-running queries
echo "Long-running queries:"
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "
SELECT pid, now() - pg_stat_activity.query_start AS duration, query 
FROM pg_stat_activity 
WHERE (now() - pg_stat_activity.query_start) > interval '5 minutes'
AND state = 'active';"

echo "[$(date)] Daily database maintenance completed"
EOF

    chmod +x /usr/local/bin/daily-db-maintenance.sh
    
    # Weekly maintenance script
    cat > /usr/local/bin/weekly-db-maintenance.sh << 'EOF'
#!/bin/bash
# Weekly database maintenance script

set -e

# Configuration
DB_HOST="${DB_HOST:-localhost}"
DB_PORT="${DB_PORT:-5432}"
DB_NAME="${DB_NAME:-arxiv_platform}"
DB_USER="${DB_USER:-postgres}"
export PGPASSWORD="${DB_PASSWORD}"

LOG_FILE="/var/log/arxiv-platform/db-weekly-maintenance.log"

exec > >(tee -a "$LOG_FILE")
exec 2>&1

echo "[$(date)] Starting weekly database maintenance..."

# Full vacuum for critical tables
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'SQL'
VACUUM (FULL, ANALYZE) audit_logs;
VACUUM (FULL, ANALYZE) email_delivery_logs;
VACUUM (FULL, ANALYZE) api_usage_logs;
SQL

# Check and rebuild indexes if needed
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'SQL'
-- Check for index bloat and rebuild if necessary
SELECT 
    indexname,
    pg_size_pretty(pg_relation_size(indexname::regclass)) as size
FROM pg_indexes 
WHERE schemaname = 'public'
ORDER BY pg_relation_size(indexname::regclass) DESC;
SQL

# Update table statistics with higher precision
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "
ALTER SYSTEM SET default_statistics_target = '1000';
SELECT pg_reload_conf();
ANALYZE;
ALTER SYSTEM SET default_statistics_target = '100';
SELECT pg_reload_conf();"

echo "[$(date)] Weekly database maintenance completed"
EOF

    chmod +x /usr/local/bin/weekly-db-maintenance.sh
    
    # Register cron entries idempotently: only append when the entry is
    # not already present. '|| true' keeps 'set -e' from killing the
    # subshell when no crontab exists yet (crontab -l exits non-zero).
    if command -v crontab >/dev/null 2>&1; then
        if ! crontab -l 2>/dev/null | grep -qF '/usr/local/bin/daily-db-maintenance.sh'; then
            (crontab -l 2>/dev/null || true; echo "0 2 * * * /usr/local/bin/daily-db-maintenance.sh") | crontab -
        fi
        if ! crontab -l 2>/dev/null | grep -qF '/usr/local/bin/weekly-db-maintenance.sh'; then
            (crontab -l 2>/dev/null || true; echo "0 3 * * 0 /usr/local/bin/weekly-db-maintenance.sh") | crontab -
        fi
        info "Maintenance scripts added to crontab"
    fi
    
    log "Maintenance scripts created"
}

# Print the closing summary of everything the optimization run did,
# followed by operator reminders (restart, monitoring, tuning).
show_summary() {
    log "Database Optimization Summary"
    echo

    local item
    for item in \
        "Configuration optimized for production workload" \
        "Performance indexes created" \
        "Monitoring views established" \
        "Automated maintenance scripts installed"
    do
        info "$item"
    done
    echo

    info "Key optimizations applied:"
    for item in \
        "- Connection pooling: $MAX_CONNECTIONS max connections" \
        "- Memory: $SHARED_BUFFERS shared buffers, $EFFECTIVE_CACHE_SIZE cache size" \
        "- Query optimization: Updated statistics, full-text search indexes" \
        "- Monitoring: pg_stat_statements enabled, performance views created" \
        "- Maintenance: Daily/weekly automated tasks scheduled"
    do
        info "$item"
    done
    echo

    warn "Remember to:"
    for item in \
        "1. Restart PostgreSQL to apply all configuration changes" \
        "2. Monitor performance metrics regularly" \
        "3. Review slow query logs" \
        "4. Adjust configuration based on actual workload"
    do
        warn "$item"
    done
    echo

    log "Database optimization completed successfully!"
}

# Main execution
# Drive the full optimization pipeline in order. Order matters:
# configuration and indexes are applied before the monitoring views
# that the later steps (run_maintenance, check_performance, generated
# cron scripts) query.
main() {
    log "Starting database optimization for ArXiv Subscription Platform..."

    local step
    for step in \
        check_connection \
        get_db_size \
        optimize_postgresql_config \
        create_indexes \
        analyze_tables \
        setup_monitoring \
        run_maintenance \
        check_performance \
        create_maintenance_scripts \
        show_summary
    do
        "$step"
    done
}

# Script execution
# Run main only when this file is executed directly (not sourced);
# sourcing the file makes the individual functions available for
# testing without triggering the full pipeline.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi