#!/bin/bash

# Online Intelligent Examination System (AI-SOES) project bootstrap script.
# Author: AI-SOES team
# Version: 1.0

# Abort on the first failing command; also fail a pipeline when any stage
# fails (plain `set -e` only looks at the last stage of a pipeline).
set -eo pipefail

# ANSI color escape sequences used by the print_* helpers below.
# Marked readonly: they are constants and must never be reassigned.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# 打印带颜色的消息
print_message() {
    # Print one line in the given ANSI color, then reset the terminal color.
    #   $1 - ANSI color escape sequence (e.g. $GREEN)
    #   $2 - message text
    local colour="$1"
    local text="$2"
    echo -e "${colour}${text}${NC}"
}

print_success() {
    # Print a success message ("✅ …") in green.
    #   $1 - message text
    # $GREEN is quoted: unquoted, an empty/whitespace color value would be
    # word-split away and the message would be consumed as the color argument.
    print_message "$GREEN" "✅ $1"
}

print_error() {
    # Print an error message ("❌ …") in red.
    #   $1 - message text
    # $RED is quoted so an empty/whitespace color cannot shift the arguments.
    print_message "$RED" "❌ $1"
}

print_warning() {
    # Print a warning message ("⚠️  …") in yellow.
    #   $1 - message text
    # $YELLOW is quoted so an empty/whitespace color cannot shift the arguments.
    print_message "$YELLOW" "⚠️  $1"
}

print_info() {
    # Print an informational message ("ℹ️  …") in blue.
    #   $1 - message text
    # $BLUE is quoted so an empty/whitespace color cannot shift the arguments.
    print_message "$BLUE" "ℹ️  $1"
}

# 显示横幅
show_banner() {
    # Print the ASCII-art project banner wrapped in blue / reset color codes.
    # The quoted 'EOF' delimiter keeps the banner text literal.
    printf '%b\n' "$BLUE"
    cat << 'EOF'
██╗  ██╗ ██████╗ ██████╗ ███████╗██████╗
██║ ██╔╝██╔═══██╗██╔══██╗██╔════╝██╔══██╗
█████╔╝ ██║   ██║██║  ██║█████╗  ██████╔╝
██╔═██╗ ██║   ██║██║  ██║██╔══╝  ██╔══██╗
██║  ██╗╚██████╔╝██████╔╝███████╗██║  ██║
╚═╝  ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝  ╚═╝

    在线智能考试系统 (AI-SOES)
       Online Intelligent Examination System
EOF
    printf '%b\n' "$NC"
}

# 检查系统要求
check_requirements() {
    # Verify that every required CLI tool is installed; exit 1 with an
    # installation guide if anything is missing.
    print_info "📋 检查系统要求..."

    local -a missing_requirements=()
    local entry tool cmd req

    # Data-driven check list: "display name:command looked up in PATH".
    local -a required_tools=(
        "Docker:docker"
        "Docker Compose:docker-compose"
        "Node.js:node"
        "Python 3:python3"
        "Git:git"
    )

    for entry in "${required_tools[@]}"; do
        tool=${entry%%:*}
        cmd=${entry#*:}
        if command -v "$cmd" > /dev/null 2>&1; then
            print_success "$tool 已安装"
        else
            missing_requirements+=("$tool")
        fi
    done

    # Anything missing? Show what and where to get it, then bail out.
    if [[ ${#missing_requirements[@]} -gt 0 ]]; then
        print_error "以下工具未安装:"
        for req in "${missing_requirements[@]}"; do
            echo "  - $req"
        done
        echo ""
        print_info "请参考以下安装指南:"
        echo "Docker: https://docs.docker.com/get-docker/"
        echo "Node.js: https://nodejs.org/"
        echo "Python 3: https://www.python.org/downloads/"
        echo "Git: https://git-scm.com/"
        exit 1
    fi

    print_success "系统要求检查通过"
}

# 创建项目结构
create_project_structure() {
    # Create the full on-disk directory skeleton for frontend, backend,
    # AI service, docs, config, logs, tests and data.
    print_info "📁 创建项目目录结构..."

    # Brace expansion happens inside the (unquoted) array literal, so every
    # pattern below expands to its full cross-product of paths. A single
    # `mkdir -p` then creates them all.
    local -a project_dirs=(
        # top-level layout
        {frontend,backend,ai-service,docs,scripts,config,logs,tests,data}
        docs/{api,design,deployment,images}
        config/{nginx,mysql}
        logs/{backend,frontend,ai-service,nginx}
        tests/{unit,integration,e2e}
        data/{models,datasets,backups}
        # backend layout — each app gets migrations/ and tests/
        backend/{apps,common,config,static,media,requirements}
        backend/apps/{users,question_bank,exam_paper,examination,grading,ai_integration}
        backend/apps/{users,question_bank,exam_paper,examination,grading,ai_integration}/{migrations,tests}
        # frontend layout
        frontend/{src,public,tests,build}
        frontend/src/{api,assets,components,router,stores,types,utils,views}
        frontend/src/assets/{images,styles,icons}
        frontend/src/components/{common,exam,charts,layout}
        frontend/src/views/{auth,teacher,student,admin}
        frontend/tests/{unit,integration,e2e}
        # AI service layout
        ai-service/{app,models,data,tests,scripts,logs}
        ai-service/app/{api,models,services,utils,config}
        ai-service/models/{question_generator,answer_grader,pretrained}
        ai-service/data/{datasets,preprocessed,augmented}
    )
    mkdir -p "${project_dirs[@]}"

    print_success "项目目录结构创建完成"
}

# 初始化Git仓库
init_git_repository() {
    # Write .gitignore and .gitattributes, then create the initial commit.
    # NOTE(review): `git commit` fails when user.name/user.email are not
    # configured; with `set -e` that aborts the whole script — confirm this
    # is acceptable or guard the commit.
    print_info "🔧 初始化Git仓库..."

    # Create .gitignore. The quoted 'EOF' delimiter writes the content
    # verbatim (no variable expansion). Covers Python, Node.js, editor/OS
    # cruft, ML model artifacts, data files and local config overrides.
    cat > .gitignore << 'EOF'
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
Pipfile.lock

# poetry
poetry.lock

# pdm
.pdm.toml

# PEP 582
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
.idea/

# VS Code
.vscode/

# Node.js
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage/
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
public

# vue-press build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*

# Docker
.dockerignore

# Logs
logs/
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Coverage directory used by tools like istanbul
coverage/
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
public

# Storybook build outputs
.out
.storybook-out

# Temporary folders
tmp/
temp/

# Editor directories and files
.vscode/
.idea/
*.swp
*.swo
*~

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# AI/ML Models
*.pth
*.pkl
*.joblib
*.h5
*.pt
*.bin

# Data files
*.csv
*.json
*.xlsx
*.xls
!package.json
!package-lock.json
!requirements*.txt

# Database
*.db
*.sqlite
*.sqlite3

# Media files
media/
static/
uploads/

# Backup files
*.bak
*.backup
backups/

# Custom
config/production.py
config/staging.py
.env.local
.env.production
EOF

    # Create .gitattributes: LF normalization for source text, CRLF for
    # Windows batch scripts, binary markers, and Git LFS rules for media.
    # NOTE(review): LFS rules require `git lfs install`, which this script
    # does not run — confirm LFS is expected on developer machines.
    cat > .gitattributes << 'EOF'
# Auto detect text files and perform LF normalization
* text=auto

# Explicitly declare text files you want to always be normalized and converted
# to native line endings on checkout.
*.js text eol=lf
*.jsx text eol=lf
*.ts text eol=lf
*.tsx text eol=lf
*.json text eol=lf
*.css text eol=lf
*.scss text eol=lf
*.html text eol=lf
*.md text eol=lf
*.yml text eol=lf
*.yaml text eol=lf
*.py text eol=lf

# Declare files that will always have CRLF line endings on checkout.
*.bat text eol=crlf
*.cmd text eol=crlf

# Denote all files that are truly binary and should not be modified.
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.ico binary
*.svg binary
*.woff binary
*.woff2 binary
*.ttf binary
*.eot binary
*.pdf binary
*.zip binary
*.gz binary
*.tar binary
*.rar binary
*.7z binary

# Large files
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.mov filter=lfs diff=lfs merge=lfs -text
*.avi filter=lfs diff=lfs merge=lfs -text
EOF

    # Initialize the repository and commit the scaffold only on first run;
    # an existing .git directory is left untouched.
    if [ ! -d .git ]; then
        git init
        git add .
        git commit -m "🎉 Initial commit: AI-SOES project structure created"
        print_success "Git仓库初始化完成"
    else
        print_warning "Git仓库已存在"
    fi
}

# 创建Docker配置
setup_docker() {
    # Generate docker-compose.yml plus dev/prod override files.
    # NOTE(review): the compose file mounts ./config/redis/redis.conf,
    # ./logs/mysql and ./ssl, and references backend/frontend/ai-service
    # Dockerfiles that this script does not create — confirm they are
    # produced by a later step.
    print_info "🐳 配置Docker环境..."

    # Base compose file: MySQL, Redis, backend, AI service, frontend,
    # Nginx reverse proxy, and Celery worker/beat/flower. The quoted 'EOF'
    # delimiter keeps the ${VAR:-default} placeholders literal so that
    # docker-compose (not this script) resolves them at runtime.
    cat > docker-compose.yml << 'EOF'
version: '3.8'

services:
  # MySQL数据库
  mysql:
    image: mysql:8.0
    container_name: soes-mysql
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASSWORD:-rootpassword}
      MYSQL_DATABASE: ${DB_NAME:-soes_db}
      MYSQL_USER: ${DB_USER:-soes_user}
      MYSQL_PASSWORD: ${DB_PASSWORD:-soes_password}
    ports:
      - "${DB_PORT:-3306}:3306"
    volumes:
      - mysql_data:/var/lib/mysql
      - ./config/mysql/init.sql:/docker-entrypoint-initdb.d/init.sql
      - ./logs/mysql:/var/log/mysql
    networks:
      - soes-network
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      timeout: 20s
      retries: 10

  # Redis缓存和消息队列
  redis:
    image: redis:7-alpine
    container_name: soes-redis
    restart: unless-stopped
    command: redis-server --appendonly yes
    ports:
      - "${REDIS_PORT:-6379}:6379"
    volumes:
      - redis_data:/data
      - ./config/redis/redis.conf:/usr/local/etc/redis/redis.conf
    networks:
      - soes-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      timeout: 10s
      retries: 5

  # 后端服务
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: soes-backend
    restart: unless-stopped
    environment:
      - DEBUG=${DEBUG:-False}
      - SECRET_KEY=${SECRET_KEY}
      - DATABASE_URL=mysql://${DB_USER:-soes_user}:${DB_PASSWORD:-soes_password}@mysql:3306/${DB_NAME:-soes_db}
      - REDIS_URL=redis://redis:6379/0
      - ALLOWED_HOSTS=${ALLOWED_HOSTS:-localhost,127.0.0.1}
    ports:
      - "${BACKEND_PORT:-8000}:8000"
    depends_on:
      mysql:
        condition: service_healthy
      redis:
        condition: service_healthy
    volumes:
      - ./backend:/app
      - backend_static:/app/static
      - backend_media:/app/media
      - ./logs/backend:/app/logs
    networks:
      - soes-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/api/v1/health/"]
      timeout: 30s
      retries: 5

  # AI服务
  ai-service:
    build:
      context: ./ai-service
      dockerfile: Dockerfile
    container_name: soes-ai-service
    restart: unless-stopped
    environment:
      - DEBUG=${DEBUG:-False}
      - REDIS_URL=redis://redis:6379/1
      - MODEL_PATH=/app/models
    ports:
      - "${AI_SERVICE_PORT:-5000}:5000"
    depends_on:
      redis:
        condition: service_healthy
    volumes:
      - ./ai-service:/app
      - ai_models:/app/models
      - ./logs/ai-service:/app/logs
    networks:
      - soes-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:5000/api/v1/health"]
      timeout: 30s
      retries: 5

  # 前端服务
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    container_name: soes-frontend
    restart: unless-stopped
    environment:
      - NODE_ENV=${NODE_ENV:-production}
    ports:
      - "${FRONTEND_PORT:-3000}:80"
    depends_on:
      - backend
      - ai-service
    networks:
      - soes-network

  # Nginx反向代理
  nginx:
    image: nginx:alpine
    container_name: soes-nginx
    restart: unless-stopped
    ports:
      - "${HTTP_PORT:-80}:80"
      - "${HTTPS_PORT:-443}:443"
    volumes:
      - ./config/nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./config/nginx/sites-available:/etc/nginx/sites-available
      - backend_static:/var/www/static
      - backend_media:/var/www/media
      - ./logs/nginx:/var/log/nginx
      - ./ssl:/etc/nginx/ssl
    depends_on:
      - frontend
      - backend
    networks:
      - soes-network
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
      timeout: 10s
      retries: 5

  # Celery工作进程
  celery-worker:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: soes-celery-worker
    restart: unless-stopped
    command: celery -A config worker --loglevel=info --concurrency=4
    environment:
      - DEBUG=${DEBUG:-False}
      - SECRET_KEY=${SECRET_KEY}
      - DATABASE_URL=mysql://${DB_USER:-soes_user}:${DB_PASSWORD:-soes_password}@mysql:3306/${DB_NAME:-soes_db}
      - REDIS_URL=redis://redis:6379/0
    depends_on:
      mysql:
        condition: service_healthy
      redis:
        condition: service_healthy
    volumes:
      - ./backend:/app
      - backend_media:/app/media
      - ./logs/backend:/app/logs
    networks:
      - soes-network

  # Celery调度器
  celery-beat:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: soes-celery-beat
    restart: unless-stopped
    command: celery -A config beat --loglevel=info
    environment:
      - DEBUG=${DEBUG:-False}
      - SECRET_KEY=${SECRET_KEY}
      - DATABASE_URL=mysql://${DB_USER:-soes_user}:${DB_PASSWORD:-soes_password}@mysql:3306/${DB_NAME:-soes_db}
      - REDIS_URL=redis://redis:6379/0
    depends_on:
      mysql:
        condition: service_healthy
      redis:
        condition: service_healthy
    volumes:
      - ./backend:/app
      - ./logs/backend:/app/logs
    networks:
      - soes-network

  # Celery监控
  celery-flower:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: soes-celery-flower
    restart: unless-stopped
    command: celery -A config flower --port=5555
    environment:
      - DEBUG=${DEBUG:-False}
      - SECRET_KEY=${SECRET_KEY}
      - REDIS_URL=redis://redis:6379/0
    ports:
      - "${FLOWER_PORT:-5555}:5555"
    depends_on:
      redis:
        condition: service_healthy
    networks:
      - soes-network

volumes:
  mysql_data:
  redis_data:
  backend_static:
  backend_media:
  ai_models:

networks:
  soes-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
EOF

    # Development override: debug env, live-reload commands, source bind
    # mounts, and nginx disabled via a compose profile.
    cat > docker-compose.dev.yml << 'EOF'
version: '3.8'

services:
  backend:
    environment:
      - DEBUG=True
      - LOG_LEVEL=DEBUG
    command: python manage.py runserver 0.0.0.0:8000
    volumes:
      - ./backend:/app
    ports:
      - "8000:8000"

  ai-service:
    environment:
      - DEBUG=True
      - LOG_LEVEL=DEBUG
    command: python app/main.py
    volumes:
      - ./ai-service:/app
    ports:
      - "5000:5000"

  frontend:
    environment:
      - NODE_ENV=development
    build:
      context: ./frontend
      dockerfile: Dockerfile.dev
    command: npm run dev
    volumes:
      - ./frontend:/app
      - /app/node_modules
    ports:
      - "3000:3000"

  nginx:
    profiles:
      - disabled
EOF

    # Production override: replica counts and CPU/memory limits per service.
    cat > docker-compose.prod.yml << 'EOF'
version: '3.8'

services:
  backend:
    environment:
      - DEBUG=False
      - LOG_LEVEL=INFO
    deploy:
      replicas: 2
      resources:
        limits:
          cpus: '1.0'
          memory: 1G
        reservations:
          cpus: '0.5'
          memory: 512M

  ai-service:
    environment:
      - DEBUG=False
      - LOG_LEVEL=INFO
    deploy:
      replicas: 2
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '1.0'
          memory: 2G

  frontend:
    deploy:
      replicas: 2
      resources:
        limits:
          cpus: '0.5'
          memory: 512M

  nginx:
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 256M

  celery-worker:
    deploy:
      replicas: 3
      resources:
        limits:
          cpus: '1.0'
          memory: 1G
EOF

    print_success "Docker环境配置完成"
}

# 创建配置文件
setup_configs() {
    # Generate runtime configuration files: .env template, MySQL init SQL,
    # Redis config, Nginx config, and the project Makefile. All heredocs
    # use a quoted 'EOF' delimiter, so nothing inside them is expanded by
    # this script.
    print_info "⚙️ 创建配置文件..."

    # Environment variable template, copied to .env by install_dependencies.
    cat > .env.example << 'EOF'
# =============================================================================
# 在线智能考试系统环境变量配置
# =============================================================================

# 应用基础配置
SECRET_KEY=your-super-secret-key-change-in-production
DEBUG=False
ALLOWED_HOSTS=localhost,127.0.0.1

# 数据库配置
DB_NAME=soes_db
DB_USER=soes_user
DB_PASSWORD=soes_password
DB_ROOT_PASSWORD=rootpassword
DB_HOST=mysql
DB_PORT=3306

# Redis配置
REDIS_URL=redis://redis:6379/0
REDIS_PORT=6379

# 服务端口配置
BACKEND_PORT=8000
FRONTEND_PORT=3000
AI_SERVICE_PORT=5000
HTTP_PORT=80
HTTPS_PORT=443
FLOWER_PORT=5555

# 前端配置
NODE_ENV=production
VITE_API_BASE_URL=http://localhost:8000/api/v1

# AI服务配置
MODEL_PATH=./ai-service/models
AI_SERVICE_TIMEOUT=30
MODEL_CACHE_SIZE=10

# 邮件配置
EMAIL_HOST=smtp.gmail.com
EMAIL_PORT=587
EMAIL_HOST_USER=your-email@gmail.com
EMAIL_HOST_PASSWORD=your-app-password
EMAIL_USE_TLS=True

# 文件存储配置
MEDIA_ROOT=./media
STATIC_ROOT=./staticfiles
MAX_UPLOAD_SIZE=10485760  # 10MB

# 安全配置
CORS_ALLOWED_ORIGINS=http://localhost:3000,http://127.0.0.1:3000
SECURE_SSL_REDIRECT=False
SESSION_COOKIE_SECURE=False
CSRF_COOKIE_SECURE=False

# 日志配置
LOG_LEVEL=INFO
LOG_FILE=./logs/app.log

# Celery配置
CELERY_BROKER_URL=redis://redis:6379/0
CELERY_RESULT_BACKEND=redis://redis:6379/0

# 监控配置
PROMETHEUS_ENABLED=False
METRICS_PORT=9090

# 第三方服务配置
SENTRY_DSN=
GOOGLE_ANALYTICS_ID=

# 开发配置
DEVELOPMENT=True
RELOAD=True
EOF

    # MySQL bootstrap script, mounted into /docker-entrypoint-initdb.d
    # by docker-compose.yml and run once on first container start.
    mkdir -p config/mysql
    cat > config/mysql/init.sql << 'EOF'
-- 在线智能考试系统数据库初始化脚本

-- 设置字符集
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;

-- 创建数据库用户和权限
CREATE USER IF NOT EXISTS 'soes_user'@'%' IDENTIFIED BY 'soes_password';
GRANT ALL PRIVILEGES ON soes_db.* TO 'soes_user'@'%';
FLUSH PRIVILEGES;

SET FOREIGN_KEY_CHECKS = 1;
EOF

    # Redis configuration, bind-mounted by the redis service.
    mkdir -p config/redis
    cat > config/redis/redis.conf << 'EOF'
# Redis配置文件

# 基础配置
port 6379
bind 0.0.0.0
timeout 0
tcp-keepalive 300

# 内存配置
maxmemory 256mb
maxmemory-policy allkeys-lru

# 持久化配置
save 900 1
save 300 10
save 60 10000

# AOF配置
appendonly yes
appendfsync everysec

# 日志配置
loglevel notice
logfile ""

# 安全配置
protected-mode no

# 性能配置
tcp-backlog 511
databases 16
EOF

    # Nginx reverse-proxy configuration: serves the frontend, proxies
    # /api/ and /ai/, rate-limits the login endpoint, serves static/media.
    mkdir -p config/nginx/sites-available
    cat > config/nginx/nginx.conf << 'EOF'
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
    use epoll;
    multi_accept on;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # 日志格式
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    # 基础配置
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    server_tokens off;

    # Gzip压缩
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/json
        application/javascript
        application/xml+rss
        application/atom+xml
        image/svg+xml;

    # 上游服务器配置
    upstream backend {
        server backend:8000;
        keepalive 32;
    }

    upstream ai_service {
        server ai-service:5000;
        keepalive 16;
    }

    # 限流配置
    limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=login:10m rate=5r/m;

    # 主服务器配置
    server {
        listen 80;
        server_name localhost;

        # 安全头部
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-XSS-Protection "1; mode=block" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;

        # 前端静态文件
        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
            try_files $uri $uri/ /index.html;

            # 缓存配置
            location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
                expires 1y;
                add_header Cache-Control "public, immutable";
            }
        }

        # API代理
        location /api/ {
            limit_req zone=api burst=20 nodelay;

            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_connect_timeout 30s;
            proxy_send_timeout 30s;
            proxy_read_timeout 30s;
        }

        # AI服务代理
        location /ai/ {
            limit_req zone=api burst=10 nodelay;

            proxy_pass http://ai_service/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_connect_timeout 60s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;
        }

        # 登录接口限流
        location /api/v1/auth/login/ {
            limit_req zone=login burst=5 nodelay;

            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        # 静态文件代理
        location /static/ {
            alias /var/www/static/;
            expires 1y;
            add_header Cache-Control "public, immutable";
        }

        location /media/ {
            alias /var/www/media/;
            expires 1M;
            add_header Cache-Control "public";
        }

        # 健康检查
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }

        # 错误页面
        error_page 404 /404.html;
        error_page 500 502 503 504 /50x.html;

        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
}
EOF

    # Project Makefile with the developer workflow targets. Recipe lines
    # inside the heredoc must keep their leading TAB characters — make
    # rejects space-indented recipes.
    cat > Makefile << 'EOF'
.PHONY: help install dev test build deploy clean logs shell

# 默认目标
help:
	@echo "📚 在线智能考试系统 - 可用命令:"
	@echo ""
	@echo "🚀 开发命令:"
	@echo "  install     - 安装所有依赖"
	@echo "  dev         - 启动开发环境"
	@echo "  down        - 停止开发环境"
	@echo "  logs        - 查看服务日志"
	@echo "  shell       - 进入后端shell"
	@echo ""
	@echo "🧪 测试命令:"
	@echo "  test        - 运行所有测试"
	@echo "  test-backend - 运行后端测试"
	@echo "  test-frontend- 运行前端测试"
	@echo "  test-ai      - 运行AI服务测试"
	@echo ""
	@echo "🔧 管理命令:"
	@echo "  migrate     - 运行数据库迁移"
	@echo "  createsuperuser - 创建超级用户"
	@echo "  collectstatic - 收集静态文件"
	@echo "  loaddata    - 加载初始数据"
	@echo ""
	@echo "🏗️ 构建命令:"
	@echo "  build       - 构建Docker镜像"
	@echo "  rebuild     - 重新构建镜像"
	@echo "  deploy      - 部署到生产环境"
	@echo ""
	@echo "🧹 清理命令:"
	@echo "  clean       - 清理临时文件"
	@echo "  clean-all   - 清理所有数据"

# 安装依赖
install:
	@echo "📦 安装项目依赖..."
	@cp .env.example .env
	@echo "✅ 请编辑 .env 文件配置您的环境变量"
	@echo "✅ 依赖安装完成"

# 启动开发环境
dev:
	@echo "🚀 启动开发环境..."
	docker-compose -f docker-compose.yml -f docker-compose.dev.yml up -d
	@echo "✅ 开发环境已启动"
	@echo "🌐 前端地址: http://localhost:3000"
	@echo "🔧 后端API: http://localhost:8000/api/v1"
	@echo "🤖 AI服务: http://localhost:5000/api/v1"
	@echo "🌸 Celery监控: http://localhost:5555"

# 停止开发环境
down:
	@echo "🛑 停止开发环境..."
	docker-compose down
	@echo "✅ 开发环境已停止"

# 查看日志
logs:
	@echo "📋 查看服务日志..."
	docker-compose logs -f

# 查看特定服务日志
logs-backend:
	docker-compose logs -f backend

logs-frontend:
	docker-compose logs -f frontend

logs-ai:
	docker-compose logs -f ai-service

# 运行测试
test:
	@echo "🧪 运行所有测试..."
	@$(MAKE) test-backend
	@$(MAKE) test-frontend
	@$(MAKE) test-ai

test-backend:
	@echo "🐍 运行后端测试..."
	docker-compose exec backend python manage.py test

test-frontend:
	@echo "⚛️ 运行前端测试..."
	docker-compose exec frontend npm run test:unit

test-ai:
	@echo "🤖 运行AI服务测试..."
	docker-compose exec ai-service python -m pytest

# 数据库迁移
migrate:
	@echo "🔄 运行数据库迁移..."
	docker-compose exec backend python manage.py makemigrations
	docker-compose exec backend python manage.py migrate
	@echo "✅ 数据库迁移完成"

# 创建超级用户
createsuperuser:
	@echo "👤 创建超级用户..."
	docker-compose exec backend python manage.py createsuperuser

# 收集静态文件
collectstatic:
	@echo "📁 收集静态文件..."
	docker-compose exec backend python manage.py collectstatic --noinput
	@echo "✅ 静态文件收集完成"

# 加载初始数据
loaddata:
	@echo "📊 加载初始数据..."
	docker-compose exec backend python manage.py loaddata fixtures/initial_data.json
	@echo "✅ 初始数据加载完成"

# 构建镜像
build:
	@echo "🏗️ 构建Docker镜像..."
	docker-compose build
	@echo "✅ 镜像构建完成"

# 重新构建镜像
rebuild:
	@echo "🔄 重新构建Docker镜像..."
	docker-compose build --no-cache
	@echo "✅ 镜像重新构建完成"

# 部署到生产环境
deploy:
	@echo "🚀 部署到生产环境..."
	docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d
	@$(MAKE) migrate
	@$(MAKE) collectstatic
	@echo "✅ 部署完成"

# 清理临时文件
clean:
	@echo "🧹 清理临时文件..."
	docker-compose down -v
	docker system prune -f
	cd frontend && rm -rf node_modules dist
	cd backend && find . -type d -name __pycache__ -delete
	cd ai-service && find . -type d -name __pycache__ -delete
	@echo "✅ 清理完成"

# 清理所有数据（谨慎使用）
clean-all:
	@echo "⚠️ 清理所有数据（包括数据库）..."
	@read -p "确定要继续吗？[y/N] " confirm && [ "$$confirm" = "y" ]
	docker-compose down -v --remove-orphans
	docker system prune -af --volumes
	docker volume prune -f
	@echo "✅ 清理完成"

# 进入后端shell
shell:
	@echo "🐚 进入后端shell..."
	docker-compose exec backend python manage.py shell

# 进入数据库
db:
	@echo "🗄️ 进入数据库..."
	docker-compose exec mysql mysql -u root -p

# 备份数据库
backup:
	@echo "💾 备份数据库..."
	mkdir -p backups
	docker-compose exec mysql mysqldump -u root -p soes_db > backups/backup_$$(date +%Y%m%d_%H%M%S).sql
	@echo "✅ 数据库备份完成"

# 恢复数据库
restore:
	@echo "📥 恢复数据库..."
	@read -p "请输入备份文件路径: " backup_file
	docker-compose exec -T mysql mysql -u root -p soes_db < $$backup_file
	@echo "✅ 数据库恢复完成"

# 重启服务
restart:
	@echo "🔄 重启服务..."
	docker-compose restart
	@echo "✅ 服务重启完成"

# 查看服务状态
status:
	@echo "📊 查看服务状态..."
	docker-compose ps

# 更新依赖
update:
	@echo "🔄 更新依赖..."
	cd backend && pip install -r requirements/production.txt --upgrade
	cd frontend && npm update
	cd ai-service && pip install -r requirements/production.txt --upgrade
	@echo "✅ 依赖更新完成"
EOF

    print_success "配置文件创建完成"
}

# 安装依赖
install_dependencies() {
    # "Install dependencies": currently this only seeds .env from the
    # template — actual package installs happen via docker/make later.
    print_info "📦 安装项目依赖..."

    # Seed .env from the template on first run only; never overwrite an
    # existing .env.
    if [[ ! -f .env ]]; then
        print_warning "未找到.env文件，正在创建..."
        cp .env.example .env
        print_info "请编辑 .env 文件配置您的环境变量"
    fi

    print_success "依赖安装完成"
}

# 显示完成信息
show_completion_info() {
    # Print the post-init "next steps" summary: env setup, make targets,
    # service URLs and pointers to the role documentation.
    echo ""
    print_success "🎉 项目初始化完成！"
    echo ""
    print_info "📋 下一步操作："
    echo ""
    echo "1️⃣  配置环境变量："
    echo "   vim .env"
    echo ""
    echo "2️⃣  启动开发环境："
    echo "   make dev"
    echo ""
    echo "3️⃣  运行数据库迁移："
    echo "   make migrate"
    echo ""
    echo "4️⃣  创建超级用户："
    # Fixed: was "makesuperuser", which is neither a command nor a target;
    # the generated Makefile's target is invoked as `make createsuperuser`.
    echo "   make createsuperuser"
    echo ""
    echo "5️⃣  访问应用："
    echo "   🌐 前端: http://localhost:3000"
    echo "   🔧 后端API: http://localhost:8000/api/v1"
    echo "   🤖 AI服务: http://localhost:5000/api/v1"
    echo "   🌸 Celery监控: http://localhost:5555"
    echo ""
    print_info "📚 更多信息："
    echo "   📖 查看文档: docs/"
    echo "   📋 查看命令: make help"
    echo "   📋 查看日志: make logs"
    echo ""
    print_info "🎯 开发团队角色提示词："
    echo "   👨‍💼 项目经理: docs/项目经理提示词.md"
    echo "   🎨 前端工程师: docs/前端工程师提示词.md"
    echo "   🐍 后端工程师: docs/后端工程师提示词.md"
    echo "   🤖 AI工程师: docs/AI工程师提示词.md"
    echo "   🔧 自动化流程: docs/自动化开发流程.md"
    echo ""
}

# 主函数
main() {
    # Orchestrate the full project bootstrap.
    show_banner

    print_info "🚀 开始初始化在线智能考试系统(AI-SOES)项目..."
    echo ""

    check_requirements
    create_project_structure
    setup_docker
    setup_configs
    install_dependencies
    # Run git init LAST so the initial commit includes every generated file
    # (docker-compose*.yml, Makefile, config/, .env.example). Previously it
    # ran before setup_docker/setup_configs and the first commit missed them.
    init_git_repository

    show_completion_info
}

# Error handling. `set -E` is required so the ERR trap is inherited by shell
# functions: without it, a command failing inside one of the init functions
# exits (via set -e) without ever running this trap or printing the message.
set -E
trap 'print_error "初始化失败，请检查错误信息"; exit 1' ERR

# Run the entry point, forwarding any command-line arguments.
main "$@"