#!/usr/bin/env bash

# Qwen2.5-32B model-server launcher: quickly starts the vLLM OpenAI API server.
#
# Usage:          bash start_qwen.sh
# Env overrides:  HOST, PORT, MAX_MODEL_LEN, GPU_MEMORY_UTIL

# -e: abort on unhandled command failure.
# -o pipefail: a pipeline fails if ANY stage fails (not just the last).
set -eo pipefail

# ANSI color constants for status output (NC = no color / reset).
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly RED='\033[0;31m'
readonly NC='\033[0m'

echo "========================================="
echo "Qwen2.5-32B 服务启动脚本"
echo "========================================="

# Run everything relative to the script's own directory so the model path
# and log files resolve correctly no matter where the user invokes us from.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# --- Model files -----------------------------------------------------------
# The server cannot start without the downloaded weights; fail fast with a
# pointer to the deploy script instead of letting vLLM crash later.
MODEL_PATH="./models/Qwen/Qwen2.5-32B"
[ -d "$MODEL_PATH" ] || {
    echo -e "${RED}错误: 模型目录不存在: $MODEL_PATH${NC}"
    echo "请先运行部署脚本: bash deploy_qwen.sh"
    exit 1
}

# --- Conda environment handling --------------------------------------------
# `conda activate` is a shell *function* injected by conda.sh. Child shells
# inherit the CONDA_DEFAULT_ENV variable but NOT the function, so the hook
# must be sourced before any activation attempt — even when an env already
# appears active (bug fix for the "switch environment" path below).
_load_conda_hook() {
    # Sources the first conda hook found; returns 1 if none exists.
    local hook
    for hook in "$HOME/miniconda/etc/profile.d/conda.sh" \
                "$HOME/anaconda3/etc/profile.d/conda.sh"; do
        if [ -f "$hook" ]; then
            # shellcheck disable=SC1090
            source "$hook"
            return 0
        fi
    done
    return 1
}

if [ -z "$CONDA_DEFAULT_ENV" ]; then
    echo "正在激活 Conda 环境..."

    if ! _load_conda_hook; then
        echo -e "${YELLOW}警告: 无法自动激活 Conda 环境${NC}"
        echo "请手动运行: conda activate qwen32b"
        exit 1
    fi

    if conda activate qwen32b 2>/dev/null; then
        echo -e "${GREEN}✓ 已激活 qwen32b 环境${NC}"
    else
        echo -e "${RED}错误: 无法激活 qwen32b 环境${NC}"
        echo "请手动运行: conda activate qwen32b"
        exit 1
    fi
elif [ "$CONDA_DEFAULT_ENV" != "qwen32b" ]; then
    echo -e "${YELLOW}警告: 当前环境是 $CONDA_DEFAULT_ENV，建议使用 qwen32b${NC}"
    read -p "是否切换到 qwen32b 环境? (y/N): " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        # Bug fix: ensure the conda function exists in THIS shell before
        # activating; previously this could fail (and, under set -e, exit
        # without any message) when the script was run from a fresh shell.
        _load_conda_hook || true
        if ! conda activate qwen32b 2>/dev/null; then
            echo -e "${RED}错误: 无法激活 qwen32b 环境${NC}"
            echo "请手动运行: conda activate qwen32b"
            exit 1
        fi
    fi
else
    echo -e "${GREEN}✓ 已在 qwen32b 环境中${NC}"
fi

# --- Dependency checks ------------------------------------------------------
echo ""
echo "检查依赖..."

# vLLM is mandatory; bail out with a pointer to the install script.
python -c "import vllm" 2>/dev/null || {
    echo -e "${RED}错误: vLLM 未安装${NC}"
    echo "请运行: bash install_missing_deps.sh"
    exit 1
}

# A GPU is strongly recommended; let the user explicitly opt in to continue
# without CUDA (anything other than y/Y aborts).
if ! python -c "import torch; assert torch.cuda.is_available()" 2>/dev/null; then
    echo -e "${YELLOW}警告: CUDA 不可用${NC}"
    echo "模型需要 GPU 才能运行"
    read -p "是否继续? (y/N): " -n 1 -r
    echo
    [[ $REPLY =~ ^[Yy]$ ]] || exit 1
fi

# --- Runtime configuration --------------------------------------------------
# Every knob is overridable from the environment, e.g.:
#   PORT=8080 MAX_MODEL_LEN=8192 bash start_qwen.sh
HOST="${HOST:-0.0.0.0}"                      # bind address (0.0.0.0 = all interfaces)
PORT="${PORT:-8000}"                         # API listen port
MAX_MODEL_LEN="${MAX_MODEL_LEN:-4096}"       # max context length in tokens
GPU_MEMORY_UTIL="${GPU_MEMORY_UTIL:-0.9}"    # fraction of GPU memory vLLM may claim

echo ""
echo "========================================="
echo "启动配置"
echo "========================================="
echo "模型路径: $MODEL_PATH"
echo "监听地址: $HOST:$PORT"
echo "最大序列长度: $MAX_MODEL_LEN"
echo "GPU 内存使用率: $GPU_MEMORY_UTIL"
echo ""

# --- Run-mode selection -----------------------------------------------------
echo "选择运行模式:"
echo "  1. 前台运行（适合测试，Ctrl+C 停止）"
echo "  2. 后台运行（适合生产，输出到日志文件）"
echo "  3. screen 会话（适合开发，可随时连接）"
# -r keeps read from interpreting backslashes in the input (fix: was missing).
read -r -p "请选择 (1/2/3) [默认: 1]: " mode
mode=${mode:-1}

case $mode in
    1)
        # --- Foreground: inherits this shell's env; Ctrl+C stops the server.
        echo ""
        echo -e "${GREEN}启动服务（前台运行）...${NC}"
        echo "按 Ctrl+C 停止服务"
        echo ""
        python -m vllm.entrypoints.openai.api_server \
            --model "$MODEL_PATH" \
            --trust-remote-code \
            --host "$HOST" \
            --port "$PORT" \
            --max-model-len "$MAX_MODEL_LEN" \
            --gpu-memory-utilization "$GPU_MEMORY_UTIL"
        ;;
    2)
        # --- Background: nohup + timestamped log; PID persisted for later kill.
        LOG_FILE="qwen_server_$(date +%Y%m%d_%H%M%S).log"
        echo ""
        echo -e "${GREEN}启动服务（后台运行）...${NC}"
        echo "日志文件: $LOG_FILE"
        echo ""
        nohup python -m vllm.entrypoints.openai.api_server \
            --model "$MODEL_PATH" \
            --trust-remote-code \
            --host "$HOST" \
            --port "$PORT" \
            --max-model-len "$MAX_MODEL_LEN" \
            --gpu-memory-utilization "$GPU_MEMORY_UTIL" \
            > "$LOG_FILE" 2>&1 &

        PID=$!
        # Record the PID so a later session can stop the server without pgrep.
        echo "$PID" > qwen_server.pid
        echo "服务已启动，PID: $PID"
        echo ""
        echo "查看日志: tail -f $LOG_FILE"
        echo "停止服务: kill $PID 或 pkill -f 'vllm.entrypoints.openai.api_server'"
        echo "检查状态: ps aux | grep vllm"
        ;;
    3)
        echo ""
        echo -e "${GREEN}启动服务（screen 会话）...${NC}"

        # Install screen on demand (assumes a Debian/Ubuntu host — apt).
        if ! command -v screen &> /dev/null; then
            echo "screen 未安装，正在安装..."
            sudo apt install -y screen
        fi

        # Reuse or rename when a 'qwen' session already exists.
        if screen -list | grep -q "qwen"; then
            echo -e "${YELLOW}警告: 已存在名为 'qwen' 的 screen 会话${NC}"
            read -p "是否连接到现有会话? (y/N): " -n 1 -r
            echo
            if [[ $REPLY =~ ^[Yy]$ ]]; then
                screen -r qwen
                exit 0
            else
                read -r -p "请输入新的会话名称 [默认: qwen]: " session_name
                session_name=${session_name:-qwen}
            fi
        else
            session_name="qwen"
        fi

        # Bug fix: the screen session spawns a fresh non-login bash in which
        # the `conda activate` shell function is NOT defined, so the old
        # command failed before reaching vLLM. Source the conda hook inside
        # the session command so the activation actually works.
        CONDA_HOOK=""
        for hook in "$HOME/miniconda/etc/profile.d/conda.sh" \
                    "$HOME/anaconda3/etc/profile.d/conda.sh"; do
            if [ -f "$hook" ]; then
                CONDA_HOOK="$hook"
                break
            fi
        done

        SERVER_CMD="cd '$SCRIPT_DIR' && python -m vllm.entrypoints.openai.api_server --model '$MODEL_PATH' --trust-remote-code --host '$HOST' --port '$PORT' --max-model-len '$MAX_MODEL_LEN' --gpu-memory-utilization '$GPU_MEMORY_UTIL'"
        if [ -n "$CONDA_HOOK" ]; then
            START_CMD="source '$CONDA_HOOK' && conda activate qwen32b && $SERVER_CMD"
        else
            # No hook found; fall back and hope conda is already initialized.
            START_CMD="conda activate qwen32b && $SERVER_CMD"
        fi

        # `exec bash` keeps the session alive after the server exits so the
        # user can inspect any error output.
        screen -dmS "$session_name" bash -c "$START_CMD; exec bash"

        echo "服务已在 screen 会话 '$session_name' 中启动"
        echo ""
        echo "连接会话: screen -r $session_name"
        echo "列出会话: screen -ls"
        echo "分离会话: 按 Ctrl+A，然后按 D"
        ;;
    *)
        echo -e "${RED}无效选择${NC}"
        exit 1
        ;;
esac

# Final summary: endpoints plus a one-line smoke-test command.
cat <<EOF

=========================================
服务信息
=========================================
API 地址: http://$HOST:$PORT
API 文档: http://$HOST:$PORT/docs

测试命令:
  curl http://localhost:$PORT/v1/models

EOF

