#!/bin/bash

# vLLM engine-initialization failure triage script.
# Quickly diagnoses the "Engine core initialization failed" error by
# checking the conda env, GPU, CUDA, PyTorch, vLLM and the model files.
#
# NOTE: no `set -e` on purpose — the sections below use `((PASSED++))`,
# which returns non-zero when the counter is 0 and would abort the script.

echo "========================================="
echo "vLLM 引擎初始化失败诊断"
echo "========================================="

# ANSI color codes for status output; constants, hence readonly.
readonly GREEN='\033[0;32m'
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# Running tallies of successful / failed checks, updated by each section.
PASSED=0
FAILED=0

#######################################
# Run one named check and record its outcome.
# NOTE(review): defined but never called in this script — the numbered
# sections below update PASSED/FAILED inline; kept for reuse.
# Globals:   GREEN, RED, NC (read); PASSED, FAILED (incremented)
# Arguments: $1 - human-readable check name
#            $2 - shell command string; its exit status decides pass/fail
# Outputs:   "检查 <name>... " followed by ✓ or ✗ on stdout
# Returns:   0 if the command succeeded, 1 otherwise
#######################################
check_item() {
    local name
    local check_cmd
    name="$1"
    check_cmd="$2"

    echo -n "检查 $name... "
    # eval is intentional: callers pass an arbitrary command string.
    # Never feed untrusted input into this function.
    if eval "$check_cmd" > /dev/null 2>&1; then
        echo -e "${GREEN}✓${NC}"
        ((PASSED++))
        return 0
    else
        echo -e "${RED}✗${NC}"
        ((FAILED++))
        return 1
    fi
}

# [1] Conda environment: the script expects to run inside the "qwen32b" env.
# If no env is active, source miniconda's shell hook and try to activate it;
# abort entirely on failure, since all later python-based checks depend on it.
echo ""
echo "【1】检查 Conda 环境..."
if [ -z "$CONDA_DEFAULT_ENV" ]; then
    echo -e "${YELLOW}⚠ 未激活 Conda 环境${NC}"
    echo "正在尝试激活 qwen32b 环境..."

    # NOTE(review): only probes $HOME/miniconda — installs under
    # miniconda3/anaconda3 will fall into the "cannot find Conda" branch.
    if [ -f "$HOME/miniconda/etc/profile.d/conda.sh" ]; then
        source "$HOME/miniconda/etc/profile.d/conda.sh"
        conda activate qwen32b 2>/dev/null || {
            echo -e "${RED}✗ 无法激活 qwen32b 环境${NC}"
            exit 1
        }
    else
        echo -e "${RED}✗ 无法找到 Conda${NC}"
        exit 1
    fi
else
    echo -e "${GREEN}✓ 当前环境: $CONDA_DEFAULT_ENV${NC}"
    # Being in a different env is only a warning, not a failure.
    if [ "$CONDA_DEFAULT_ENV" != "qwen32b" ]; then
        echo -e "${YELLOW}⚠ 建议使用 qwen32b 环境${NC}"
    fi
fi

# [2] GPU: query the first GPU's name/total memory/driver via nvidia-smi,
# then report current memory usage. Exactly one of PASSED/FAILED is bumped.
echo ""
echo "【2】检查 GPU..."
if ! command -v nvidia-smi > /dev/null 2>&1; then
    echo -e "${RED}✗ nvidia-smi 不可用${NC}"
    echo "  请安装 NVIDIA 驱动"
    ((FAILED++))
else
    gpu_summary=$(nvidia-smi --query-gpu=name,memory.total,driver_version --format=csv,noheader 2>/dev/null | head -1)
    if [ -z "$gpu_summary" ]; then
        echo -e "${RED}✗ nvidia-smi 无输出${NC}"
        ((FAILED++))
    else
        echo -e "${GREEN}✓ GPU 信息: $gpu_summary${NC}"
        ((PASSED++))

        # Current memory usage of the first GPU (used, total — in MiB).
        mem_usage=$(nvidia-smi --query-gpu=memory.used,memory.total --format=csv,noheader,nounits 2>/dev/null | head -1)
        if [ -n "$mem_usage" ]; then
            echo "  显存使用: $mem_usage"
        fi
    fi
fi

# [3] CUDA: ask PyTorch whether a CUDA device is usable. CUDA_AVAILABLE is
# read again by the summary section, so its name must not change.
echo ""
echo "【3】检查 CUDA 环境..."
CUDA_AVAILABLE=$(python -c "import torch; print(torch.cuda.is_available())" 2>/dev/null)
case "$CUDA_AVAILABLE" in
    True)
        echo -e "${GREEN}✓ CUDA 可用${NC}"
        cuda_ver=$(python -c "import torch; print(torch.version.cuda)" 2>/dev/null)
        gpu_cnt=$(python -c "import torch; print(torch.cuda.device_count())" 2>/dev/null)
        echo "  CUDA 版本: $cuda_ver"
        echo "  GPU 数量: $gpu_cnt"
        ((PASSED++))
        ;;
    *)
        # Anything other than "True" (including python missing) is a failure.
        echo -e "${RED}✗ CUDA 不可用${NC}"
        echo "  这是导致引擎初始化失败的主要原因"
        ((FAILED++))
        ;;
esac

# [4] PyTorch: report the installed version; a "+cuXXX" suffix in the
# version string indicates a CUDA-enabled build.
echo ""
echo "【4】检查 PyTorch..."
PYTORCH_VERSION=$(python -c "import torch; print(torch.__version__)" 2>/dev/null)
if [ -n "$PYTORCH_VERSION" ]; then
    echo -e "${GREEN}✓ PyTorch: $PYTORCH_VERSION${NC}"
    if echo "$PYTORCH_VERSION" | grep -q "cu"; then
        # Quote the expansion (SC2086) and use POSIX `grep -o` instead of
        # the GNU-only `grep -oP 'cu\d+'`.
        echo "  CUDA 版本: $(echo "$PYTORCH_VERSION" | grep -o 'cu[0-9][0-9]*')"
    fi
    ((PASSED++))
else
    echo -e "${RED}✗ PyTorch 未安装${NC}"
    ((FAILED++))
fi

# [5] vLLM: probe the installed package version. VLLM_VERSION is read again
# by the summary section, so its name must not change.
echo ""
echo "【5】检查 vLLM..."
VLLM_VERSION=$(python -c "import vllm; print(vllm.__version__)" 2>/dev/null)
if [ -z "$VLLM_VERSION" ]; then
    echo -e "${RED}✗ vLLM 未安装${NC}"
    ((FAILED++))
else
    echo -e "${GREEN}✓ vLLM: $VLLM_VERSION${NC}"
    ((PASSED++))
fi

# [6] Model files: the model directory and its key config files must exist.
# MODEL_PATH is read again by the summary section, so it must stay assigned.
echo ""
echo "【6】检查模型文件..."
MODEL_PATH="./models/Qwen/Qwen2.5-32B"
if [ ! -d "$MODEL_PATH" ]; then
    echo -e "${RED}✗ 模型目录不存在: $MODEL_PATH${NC}"
    ((FAILED++))
else
    echo -e "${GREEN}✓ 模型目录存在${NC}"

    # config.json is mandatory — without it the engine cannot load.
    if [ -f "$MODEL_PATH/config.json" ]; then
        echo -e "${GREEN}✓ config.json 存在${NC}"
        ((PASSED++))
    else
        echo -e "${RED}✗ config.json 不存在${NC}"
        ((FAILED++))
    fi

    # A missing tokenizer_config.json is only a warning, not a failure.
    if [ -f "$MODEL_PATH/tokenizer_config.json" ]; then
        echo -e "${GREEN}✓ tokenizer_config.json 存在${NC}"
        ((PASSED++))
    else
        echo -e "${YELLOW}⚠ tokenizer_config.json 不存在${NC}"
    fi

    # Report the on-disk size of the whole model directory.
    echo "  模型大小: $(du -sh "$MODEL_PATH" 2>/dev/null | cut -f1)"

    ((PASSED++))
fi

# [7] System resources: host RAM and free disk space under the current dir.
echo ""
echo "【7】检查系统资源..."
# Memory: one `free` invocation parsed once, instead of the previous two
# separate calls. Column $7 is "available" in procps-ng free — TODO confirm
# on non-procps (e.g. busybox) systems.
read -r TOTAL_MEM AVAIL_MEM <<< "$(free -g | awk '/^Mem:/{print $2, $7}')"
echo "  总内存: ${TOTAL_MEM}GB"
echo "  可用内存: ${AVAIL_MEM}GB"

# Disk: the "Avail" column of df for the filesystem holding the cwd.
DISK_AVAIL=$(df -h . | awk 'NR==2 {print $4}')
echo "  可用磁盘空间: $DISK_AVAIL"

# [8] Smoke test: importing vllm surfaces most dependency/ABI problems
# early. stderr is deliberately merged into stdout (2>&1) so any import
# traceback is visible to the user.
echo ""
echo "【8】尝试简单测试..."
echo "测试 vLLM 导入..."
if ! python -c "from vllm import LLM; print('✓ vLLM 可以导入')" 2>&1; then
    echo -e "${RED}✗ vLLM 导入失败${NC}"
    ((FAILED++))
else
    echo -e "${GREEN}✓ vLLM 导入成功${NC}"
    ((PASSED++))
fi

# Summary: report pass/fail tallies and print targeted remediation advice
# based on which earlier checks (CUDA, model dir, vLLM install) failed.
echo ""
echo "========================================="
echo "诊断总结"
echo "========================================="
echo -e "${GREEN}通过: $PASSED${NC}"
# Quote $FAILED in numeric tests — an empty value would otherwise make
# `[` fail with "unary operator expected" instead of taking a branch.
if [ "$FAILED" -gt 0 ]; then
    echo -e "${RED}失败: $FAILED${NC}"
else
    echo -e "${GREEN}失败: $FAILED${NC}"
fi

echo ""
if [ "$FAILED" -eq 0 ]; then
    echo -e "${GREEN}✓ 所有检查通过${NC}"
    echo ""
    echo "如果仍然遇到 'Engine core initialization failed' 错误，请尝试："
    echo "  1. 降低 GPU 内存使用率: --gpu-memory-utilization 0.7"
    echo "  2. 减小最大序列长度: --max-model-len 2048"
    echo "  3. 使用单 GPU 模式: CUDA_VISIBLE_DEVICES=0"
    echo "  4. 查看完整错误日志（在 'Engine core initialization failed' 之前的错误）"
else
    echo -e "${YELLOW}⚠ 发现问题${NC}"
    echo ""
    echo "建议的修复步骤："

    # CUDA is the most common root cause of engine-init failures.
    if [ "$CUDA_AVAILABLE" != "True" ]; then
        echo "  1. 修复 CUDA 问题（最重要）:"
        echo "     - 运行: bash diagnose_gpu.sh"
        echo "     - 检查: nvidia-smi"
        echo "     - 参考手册: CUDA 不可用章节"
    fi

    # Model directory missing → re-run the deployment/download script.
    if [ ! -d "$MODEL_PATH" ]; then
        echo "  2. 重新下载模型文件:"
        echo "     - 运行: bash deploy_qwen.sh"
    fi

    # vLLM not importable → install dependencies.
    if [ -z "$VLLM_VERSION" ]; then
        echo "  3. 安装 vLLM:"
        echo "     - 运行: bash install_missing_deps.sh"
    fi

    echo ""
    echo "详细解决方案请参考手册: vLLM 引擎初始化失败章节"
fi

echo ""
echo "========================================="
echo "快速修复命令"
echo "========================================="
echo ""
echo "如果 CUDA 可用但仍有问题，尝试："
echo ""
echo "1. 降低内存使用率启动:"
echo "   python -m vllm.entrypoints.openai.api_server \\"
echo "     --model ./models/Qwen/Qwen2.5-32B \\"
echo "     --trust-remote-code \\"
echo "     --host 0.0.0.0 \\"
echo "     --port 8000 \\"
echo "     --gpu-memory-utilization 0.7 \\"
echo "     --max-model-len 2048"
echo ""
echo "2. 使用调试模式获取详细错误:"
echo "   export VLLM_LOGGING_LEVEL=DEBUG"
echo "   python -m vllm.entrypoints.openai.api_server ..."
echo ""
echo "3. 使用 Python API 测试（更详细的错误信息）:"
echo "   python -c \""
echo "   from vllm import LLM"
echo "   llm = LLM(model='./models/Qwen/Qwen2.5-32B', trust_remote_code=True)"
echo "   \""

