#!/bin/bash

# GPU/CUDA diagnostic script.
# Used to troubleshoot cases where vLLM cannot detect a GPU: checks the
# NVIDIA driver, CUDA environment variables, PyTorch CUDA support, vLLM
# importability, and common container pitfalls, then prints a summary.

echo "========================================="
echo "GPU/CUDA 环境诊断"
echo "========================================="

# ANSI color codes for status output (NC = no color / reset).
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Global counters, incremented by the check sections below and
# reported in the final summary.
PASSED=0
FAILED=0

#######################################
# Run one named check silently and report pass/fail.
# Globals:   GREEN, RED, NC (read); PASSED, FAILED (written)
# Arguments: $1 - human-readable check name
#            $2 - command string, executed via eval (trusted input only)
# Outputs:   "<name>... ✓/✗" line to stdout
# Returns:   0 if the command succeeded, 1 otherwise
#######################################
check_item() {
    local name="$1"
    local check_cmd="$2"

    echo -n "检查 $name... "
    # NOTE: $check_cmd comes from hard-coded call sites in this script;
    # never pass untrusted data here (eval).
    if eval "$check_cmd" > /dev/null 2>&1; then
        echo -e "${GREEN}✓${NC}"
        # Plain assignment instead of ((PASSED++)): the increment form
        # returns status 1 when the old value is 0, which would abort
        # the script if `set -e` is ever enabled.
        PASSED=$((PASSED + 1))
        return 0
    else
        echo -e "${RED}✗${NC}"
        FAILED=$((FAILED + 1))
        return 1
    fi
}

# --- Section 1: NVIDIA driver (nvidia-smi availability and GPU list) ---
echo ""
echo "【1】检查 NVIDIA 驱动..."
if command -v nvidia-smi &> /dev/null; then
    echo -e "${GREEN}✓ nvidia-smi 命令可用${NC}"
    echo "GPU 信息："
    # nvidia-smi CSV fields are separated by ", " — splitting on ',' alone
    # leaves a leading space on every field after the first, which doubled
    # the indentation in the report. Strip it before printing.
    # (The loop body runs in a pipeline subshell; it only prints, so no
    # variable updates need to survive the pipe.)
    nvidia-smi --query-gpu=name,driver_version,memory.total --format=csv,noheader \
        | while IFS=',' read -r name driver memory; do
            driver="${driver# }"
            memory="${memory# }"
            echo "  - GPU: $name"
            echo "    驱动版本: $driver"
            echo "    显存: $memory"
        done
    PASSED=$((PASSED + 1))
else
    echo -e "${RED}✗ nvidia-smi 命令不可用${NC}"
    echo "  可能原因: NVIDIA 驱动未安装"
    echo "  解决方案: 安装 NVIDIA 驱动"
    FAILED=$((FAILED + 1))
fi

# --- Section 2: CUDA-related environment variables ---
echo ""
echo "【2】检查 CUDA 环境变量..."

# CUDA_HOME: counted as a pass when set; otherwise hint at the usual location.
if [[ -n "$CUDA_HOME" ]]; then
    echo -e "${GREEN}✓ CUDA_HOME: $CUDA_HOME${NC}"
    PASSED=$((PASSED + 1))
else
    echo -e "${YELLOW}⚠ CUDA_HOME 未设置${NC}"
    if [[ -d "/usr/local/cuda" ]]; then
        echo "  发现 /usr/local/cuda，建议设置: export CUDA_HOME=/usr/local/cuda"
    fi
fi

# CUDA_VISIBLE_DEVICES: purely informational, not counted either way.
if [[ -z "$CUDA_VISIBLE_DEVICES" ]]; then
    echo -e "${YELLOW}⚠ CUDA_VISIBLE_DEVICES 未设置（这是正常的）${NC}"
else
    echo -e "${GREEN}✓ CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES${NC}"
fi

# LD_LIBRARY_PATH: only a CUDA-containing value counts as a pass.
case "$LD_LIBRARY_PATH" in
    "")
        echo -e "${YELLOW}⚠ LD_LIBRARY_PATH 未设置${NC}"
        ;;
    *cuda*)
        echo -e "${GREEN}✓ LD_LIBRARY_PATH 包含 CUDA 路径${NC}"
        PASSED=$((PASSED + 1))
        ;;
    *)
        echo -e "${YELLOW}⚠ LD_LIBRARY_PATH 不包含 CUDA 路径${NC}"
        ;;
esac

# --- Section 3: PyTorch CUDA support ---
echo ""
echo "【3】检查 PyTorch CUDA 支持..."
if command -v python &> /dev/null; then
    # Query availability, CUDA version and device count in a SINGLE
    # interpreter launch instead of three — importing torch is slow, so
    # the original three separate `python -c "import torch; ..."` calls
    # tripled the wait for no benefit.
    TORCH_INFO=$(python -c "import torch; print(torch.cuda.is_available()); print(torch.version.cuda); print(torch.cuda.device_count())" 2>/dev/null)
    { read -r CUDA_AVAILABLE; read -r CUDA_VERSION; read -r GPU_COUNT; } <<< "$TORCH_INFO"
    if [ "$CUDA_AVAILABLE" = "True" ]; then
        echo -e "${GREEN}✓ PyTorch 检测到 CUDA${NC}"
        echo "  CUDA 版本: $CUDA_VERSION"
        echo "  GPU 数量: $GPU_COUNT"
        PASSED=$((PASSED + 1))
    else
        # Covers both "torch importable but CUDA unavailable" and
        # "torch not importable at all" (TORCH_INFO empty in that case).
        echo -e "${RED}✗ PyTorch 未检测到 CUDA${NC}"
        echo "  可能原因:"
        echo "    1. PyTorch 安装的是 CPU 版本"
        echo "    2. CUDA 版本不匹配"
        echo "    3. CUDA 运行时库未正确配置"
        FAILED=$((FAILED + 1))
    fi
else
    echo -e "${YELLOW}⚠ Python 未找到${NC}"
fi

# --- Section 4: vLLM importability / platform detection ---
echo ""
echo "【4】检查 vLLM 平台检测..."
if command -v python &> /dev/null; then
    # Try importing vLLM's entry modules; capture stdout+stderr so any
    # traceback text can be shown in the failure branch below.
    PLATFORM_CHECK=$(python -c "
import sys
try:
    from vllm.utils import get_cpu_memory
    from vllm.config import DeviceConfig
    print('vLLM 可以导入')
except Exception as e:
    print(f'vLLM 导入错误: {e}')
    sys.exit(1)
" 2>&1)

    # Success marker printed by the snippet above.
    case "$PLATFORM_CHECK" in
        *"可以导入"*)
            echo -e "${GREEN}✓ vLLM 可以正常导入${NC}"
            PASSED=$((PASSED + 1))
            ;;
        *)
            echo -e "${RED}✗ vLLM 导入失败${NC}"
            echo "  错误: $PLATFORM_CHECK"
            FAILED=$((FAILED + 1))
            ;;
    esac
fi

# --- Section 5: common pitfalls (container detection) ---
echo ""
echo "【5】常见问题检查..."
echo "检查是否在容器中运行..."
# Two heuristics: Docker's marker file, or "docker" in the cgroup path.
in_container=false
if [[ -f /.dockerenv ]]; then
    in_container=true
elif grep -q docker /proc/self/cgroup 2>/dev/null; then
    in_container=true
fi
if [[ "$in_container" == true ]]; then
    echo -e "${YELLOW}⚠ 检测到在容器中运行${NC}"
    echo "  确保容器有 GPU 访问权限:"
    echo "    - 使用 --gpus all 或 --device 参数"
    echo "    - 检查 nvidia-container-toolkit 是否安装"
fi

# --- Summary and remediation advice ---
echo ""
echo "========================================="
echo "诊断总结"
echo "========================================="
echo -e "${GREEN}通过: $PASSED${NC}"
# The failure count is shown in red only when non-zero.
if (( FAILED > 0 )); then
    echo -e "${RED}失败: $FAILED${NC}"
else
    echo -e "${GREEN}失败: $FAILED${NC}"
fi

echo ""
if (( FAILED == 0 )); then
    echo -e "${GREEN}✓ 所有检查通过！${NC}"
else
    echo -e "${YELLOW}⚠ 发现问题，建议：${NC}"
    # Static advice text; quoted delimiter keeps $CUDA_HOME etc. literal.
    cat <<'ADVICE'

1. 如果 nvidia-smi 不可用：
   安装 NVIDIA 驱动: sudo apt install nvidia-driver-xxx

2. 如果 PyTorch 未检测到 CUDA：
   重新安装 PyTorch CUDA 版本:
   pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121

3. 如果 CUDA 环境变量未设置：
   添加到 ~/.bashrc:
   export CUDA_HOME=/usr/local/cuda
   export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH

4. 如果在容器中：
   确保使用正确的 Docker 运行参数
   例如: docker run --gpus all ...
ADVICE
fi

