#!/bin/bash
# Quick smoke test for the local XrayGLM model-loading fix.
# Checks the Python environment, required packages, GPU availability and
# model/tokenizer paths, then runs test_model_loading.py and reports the result.

echo "=================================="
echo "🔧 XrayGLM模型加载测试"
echo "=================================="
echo ""

# Run from the script's own directory; abort if the cd fails (SC2164) so the
# relative invocation of test_model_loading.py below cannot hit the wrong cwd.
cd "$(dirname "$0")" || { echo "❌ 无法切换到脚本目录"; exit 1; }

# Step 1: Python interpreter location and version.
echo "1️⃣ 检查Python环境..."
command -v python   # POSIX-portable replacement for 'which'
python --version
echo ""

# Step 2: required Python packages. Stderr is suppressed deliberately — a
# missing package should print the ❌ line, not a traceback.
echo "2️⃣ 检查必要的Python包..."
python -c "import torch; print(f'✅ PyTorch {torch.__version__}')" 2>/dev/null || echo "❌ PyTorch未安装"
python -c "import transformers; print(f'✅ Transformers {transformers.__version__}')" 2>/dev/null || echo "❌ Transformers未安装"
python -c "import sat; print('✅ SAT (SwissArmyTransformer)')" 2>/dev/null || echo "❌ SAT未安装"
echo ""

# Step 3: GPU availability via torch.cuda.
echo "3️⃣ 检查GPU可用性..."
python -c "import torch; print(f'GPU可用: {torch.cuda.is_available()}'); print(f'GPU数量: {torch.cuda.device_count()}' if torch.cuda.is_available() else '')"
echo ""

# Step 4: model and tokenizer directories must exist before loading is attempted.
echo "4️⃣ 检查模型文件..."
MODEL_PATH="/data/Matrix/yy/xrayglm/XrayGLM/checkpoints/checkpoints-XrayGLM-3000"
TOKENIZER_PATH="/data/Matrix/yy/xrayglm/XrayGLM/VisualGLM-6B-SAT/visual-6b"

if [[ -d "$MODEL_PATH" ]]; then
    echo "✅ 模型路径存在: $MODEL_PATH"
else
    echo "❌ 模型路径不存在: $MODEL_PATH"
fi

if [[ -d "$TOKENIZER_PATH" ]]; then
    echo "✅ Tokenizer路径存在: $TOKENIZER_PATH"
else
    echo "❌ Tokenizer路径不存在: $TOKENIZER_PATH"
fi
echo ""

# Step 5: run the actual loading test. Test the command directly in 'if'
# instead of checking $? afterwards (SC2181) — robust against later edits
# inserting commands between the run and the check.
echo "5️⃣ 运行模型加载测试..."
echo "=================================="
if python test_model_loading.py; then
    echo ""
    echo "=================================="
    echo "✅ 测试通过！"
    echo "=================================="
    echo ""
    echo "📝 接下来你可以："
    echo "   1. 启动后端服务: bash scripts/start_backend.sh"
    echo "   2. 启动前端服务: bash scripts/start_frontend.sh"
    echo "   3. 访问: http://localhost:5174"
    echo ""
else
    echo ""
    echo "=================================="
    echo "❌ 测试失败"
    echo "=================================="
    echo ""
    echo "📝 请检查："
    echo "   1. 查看上面的错误信息"
    echo "   2. 确认模型文件路径正确"
    echo "   3. 确认所有依赖已安装"
    echo "   4. 查看详细文档: cat XRAYGLM_FIX.md"
    echo ""
fi