#!/usr/bin/env python3
"""
环境检查脚本 - 验证微调环境是否正确配置
"""

import importlib
import importlib.metadata
import subprocess
import sys

import torch
import unsloth

def check_python_version():
    """Verify the running interpreter meets the minimum version (3.8+).

    Prints a banner plus the full version string and returns True when
    ``sys.version_info`` is at least (3, 8), False otherwise.
    """
    banner = "=" * 50
    print(banner)
    print("检查Python环境")
    print(banner)
    print(f"Python版本: {sys.version}")

    version_ok = sys.version_info >= (3, 8)
    if not version_ok:
        print("❌ Python版本过低，建议3.8+")
    else:
        print("✅ Python版本符合要求")
    return version_ok

def check_gpu():
    """Report CUDA availability and each device's name and total memory.

    Returns True when at least one CUDA device is visible to torch,
    False when CUDA is unavailable.
    """
    print("\n" + "=" * 50)
    print("检查GPU环境")
    print("=" * 50)

    if not torch.cuda.is_available():
        print("❌ CUDA不可用")
        return False

    n_devices = torch.cuda.device_count()
    print(f"✅ 检测到 {n_devices} 个GPU")

    for idx in range(n_devices):
        props = torch.cuda.get_device_properties(idx)
        total_gb = props.total_memory / 1024**3
        print(f"  GPU {idx}: {torch.cuda.get_device_name(idx)} ({total_gb:.1f} GB)")

    return True

def check_libraries():
    """Check that every library required for fine-tuning is importable.

    Prints one status line per dependency (with its version when it can be
    determined) and returns True only if all of them import successfully.
    """
    print("\n" + "=" * 50)
    print("检查Python库")
    print("=" * 50)

    # Required package -> human-readable purpose, shown next to the status.
    libraries = {
        'torch': '用于深度学习',
        'transformers': '用于预训练模型',
        'datasets': '用于数据处理',
        'peft': '用于参数高效微调',
        'bitsandbytes': '用于量化',
        'unsloth': '用于加速微调',
        'trl': '用于强化学习训练器',
        'modelscope': '用于模型下载'
    }

    all_good = True
    for lib, desc in libraries.items():
        try:
            # importlib.import_module is the documented replacement for
            # calling __import__ directly.
            module = importlib.import_module(lib)
        except ImportError:
            print(f"❌ {lib} - {desc} [未安装]")
            all_good = False
            continue
        # Prefer the module's own __version__; fall back to the installed
        # distribution metadata for packages that do not expose one, so we
        # report a real version instead of 'unknown' whenever possible.
        version = getattr(module, '__version__', None)
        if version is None:
            try:
                version = importlib.metadata.version(lib)
            except importlib.metadata.PackageNotFoundError:
                version = 'unknown'
        print(f"✅ {lib} ({version}) - {desc}")

    return all_good

def check_unsloth_specific():
    """Smoke-test Unsloth by importing its FastLanguageModel entry point.

    Any failure is reported and converted into a False return instead of
    propagating; returns True when the import succeeds.
    """
    print("\n" + "=" * 50)
    print("检查Unsloth功能")
    print("=" * 50)

    try:
        from unsloth import FastLanguageModel  # noqa: F401 — the import *is* the check
    except Exception as exc:
        print(f"❌ Unsloth功能检查失败: {exc}")
        return False

    print("✅ FastLanguageModel导入成功")

    # Architectures known to work with Unsloth's accelerated paths.
    print("✅ Unsloth可用，支持的模型架构:")
    print("  - Llama (Llama 2, Code Llama, Yi等)")
    print("  - Mistral (Mistral 7B, Mixtral等)")
    print("  - Qwen (Qwen 1.5, Qwen 2等)")
    print("  - Gemma")

    return True

def check_memory():
    """Report system RAM (when psutil is installed) and per-GPU memory usage.

    Returns True only when CUDA is available and every visible GPU has at
    least 20 GB of total memory; otherwise False.
    """
    print("\n" + "=" * 50)
    print("检查内存情况")
    print("=" * 50)

    memory_ok = True

    # System RAM — psutil is an optional dependency, so degrade gracefully.
    try:
        import psutil
        vm = psutil.virtual_memory()
        print(f"系统内存: {vm.total / 1024**3:.1f} GB (可用: {vm.available / 1024**3:.1f} GB)")
    except ImportError:
        print("无法检查系统内存 (psutil未安装)")

    if not torch.cuda.is_available():
        print("❌ 没有可用的GPU")
        return False

    for idx in range(torch.cuda.device_count()):
        total_gb = torch.cuda.get_device_properties(idx).total_memory / 1024**3
        allocated_gb = torch.cuda.memory_allocated(idx) / 1024**3
        reserved_gb = torch.cuda.memory_reserved(idx) / 1024**3
        print(f"GPU {idx} 内存: {total_gb:.1f} GB 总计")
        print(f"  已分配: {allocated_gb:.1f} GB, 已保留: {reserved_gb:.1f} GB")

        # 20 GB is the working threshold for Qwen3-32B in 4-bit mode.
        if total_gb >= 20:
            print(f"  ✅ GPU {idx} 内存足够运行Qwen3-32B (4bit)")
        else:
            print(f"  ⚠️  GPU {idx} 内存可能不足，建议20GB+")
            memory_ok = False

    return memory_ok

def check_disk_space():
    """Compare free disk space in the current directory with the estimated
    needs of a Qwen3-32B fine-tuning run (~71 GB total).

    Returns True when free space covers the estimate, False otherwise.
    """
    print("\n" + "=" * 50)
    print("检查磁盘空间")
    print("=" * 50)

    import shutil

    gib = 1024**3
    total_b, used_b, free_b = shutil.disk_usage(".")

    print("当前目录磁盘空间:")
    print(f"  总计: {total_b / gib:.1f} GB")
    print(f"  已使用: {used_b / gib:.1f} GB")
    print(f"  可用: {free_b / gib:.1f} GB")

    # Rough space budget: model weights dominate, data and outputs are small.
    model_gb, data_gb, output_gb = 65, 1, 5  # Qwen3-32B ≈ 65 GB
    needed_gb = model_gb + data_gb + output_gb

    print("\n预估需要空间:")
    print(f"  模型: ~{model_gb} GB")
    print(f"  数据集: ~{data_gb} GB")
    print(f"  输出: ~{output_gb} GB")
    print(f"  总计: ~{needed_gb} GB")

    if free_b / gib < needed_gb:
        print(f"⚠️  磁盘空间可能不足，建议至少{needed_gb}GB可用空间")
        return False
    print("✅ 磁盘空间充足")
    return True

def test_simple_model_loading():
    """End-to-end smoke test: download and load a tiny 4-bit model via Unsloth.

    Returns True when loading succeeds; any failure (missing package, network
    error, out-of-memory) is reported and yields False instead of raising.
    """
    print("\n" + "=" * 50)
    print("测试模型加载功能")
    print("=" * 50)

    try:
        from unsloth import FastLanguageModel

        print("尝试加载一个小型模型进行测试...")
        # TinyLlama keeps the download small while exercising the full
        # 4-bit loading path.
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name="unsloth/tinyllama-bnb-4bit",
            max_seq_length=512,
            dtype=None,
            load_in_4bit=True,
        )
        print("✅ 模型加载测试成功")

        # Release the test model right away so it does not occupy VRAM.
        del model, tokenizer
        torch.cuda.empty_cache()
    except Exception as exc:
        print(f"❌ 模型加载测试失败: {exc}")
        print("这可能是网络问题，实际微调时会下载到本地")
        return False

    return True

def main():
    """Run every environment check and print a summary verdict."""
    print("Qwen3 32B 微调环境检查")
    print("此脚本将检查您的环境是否适合运行Qwen3 32B微调")

    # Each check prints its own report and returns a pass/fail flag.
    results = [
        check_python_version(),
        check_gpu(),
        check_libraries(),
        check_unsloth_specific(),
        check_memory(),
        check_disk_space(),
        # test_simple_model_loading() is skipped by default: it needs network access.
    ]

    print("\n" + "=" * 50)
    print("检查总结")
    print("=" * 50)

    passed, total = sum(results), len(results)
    print(f"通过检查: {passed}/{total}")

    if passed == total:
        print("🎉 恭喜！您的环境已准备就绪，可以开始微调Qwen3 32B！")
        print("\n下一步:")
        print("1. 运行快速启动: ./scripts/quick_start.sh")
        print("2. 或查看详细文档: cat Qwen3_32B_微调指南.md")
    elif passed >= total - 1:
        # A single failed check is treated as "mostly ready".
        print("⚠️  环境基本就绪，但有一些小问题需要注意")
        print("建议解决上述问题后再开始微调")
    else:
        print("❌ 环境存在问题，请先解决上述问题")
        print("参考安装文档或联系技术支持")

# Allow running this file directly as a standalone check script.
if __name__ == "__main__":
    main()
