#!/usr/bin/env python3
"""
GPU训练问题修复脚本
解决PyTorch GPU检测和OpenMP冲突问题
"""

import os
import platform
import re
import subprocess
import sys

def fix_openmp_conflict():
    """Set env vars that work around duplicate-OpenMP-runtime crashes.

    Allowing duplicate OpenMP libraries avoids the common
    "libiomp5 already initialized" abort seen when PyTorch and other
    native libraries each bundle their own OpenMP runtime.
    """
    print("🔧 修复OpenMP库冲突...")

    # Apply both workarounds in one shot; values are strings because
    # os.environ only accepts str.
    openmp_settings = {
        'KMP_DUPLICATE_LIB_OK': 'TRUE',
        'OMP_NUM_THREADS': '1',
    }
    os.environ.update(openmp_settings)

    print("✅ OpenMP环境变量已设置:")
    print(f"  KMP_DUPLICATE_LIB_OK = {os.environ.get('KMP_DUPLICATE_LIB_OK')}")
    print(f"  OMP_NUM_THREADS = {os.environ.get('OMP_NUM_THREADS')}")

def check_current_pytorch():
    """Report the installed PyTorch build and its CUDA status.

    Returns:
        bool: True only when PyTorch is importable AND CUDA is usable;
        False when PyTorch is missing or is a CPU-only build.
    """
    print("\n" + "="*50)
    print("检查当前PyTorch安装")
    print("="*50)

    # Guard clause: bail out early when PyTorch is not importable at all.
    try:
        import torch
    except ImportError:
        print("❌ PyTorch未安装")
        return False

    print(f"✅ PyTorch已安装，版本: {torch.__version__}")
    print(f"CUDA可用: {torch.cuda.is_available()}")

    if not torch.cuda.is_available():
        print("❌ CUDA不可用 - 需要重新安装支持CUDA的PyTorch")
        return False

    print(f"CUDA版本: {torch.version.cuda}")
    gpu_count = torch.cuda.device_count()
    print(f"GPU数量: {gpu_count}")
    for idx in range(gpu_count):
        print(f"  GPU {idx}: {torch.cuda.get_device_name(idx)}")
    return True

def _windows_gpu_names():
    """Return video-controller names on Windows, trying wmic then PowerShell.

    ``wmic`` is deprecated and removed from recent Windows builds, so when it
    fails we fall back to CIM via PowerShell. Returns an empty list when
    neither query succeeds.
    """
    # NOTE: pass a plain argument list without shell=True; mixing a list
    # with shell=True on Windows is misleading and unnecessary here.
    result = subprocess.run(
        ['wmic', 'path', 'win32_VideoController', 'get', 'name'],
        capture_output=True, text=True,
    )
    if result.returncode == 0:
        # First line of wmic output is the "Name" column header.
        lines = [ln.strip() for ln in result.stdout.strip().split('\n')[1:]]
        return [ln for ln in lines if ln and ln != "Name"]

    result = subprocess.run(
        ['powershell', '-NoProfile', '-Command',
         '(Get-CimInstance Win32_VideoController).Name'],
        capture_output=True, text=True,
    )
    if result.returncode == 0:
        return [ln.strip() for ln in result.stdout.strip().split('\n') if ln.strip()]
    return []


def detect_gpu_type():
    """Detect which GPU vendors are present (Windows only).

    Returns:
        tuple[bool, bool, bool]: (nvidia_gpu, amd_gpu, intel_gpu) flags.
        All False on non-Windows systems or when detection fails.
    """
    print("\n" + "="*50)
    print("检测GPU硬件")
    print("="*50)

    nvidia_gpu = False
    amd_gpu = False
    intel_gpu = False

    try:
        if platform.system() == "Windows":
            names = _windows_gpu_names()
            if names:
                print("检测到的显卡:")
            for name in names:
                print(f"  - {name}")
                upper = name.upper()
                # Vendor substrings decide CUDA support; only NVIDIA can
                # run CUDA-enabled PyTorch.
                if "NVIDIA" in upper:
                    nvidia_gpu = True
                    print("    ✅ NVIDIA显卡 - 支持CUDA")
                elif "AMD" in upper or "RADEON" in upper:
                    amd_gpu = True
                    print("    ⚠️ AMD显卡 - 不支持CUDA")
                elif "INTEL" in upper:
                    intel_gpu = True
                    print("    ℹ️ Intel集成显卡 - 不支持CUDA")
    except Exception as e:
        # Best-effort detection: report and fall through with all-False flags.
        print(f"GPU检测失败: {e}")

    return nvidia_gpu, amd_gpu, intel_gpu

def _detect_cuda_toolkit_version():
    """Return the CUDA toolkit version reported by ``nvcc`` (e.g. "12.1").

    Returns None when nvcc is missing, fails, or its output has no
    recognizable "release X.Y" marker.
    """
    try:
        result = subprocess.run(['nvcc', '--version'], capture_output=True, text=True)
    except (FileNotFoundError, OSError):
        # nvcc not on PATH: no CUDA toolkit installed.
        return None
    if result.returncode != 0:
        return None
    match = re.search(r'release (\d+\.\d+)', result.stdout)
    return match.group(1) if match else None


def install_pytorch_cuda():
    """Guide (and optionally run) installation of a CUDA-enabled PyTorch.

    Detects the local CUDA toolkit via nvcc, prints matching pip commands,
    then asks the user whether to execute them automatically.

    Returns:
        bool: True only when an automatic install completed successfully.
    """
    print("\n" + "="*50)
    print("安装支持CUDA的PyTorch")
    print("="*50)

    cuda_version = _detect_cuda_toolkit_version()
    print(f"检测到的CUDA版本: {cuda_version if cuda_version else '未检测到'}")

    # pip commands keyed by CUDA major.minor, plus a CPU-only fallback.
    install_commands = {
        "12.1": "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121",
        "11.8": "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118",
        "cpu": "pip install torch torchvision torchaudio"
    }

    print("\n推荐的安装步骤:")
    print("1. 卸载现有PyTorch:")
    print("   pip uninstall torch torchvision torchaudio -y")

    print("\n2. 安装支持CUDA的PyTorch:")
    if cuda_version:
        if cuda_version.startswith("12"):
            print(f"   {install_commands['12.1']}")
        elif cuda_version.startswith("11"):
            print(f"   {install_commands['11.8']}")
        else:
            print("   访问 https://pytorch.org/get-started/locally/ 选择合适版本")
    else:
        print("   由于未检测到CUDA，推荐安装最新CUDA 12.1版本:")
        print(f"   {install_commands['12.1']}")

    print("\n3. 或者如果只有CPU，安装CPU版本:")
    print(f"   {install_commands['cpu']}")

    # Offer to run the commands automatically. EOFError is handled so a
    # closed/non-interactive stdin cancels gracefully instead of crashing.
    try:
        choice = input("\n是否自动执行安装? (y/n): ").lower().strip()
        if choice in ['y', 'yes']:
            print("\n开始安装...")

            # Remove any existing (possibly CPU-only) build first.
            print("卸载现有PyTorch...")
            subprocess.run([sys.executable, '-m', 'pip', 'uninstall', 'torch', 'torchvision', 'torchaudio', '-y'])

            # Pick the wheel index matching the detected toolkit; default to
            # the newest cu121 wheel when the version is unknown.
            if cuda_version and cuda_version.startswith("11"):
                cmd = install_commands["11.8"].split()
            else:
                cmd = install_commands["12.1"].split()

            print(f"执行: {' '.join(cmd)}")
            result = subprocess.run(cmd)

            if result.returncode == 0:
                print("✅ PyTorch安装完成!")
                return True
            print("❌ PyTorch安装失败")
            return False
        else:
            print("请手动执行上述安装命令")
            return False
    except (KeyboardInterrupt, EOFError):
        print("\n安装被取消")
        return False

def create_fixed_quick_train():
    """Write quick_train_fixed.py: a standalone helmet-detection training
    script with the OpenMP workaround baked in and automatic GPU/CPU
    device selection.

    Fix: previous revisions of the generated script passed a ``pin_memory``
    keyword to ``YOLO.train()``. ultralytics validates train arguments
    against its config schema and aborts on unknown keys, so every training
    run would fail; the ``pin_memory`` lines were removed.
    """
    print("\n" + "="*50)
    print("创建修复后的训练脚本")
    print("="*50)

    fixed_script = '''#!/usr/bin/env python3
"""
修复版快速训练安全帽检测模型
解决GPU检测和OpenMP冲突问题
"""

import os
import sys

# 修复OpenMP冲突 - 必须在导入torch之前设置
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
os.environ['OMP_NUM_THREADS'] = '1'

from ultralytics import YOLO
import torch

def comprehensive_gpu_check():
    """全面的GPU检查"""
    print("=" * 60)
    print("GPU检测和环境检查")
    print("=" * 60)
    
    # 环境变量检查
    print("环境变量设置:")
    print(f"  KMP_DUPLICATE_LIB_OK: {os.environ.get('KMP_DUPLICATE_LIB_OK', 'Not set')}")
    print(f"  OMP_NUM_THREADS: {os.environ.get('OMP_NUM_THREADS', 'Not set')}")
    
    # PyTorch版本检查
    print(f"\\nPyTorch版本: {torch.__version__}")
    print(f"CUDA编译支持: {torch.version.cuda if torch.version.cuda else 'None'}")
    print(f"CUDA运行时可用: {torch.cuda.is_available()}")
    
    if torch.cuda.is_available():
        print(f"✅ GPU加速可用!")
        print(f"GPU数量: {torch.cuda.device_count()}")
        
        for i in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(i)
            print(f"GPU {i}: {props.name}")
            print(f"  显存: {props.total_memory / 1024**3:.1f} GB")
            print(f"  计算能力: {props.major}.{props.minor}")
            
        # GPU可用性测试
        try:
            test_tensor = torch.randn(100, 100).cuda()
            result = torch.matmul(test_tensor, test_tensor)
            print(f"✅ GPU计算测试通过")
            del test_tensor, result
            torch.cuda.empty_cache()
            return True
        except Exception as e:
            print(f"❌ GPU测试失败: {e}")
            return False
    else:
        print("❌ GPU不可用，将使用CPU训练")
        print("\\n可能原因:")
        print("1. 安装了CPU版本的PyTorch")
        print("2. 没有NVIDIA GPU")
        print("3. CUDA驱动问题")
        return False

def auto_configure_device():
    """自动配置设备和参数"""
    gpu_available = comprehensive_gpu_check()
    
    if gpu_available:
        device = 'cuda'
        batch_size = 16
        workers = 4
        print(f"\\n🚀 配置: GPU加速训练")
    else:
        device = 'cpu'
        batch_size = 8
        workers = 2
        print(f"\\n⚠️ 配置: CPU训练")
    
    print(f"设备: {device}")
    print(f"批次大小: {batch_size}")
    print(f"工作线程: {workers}")
    
    return device, batch_size, workers

def main():
    print("=" * 60)
    print("修复版安全帽检测模型训练")
    print("=" * 60)
    
    # 自动配置设备
    device, batch_size, workers = auto_configure_device()
    
    # 检查数据集
    if not os.path.exists('data.yaml'):
        print("\\n❌ 找不到data.yaml文件")
        print("请确保数据集配置文件存在")
        return
    
    # 加载模型
    print("\\n📦 加载YOLOv8模型...")
    try:
        model = YOLO('yolov8n.pt')
        print("✅ 模型加载成功")
    except Exception as e:
        print(f"❌ 模型加载失败: {e}")
        return
    
    # 开始训练
    print("\\n🎯 开始训练...")
    try:
        results = model.train(
            data='data.yaml',
            epochs=50,
            imgsz=640,
            batch=batch_size,
            device=device,
            workers=workers,
            project='runs/detect',
            name='helmet_detection_fixed',
            save=True,
            save_period=10,
            patience=15,
            cache=False,
            augment=True,
            verbose=True,
        )
        
        print("\\n🎉 训练完成!")
        print(f"最佳模型: runs/detect/helmet_detection_fixed/weights/best.pt")
        
        # 验证模型
        print("\\n📊 验证模型...")
        metrics = model.val(device=device)
        print(f"mAP50: {metrics.box.map50:.3f}")
        print(f"mAP50-95: {metrics.box.map:.3f}")
        
    except Exception as e:
        print(f"\\n❌ 训练失败: {e}")
        print("\\n故障排除:")
        print("1. 检查显存是否足够")
        print("2. 尝试减小batch_size")
        print("3. 确认数据集路径正确")
        
        # 如果是显存问题，尝试CPU训练
        if "CUDA out of memory" in str(e) or "memory" in str(e).lower():
            print("\\n🔄 显存不足，尝试CPU训练...")
            try:
                results = model.train(
                    data='data.yaml',
                    epochs=50,
                    imgsz=640,
                    batch=4,  # 更小的batch
                    device='cpu',
                    workers=2,
                    project='runs/detect',
                    name='helmet_detection_cpu',
                    save=True,
                    save_period=10,
                    patience=15,
                    cache=False,
                    augment=True,
                    verbose=True,
                )
                print("✅ CPU训练完成!")
            except Exception as e2:
                print(f"❌ CPU训练也失败: {e2}")

if __name__ == "__main__":
    main()
'''
    
    with open('quick_train_fixed.py', 'w', encoding='utf-8') as f:
        f.write(fixed_script)
    
    print("✅ 已创建修复版训练脚本: quick_train_fixed.py")

def create_environment_setup():
    """Write setup_env.bat, a Windows helper that exports the OpenMP and
    CUDA environment variables before launching the training script."""
    banner = "=" * 50
    print("\n" + banner)
    print("创建环境设置脚本")
    print(banner)

    batch_content = '''@echo off
REM Windows环境变量设置脚本
REM 解决OpenMP冲突和优化训练环境

echo 设置训练环境变量...

REM 修复OpenMP冲突
set KMP_DUPLICATE_LIB_OK=TRUE
set OMP_NUM_THREADS=1

REM 优化CUDA设置
set CUDA_LAUNCH_BLOCKING=1
set PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128

echo 环境变量已设置:
echo   KMP_DUPLICATE_LIB_OK=%KMP_DUPLICATE_LIB_OK%
echo   OMP_NUM_THREADS=%OMP_NUM_THREADS%
echo   CUDA_LAUNCH_BLOCKING=%CUDA_LAUNCH_BLOCKING%
echo   PYTORCH_CUDA_ALLOC_CONF=%PYTORCH_CUDA_ALLOC_CONF%

echo.
echo 现在可以运行训练脚本:
echo   python quick_train_fixed.py

pause
'''

    # utf-8 keeps the Chinese echo text intact in the written file.
    with open('setup_env.bat', 'w', encoding='utf-8') as handle:
        handle.write(batch_content)

    print("✅ 已创建环境设置脚本: setup_env.bat")
    print("使用方法: 双击运行 setup_env.bat 然后运行训练脚本")

def main():
    """Run the full diagnose-and-fix workflow: patch env vars, probe the
    GPU and PyTorch install, optionally reinstall PyTorch, then emit the
    helper scripts and usage instructions."""
    print("🔧 GPU训练问题修复工具")
    print("解决PyTorch GPU检测和OpenMP冲突问题")

    # Step 1: environment workaround (must happen before any torch import
    # in the same process).
    fix_openmp_conflict()

    # Step 2: hardware and framework diagnosis.
    nvidia_found, _amd_found, _intel_found = detect_gpu_type()
    pytorch_ready = check_current_pytorch()

    if not nvidia_found:
        print("\n⚠️ 未检测到NVIDIA GPU，将使用CPU训练")
    elif not pytorch_ready:
        # NVIDIA hardware exists but the installed PyTorch can't use it.
        print("\n🎯 检测到NVIDIA GPU但PyTorch不支持CUDA")
        install_pytorch_cuda()

    # Step 3: write the fixed training and environment scripts.
    create_fixed_quick_train()
    create_environment_setup()

    banner = "=" * 60
    print("\n" + banner)
    print("修复完成!")
    print(banner)

    print("\n📋 使用步骤:")
    print("1. 如果需要重新安装PyTorch，按上述安装命令执行")
    print("2. 运行: python quick_train_fixed.py")
    print("3. 或者先运行: setup_env.bat (Windows)")

    print("\n🔍 故障排除:")
    print("- 如果仍有OpenMP错误：重启终端后再试")
    print("- 如果GPU不识别：检查NVIDIA驱动是否最新")
    print("- 如果显存不足：脚本会自动降级到CPU训练")

# Entry point: run the interactive fix workflow when executed as a script.
if __name__ == "__main__":
    main() 