#!/usr/bin/env python3
"""
Ultimate-fix training script.

Works around OpenMP library conflicts and GPU detection problems by
pinning threading-related environment variables before any numeric
libraries (torch/numpy/MKL) are imported.
"""

import os
import sys

# These environment variables MUST be set before importing any other
# library — torch/numpy/MKL read them at import time.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'

# Silence Intel MKL diagnostics.
os.environ['MKL_DYNAMIC'] = 'FALSE'
os.environ['MKL_VERBOSE'] = '0'

print("🔧 OpenMP环境修复完成")
print(f"  KMP_DUPLICATE_LIB_OK: {os.environ.get('KMP_DUPLICATE_LIB_OK')}")
print(f"  OMP_NUM_THREADS: {os.environ.get('OMP_NUM_THREADS')}")

# Now it is safe to import the heavy libraries.
from ultralytics import YOLO
import torch
import warnings

# Suppress non-critical warnings (e.g. deprecation noise from dependencies).
warnings.filterwarnings('ignore', category=UserWarning)

def ultimate_gpu_check():
    """Probe CUDA availability and run a small GPU sanity benchmark.

    Prints PyTorch/CUDA diagnostics and, when a CUDA device is present,
    times a few warm matrix multiplications to confirm the device
    actually computes.

    Returns:
        bool: True if a CUDA device is available and passes the compute
        test; False otherwise (no GPU, or the test raised an exception).
    """
    import time  # stdlib; used only for the benchmark below

    print("\n" + "="*60)
    print("终极GPU检测和配置")
    print("="*60)

    # Report build-time vs. runtime CUDA support separately: a CPU-only
    # wheel has torch.version.cuda == None even on a machine with a GPU.
    print(f"PyTorch版本: {torch.__version__}")
    print(f"CUDA编译支持: {torch.version.cuda if torch.version.cuda else 'CPU版本'}")
    print(f"CUDA运行时可用: {torch.cuda.is_available()}")

    # Guard clause: no CUDA at runtime -> report and bail out early.
    if not torch.cuda.is_available():
        print("❌ GPU不可用，将使用CPU训练")
        print("\n可能原因:")
        print("1. 没有NVIDIA GPU或驱动版本不兼容")
        print("2. PyTorch版本与CUDA驱动不匹配")
        return False

    print("✅ GPU加速可用!")
    print(f"GPU数量: {torch.cuda.device_count()}")

    for i in range(torch.cuda.device_count()):
        props = torch.cuda.get_device_properties(i)
        print(f"GPU {i}: {props.name}")
        print(f"  显存: {props.total_memory / (1024**3):.1f} GB")
        print(f"  计算能力: {props.major}.{props.minor}")

    # GPU compute benchmark.
    try:
        print("\n🧪 GPU性能测试...")
        device = torch.device('cuda:0')

        x = torch.randn(1000, 1000, device=device)
        y = torch.randn(1000, 1000, device=device)

        # Warm up so one-time kernel/launch setup is excluded from timing.
        for _ in range(5):
            _ = torch.matmul(x, y)

        # CUDA launches are asynchronous; synchronize so the timer
        # brackets the actual device work, not just kernel enqueueing.
        torch.cuda.synchronize()
        # BUGFIX: benchmark with the monotonic high-resolution clock
        # (perf_counter) instead of time.time(), which is not monotonic
        # and can jump with wall-clock adjustments.
        start_time = time.perf_counter()

        for _ in range(10):
            result = torch.matmul(x, y)

        torch.cuda.synchronize()
        end_time = time.perf_counter()

        print("✅ GPU计算测试通过")
        print(f"10次矩阵乘法用时: {(end_time - start_time)*1000:.2f} ms")

        # Free the test tensors so training starts with an empty cache.
        del x, y, result
        torch.cuda.empty_cache()

        return True

    except Exception as e:
        print(f"❌ GPU测试失败: {e}")
        return False

def smart_device_config():
    """Choose the training device plus matching batch size and worker count.

    Runs the GPU check first; on success the batch size and worker count
    are scaled by the first GPU's total memory, otherwise conservative
    CPU defaults are used.

    Returns:
        tuple: (device, batch_size, workers) where device is 'cuda' or 'cpu'.
    """
    if ultimate_gpu_check():
        device = 'cuda'
        # Scale batch/workers to the first GPU's total memory (GiB).
        vram_gb = torch.cuda.get_device_properties(0).total_memory / (1024**3)
        if vram_gb >= 6:
            batch_size, workers = 16, 4
        elif vram_gb >= 4:
            batch_size, workers = 12, 3
        else:
            batch_size, workers = 8, 2
        print(f"\n🚀 GPU配置 (显存: {vram_gb:.1f}GB)")
    else:
        # No usable GPU: small batch keeps CPU training feasible.
        device, batch_size, workers = 'cpu', 4, 2
        print("\n⚠️ CPU配置")

    print(f"设备: {device}")
    print(f"批次大小: {batch_size}")
    print(f"工作线程: {workers}")

    return device, batch_size, workers

def main():
    """Run the training pipeline: device setup, data check, train, validate.

    On a memory-related training failure, automatically degrades the
    configuration (smaller batch, then CPU) and retries once.

    Returns:
        bool: True when training (or the degraded retry) completed,
        False on any failure (missing data config, model load error,
        unrecoverable training error).
    """
    print("="*60)
    print("🛠️ 终极修复版安全帽检测训练")
    print("="*60)

    # Pick device/batch/workers from the detected hardware.
    device, batch_size, workers = smart_device_config()

    # The dataset config must exist before touching the model.
    if not os.path.exists('data.yaml'):
        print("\n❌ 找不到data.yaml文件")
        print("请确保数据集配置文件存在")
        return False  # BUGFIX: was an implicit None; callers test truthiness

    print("\n📦 加载YOLOv8模型...")
    try:
        model = YOLO('yolov8n.pt')
        print("✅ 模型加载成功")
    except Exception as e:
        print(f"❌ 模型加载失败: {e}")
        return False  # BUGFIX: was an implicit None

    train_config = {
        'data': 'data.yaml',
        'epochs': 50,
        'imgsz': 640,
        'batch': batch_size,
        'device': device,
        'workers': workers,
        'project': 'runs/detect',
        'name': 'helmet_detection_ultimate',
        'save': True,
        'save_period': 10,
        'patience': 15,            # early-stopping patience (epochs)
        'cache': False,
        'augment': True,
        'verbose': True,
        'amp': device == 'cuda',   # mixed precision only on GPU
        'pin_memory': False,       # avoid pin_memory warnings
    }

    print("\n🎯 开始训练...")
    print(f"训练配置: {train_config}")

    try:
        model.train(**train_config)

        print("\n🎉 训练完成!")
        print(f"最佳模型: runs/detect/helmet_detection_ultimate/weights/best.pt")

        # Validate the trained model and report the headline metrics.
        print("\n📊 验证模型...")
        metrics = model.val(device=device)
        print(f"mAP50: {metrics.box.map50:.3f}")
        print(f"mAP50-95: {metrics.box.map:.3f}")

        return True

    except Exception as e:
        print(f"\n❌ 训练失败: {e}")

        error_msg = str(e).lower()

        if "cuda out of memory" in error_msg or "memory" in error_msg:
            print("\n🔄 显存不足，降级处理...")

            # Degrade: shrink the batch first; if already minimal (or on
            # CPU), fall back to a CPU configuration without AMP.
            if device == 'cuda' and batch_size > 4:
                print(f"尝试减小batch_size从{batch_size}到4...")
                train_config['batch'] = 4
            else:
                print("切换到CPU训练...")
                train_config['device'] = 'cpu'
                train_config['batch'] = 4
                train_config['workers'] = 2
                train_config['amp'] = False

            # Single retry with the degraded configuration.
            try:
                print("🔄 重新开始训练...")
                model.train(**train_config)
                print("✅ 降级训练成功!")
                return True
            except Exception as e2:
                print(f"❌ 降级训练也失败: {e2}")

        else:
            print(f"\n其他错误: {e}")
            print("\n故障排除建议:")
            print("1. 检查数据集格式是否正确")
            print("2. 确认data.yaml配置无误")
            print("3. 重启终端重新运行")

        return False

# Script entry point: run the pipeline and report the overall outcome.
# main() returns a truthy value on success; KeyboardInterrupt (Ctrl-C)
# is reported as a user-initiated stop, anything else as an unexpected
# error with a full traceback.
if __name__ == "__main__":
    try:
        success = main()
        if success:
            print("\n🎊 训练任务完成!")
        else:
            print("\n💥 训练任务失败!")
    except KeyboardInterrupt:
        print("\n⏸️ 训练被用户中断")
    except Exception as e:
        print(f"\n💀 未预期的错误: {e}")
        import traceback
        traceback.print_exc() 