#!/usr/bin/env python3
"""
Fixed quick-training script for a helmet-detection model.

Works around GPU-detection and OpenMP-runtime-conflict problems by setting
the relevant environment variables before torch is imported.
"""

import os
import sys

# Work around duplicate-OpenMP-runtime conflicts -- these MUST be set before
# torch is imported, because the check happens when the library is loaded.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
os.environ['OMP_NUM_THREADS'] = '1'

from ultralytics import YOLO
import torch

def comprehensive_gpu_check():
    """Run a full GPU/environment diagnostic and report whether CUDA is usable.

    Prints the OpenMP-related environment variables, the PyTorch/CUDA build
    info, per-device properties, and runs a small matmul on the GPU to verify
    that CUDA actually works (a device can be *visible* yet unusable).

    Returns:
        bool: True if a CUDA device is available and passes the compute test,
        False otherwise (including when the compute test raises).
    """
    print("=" * 60)
    print("GPU检测和环境检查")
    print("=" * 60)

    # Environment variables set at module import time (OpenMP workaround).
    print("环境变量设置:")
    print(f"  KMP_DUPLICATE_LIB_OK: {os.environ.get('KMP_DUPLICATE_LIB_OK', 'Not set')}")
    print(f"  OMP_NUM_THREADS: {os.environ.get('OMP_NUM_THREADS', 'Not set')}")

    # PyTorch build info: torch.version.cuda is None on CPU-only builds.
    print(f"\nPyTorch版本: {torch.__version__}")
    print(f"CUDA编译支持: {torch.version.cuda or 'None'}")
    print(f"CUDA运行时可用: {torch.cuda.is_available()}")

    if not torch.cuda.is_available():
        print("❌ GPU不可用，将使用CPU训练")
        print("\n可能原因:")
        print("1. 安装了CPU版本的PyTorch")
        print("2. 没有NVIDIA GPU")
        print("3. CUDA驱动问题")
        return False

    print("✅ GPU加速可用!")
    print(f"GPU数量: {torch.cuda.device_count()}")

    for i in range(torch.cuda.device_count()):
        props = torch.cuda.get_device_properties(i)
        print(f"GPU {i}: {props.name}")
        print(f"  显存: {props.total_memory / 1024**3:.1f} GB")
        print(f"  计算能力: {props.major}.{props.minor}")

    # Smoke test: allocate a tensor on the GPU and multiply it, so we catch
    # broken driver/runtime setups that still report is_available() == True.
    try:
        test_tensor = torch.randn(100, 100).cuda()
        result = torch.matmul(test_tensor, test_tensor)
        print("✅ GPU计算测试通过")
        del test_tensor, result
        torch.cuda.empty_cache()  # release the test allocation immediately
        return True
    except Exception as e:
        print(f"❌ GPU测试失败: {e}")
        return False

def auto_configure_device():
    """Pick the training device and matching hyperparameters.

    Runs the full GPU diagnostic, then returns a ``(device, batch_size,
    workers)`` tuple sized appropriately for GPU or CPU training.
    """
    use_gpu = comprehensive_gpu_check()

    if use_gpu:
        device, batch_size, workers = 'cuda', 16, 4
        print(f"\n🚀 配置: GPU加速训练")
    else:
        device, batch_size, workers = 'cpu', 8, 2
        print(f"\n⚠️ 配置: CPU训练")

    # Echo the chosen configuration so it shows up in the training log.
    for label, value in (("设备", device), ("批次大小", batch_size), ("工作线程", workers)):
        print(f"{label}: {value}")

    return device, batch_size, workers

def _run_training(model, device, batch_size, workers, run_name):
    """Launch one model.train() run with this project's standard settings.

    Note: the original passed ``pin_memory`` to ``model.train()``, but that is
    not a valid Ultralytics train argument -- unknown kwargs are rejected by
    the trainer's argument check, so it has been removed.
    """
    return model.train(
        data='data.yaml',
        epochs=50,
        imgsz=640,
        batch=batch_size,
        device=device,
        workers=workers,
        project='runs/detect',
        name=run_name,
        save=True,
        save_period=10,
        patience=15,
        cache=False,
        augment=True,
        verbose=True,
    )

def main():
    """Entry point: configure the device, train the model, then validate it.

    Falls back to a small-batch CPU run if the first attempt fails with an
    out-of-memory error. Returns early (without raising) if the dataset
    config is missing or the model cannot be loaded.
    """
    print("=" * 60)
    print("修复版安全帽检测模型训练")
    print("=" * 60)

    # Pick GPU or CPU settings based on the diagnostic.
    device, batch_size, workers = auto_configure_device()

    # The dataset config must exist next to this script.
    if not os.path.exists('data.yaml'):
        print("\n❌ 找不到data.yaml文件")
        print("请确保数据集配置文件存在")
        return

    # Load the pretrained nano checkpoint (downloaded on first use).
    print("\n📦 加载YOLOv8模型...")
    try:
        model = YOLO('yolov8n.pt')
        print("✅ 模型加载成功")
    except Exception as e:
        print(f"❌ 模型加载失败: {e}")
        return

    print("\n🎯 开始训练...")
    try:
        results = _run_training(model, device, batch_size, workers,
                                'helmet_detection_fixed')

        print("\n🎉 训练完成!")
        print(f"最佳模型: runs/detect/helmet_detection_fixed/weights/best.pt")

        # Validate on the dataset's val split and report the key metrics.
        print("\n📊 验证模型...")
        metrics = model.val(device=device)
        print(f"mAP50: {metrics.box.map50:.3f}")
        print(f"mAP50-95: {metrics.box.map:.3f}")

    except Exception as e:
        print(f"\n❌ 训练失败: {e}")
        print("\n故障排除:")
        print("1. 检查显存是否足够")
        print("2. 尝试减小batch_size")
        print("3. 确认数据集路径正确")

        # Out-of-memory? Retry once on the CPU with a much smaller batch.
        if "CUDA out of memory" in str(e) or "memory" in str(e).lower():
            print("\n🔄 显存不足，尝试CPU训练...")
            try:
                results = _run_training(model, 'cpu', 4, 2,
                                        'helmet_detection_cpu')
                print("✅ CPU训练完成!")
            except Exception as e2:
                print(f"❌ CPU训练也失败: {e2}")

# Script entry point: run the full check/train pipeline when executed directly.
if __name__ == "__main__":
    main()
