#!/usr/bin/env python3
"""
Improved training script optimized for the GTX 1050 Ti.

Combines the strengths of train_helmet_detection.py while adapting the
training configuration to fit within 4 GB of GPU memory.
"""

import os
import sys
import gc
import psutil

# Work around OpenMP runtime conflicts and tune CUDA allocator behavior.
# NOTE: these are set before the torch/ultralytics imports below, since the
# CUDA-related variables are read when torch initializes.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'  # tolerate duplicate OpenMP runtimes
os.environ['OMP_NUM_THREADS'] = '1'  # limit OpenMP threads to avoid oversubscription
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'  # reduce fragmentation
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'  # synchronous launches: clearer CUDA error traces

from ultralytics import YOLO
import torch
import warnings
import time
from pathlib import Path

warnings.filterwarnings('ignore', category=UserWarning)

def clear_gpu_memory():
    """Release cached CUDA memory and trigger garbage collection.

    No-op on machines without a CUDA-capable GPU.
    """
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    gc.collect()
    print("🧹 清理GPU缓存完成")

def show_memory_status():
    """Print current system RAM usage and, if present, per-GPU memory stats."""
    vm = psutil.virtual_memory()
    used_gb = vm.used / 1024**3
    total_gb = vm.total / 1024**3
    print(f"💾 系统内存: {used_gb:.1f}GB / {total_gb:.1f}GB ({vm.percent:.1f}%)")

    if not torch.cuda.is_available():
        return
    for idx in range(torch.cuda.device_count()):
        props = torch.cuda.get_device_properties(idx)
        allocated_gb = torch.cuda.memory_allocated(idx) / 1024**3
        reserved_gb = torch.cuda.memory_reserved(idx) / 1024**3
        capacity_gb = props.total_memory / 1024**3
        print(f"🎮 GPU {idx} ({props.name}): {allocated_gb:.2f}GB 已用 + {reserved_gb:.2f}GB 缓存 / {capacity_gb:.1f}GB 总计")

def check_gpu_and_optimize():
    """Probe available hardware and return a training-config dict.

    Returns a dict with keys: device, batch, workers, amp, cache, imgsz.
    Batch size / image size / worker count are scaled down on small-VRAM
    GPUs to avoid out-of-memory failures; CPU fallback is the most
    conservative configuration.
    """
    print("🔍 内存状态检查:")
    show_memory_status()

    # Start from a clean slate before choosing a configuration.
    clear_gpu_memory()

    if not torch.cuda.is_available():
        print("❌ 未检测到GPU，使用CPU")
        return {
            'device': 'cpu',
            'batch': 4,
            'workers': 2,
            'amp': False,
            'cache': False,
            'imgsz': 416,
        }

    props = torch.cuda.get_device_properties(0)
    vram_gb = props.total_memory / (1024**3)

    print(f"✅ 检测到GPU: {props.name}")
    print(f"显存: {vram_gb:.1f}GB")

    # Pick conservative settings first; scale up with available VRAM.
    if vram_gb <= 4:  # e.g. GTX 1050 Ti
        config = {
            'device': 'cuda',
            'batch': 4,     # small batch to stay within 4GB
            'workers': 1,   # fewer dataloader workers
            'amp': True,    # mixed precision saves VRAM
            'cache': False, # no in-memory dataset cache
            'imgsz': 416,   # reduced input resolution
        }
        print("🎯 4GB显存保守优化配置")
    elif vram_gb <= 6:
        config = {
            'device': 'cuda',
            'batch': 8,
            'workers': 2,
            'amp': True,
            'cache': False,
            'imgsz': 640,
        }
        print("🎯 6GB显存配置")
    else:
        config = {
            'device': 'cuda',
            'batch': 16,
            'workers': 4,
            'amp': True,
            'cache': 'ram',
            'imgsz': 640,
        }
        print("🎯 大显存配置")

    return config

def train_with_model_progression():
    """Run a two-stage progressive training strategy.

    Stage 1 trains YOLOv8n as a fast baseline; if it reaches mAP50 > 0.6 on
    a CUDA device, stage 2 trains the larger YOLOv8s with a reduced batch
    size and stronger augmentation. On CUDA OOM, stage 1 retries once with
    heavily downgraded parameters; stage 2 is simply skipped.

    Requires a `data.yaml` dataset description in the working directory;
    returns early (None) if it is missing. Results are written under
    `runs/detect/`.
    """

    # Dataset description must exist before anything else.
    if not os.path.exists('data.yaml'):
        print("❌ 找不到data.yaml文件")
        return

    # Hardware-dependent training configuration.
    config = check_gpu_and_optimize()

    print("\n" + "="*60)
    print("🚀 开始递进训练策略")
    print("="*60)

    # Stage 1: quick YOLOv8n training to establish a baseline.
    print("\n📍 阶段1: YOLOv8n 基础训练")
    print("-" * 40)

    model_n = YOLO('yolov8n.pt')

    try:
        print(f"📦 训练参数: batch={config['batch']}, imgsz={config['imgsz']}, workers={config['workers']}")

        results_n = model_n.train(
            data='data.yaml',
            epochs=60,  # slightly more than a quick-train run
            imgsz=config['imgsz'],
            batch=config['batch'],
            device=config['device'],
            workers=config['workers'],
            project='runs/detect',
            name='helmet_n_improved',
            save=True,
            save_period=20,
            patience=20,
            amp=config['amp'],
            cache=config['cache'],
            augment=True,
            verbose=True,
            # Data augmentation parameters
            hsv_h=0.015,      # hue jitter
            hsv_s=0.7,        # saturation jitter
            hsv_v=0.4,        # brightness jitter
            degrees=10.0,     # rotation range
            translate=0.1,    # translation fraction
            scale=0.5,        # scale jitter
            shear=0.0,        # shear disabled
            perspective=0.0,  # perspective disabled
            flipud=0.0,       # no vertical flips
            fliplr=0.5,       # horizontal flips half the time
            mosaic=1.0,       # mosaic augmentation always on
            mixup=0.0,        # mixup disabled
            copy_paste=0.0,   # copy-paste disabled
        )

        # Validate the baseline model.
        metrics_n = model_n.val(device=config['device'])
        print(f"\n📊 YOLOv8n 结果:")
        print(f"mAP50: {metrics_n.box.map50:.3f}")
        print(f"mAP50-95: {metrics_n.box.map:.3f}")

        baseline_map50 = metrics_n.box.map50

    except torch.cuda.OutOfMemoryError as e:
        print(f"❌ YOLOv8n GPU内存不足: {e}")
        print("🔧 尝试降级参数...")
        clear_gpu_memory()

        # Retry once with heavily downgraded parameters.
        try:
            model_n = YOLO('yolov8n.pt')
            results_n = model_n.train(
                data='data.yaml',
                epochs=50,
                imgsz=320,  # even smaller input
                batch=2,    # minimum batch size
                device=config['device'],
                workers=0,  # no dataloader subprocesses
                project='runs/detect',
                name='helmet_n_fallback',
                amp=False,  # mixed precision off for stability
                cache=False,
                verbose=True,
            )
            metrics_n = model_n.val(device=config['device'])
            baseline_map50 = metrics_n.box.map50
            print(f"✅ 降级训练成功，mAP50: {baseline_map50:.3f}")
        except Exception as e2:
            print(f"❌ 降级训练也失败: {e2}")
            baseline_map50 = 0
    except Exception as e:
        print(f"❌ YOLOv8n 训练失败: {e}")
        baseline_map50 = 0

    # Stage 2: YOLOv8s refinement — only when on GPU and the baseline is good.
    if config['device'] == 'cuda' and baseline_map50 > 0.6:
        print(f"\n📍 阶段2: YOLOv8s 精进训练 (基于 mAP50={baseline_map50:.3f})")
        print("-" * 40)

        # Reduce the batch size to leave headroom for the larger model.
        # BUGFIX: the previous `max(8, batch - 4)` *increased* the batch from
        # 4 to 8 on <=4GB GPUs (the very cards this script targets), inviting
        # OOM; floor at 2 instead so the batch only ever shrinks.
        config['batch'] = max(2, config['batch'] - 4)

        model_s = YOLO('yolov8s.pt')

        try:
            print(f"📦 YOLOv8s训练参数: batch={config['batch']}, imgsz={config['imgsz']}")

            results_s = model_s.train(
                data='data.yaml',
                epochs=80,  # more epochs for the larger model
                imgsz=config['imgsz'],
                batch=config['batch'],
                device=config['device'],
                workers=config['workers'],
                project='runs/detect',
                name='helmet_s_improved',
                save=True,
                save_period=20,
                patience=25,
                amp=config['amp'],
                cache=config['cache'],
                augment=True,
                verbose=True,
                # More aggressive augmentation for the refinement stage.
                hsv_h=0.02,
                hsv_s=0.8,
                hsv_v=0.5,
                degrees=15.0,
                translate=0.15,
                scale=0.6,
                flipud=0.1,     # small amount of vertical flipping
                fliplr=0.5,
                mosaic=1.0,
                mixup=0.1,      # enable light mixup
                copy_paste=0.1, # enable light copy-paste
            )

            metrics_s = model_s.val(device=config['device'])
            print(f"\n📊 YOLOv8s 结果:")
            print(f"mAP50: {metrics_s.box.map50:.3f}")
            print(f"mAP50-95: {metrics_s.box.map:.3f}")
            print(f"相比YOLOv8n提升: {metrics_s.box.map50 - baseline_map50:.3f}")

        except torch.cuda.OutOfMemoryError as e:
            print(f"❌ YOLOv8s GPU内存不足: {e}")
            print("🔧 YOLOv8s显存不足，跳过精进训练")
            clear_gpu_memory()
        except Exception as e:
            print(f"❌ YOLOv8s 训练失败: {e}")
            print("可能是显存不足，请尝试减小batch_size")

    print(f"\n🎉 递进训练完成!")
    print(f"最佳模型保存在: runs/detect/")

    # Show where memory ended up and clean up.
    print(f"\n📊 最终内存状态:")
    show_memory_status()
    clear_gpu_memory()

def main():
    """Entry point: run progressive training and report total wall time."""
    banner = "=" * 60
    print(banner)
    print("🛠️ GTX 1050 Ti 优化训练脚本")
    print(banner)

    started = time.time()

    try:
        train_with_model_progression()
    except KeyboardInterrupt:
        print("\n⏸️ 训练被用户中断")
    except Exception as e:
        # Surface the full traceback for any unexpected failure.
        print(f"\n💥 训练过程出错: {e}")
        import traceback
        traceback.print_exc()
    else:
        elapsed = time.time() - started
        print(f"\n⏱️ 总训练时间: {elapsed/3600:.1f} 小时")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main() 