"""
GPU并行配置文件
用于优化训练时的GPU使用和多线程设置
"""

import os
import torch
import psutil

def setup_gpu_parallel():
    """
    Configure the process for GPU-parallel training.

    Sets CPU math-library thread counts via environment variables,
    enables cuDNN autotuning, caps the per-process GPU memory
    fraction, and prints the visible GPU inventory.

    Returns:
        bool: True when CUDA is available (GPU path configured),
        False when training will fall back to the CPU.
    """
    # 1. CPU threading environment variables.
    # Compute the thread count once instead of three times; os.cpu_count()
    # may return None on exotic platforms, which would make min(8, None)
    # raise TypeError — fall back to 1 in that case.
    n_threads = str(min(8, os.cpu_count() or 1))
    os.environ['OMP_NUM_THREADS'] = n_threads      # OpenMP threads
    os.environ['MKL_NUM_THREADS'] = n_threads      # MKL threads
    os.environ['NUMEXPR_NUM_THREADS'] = n_threads  # NumExpr threads

    # 2. CUDA settings
    if torch.cuda.is_available():
        # Release cached allocations before training starts.
        torch.cuda.empty_cache()

        # cuDNN autotuner: picks the fastest kernels for fixed input
        # shapes, at the cost of bitwise determinism.
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.enabled = True

        # Leave ~10% of GPU memory as headroom for other processes.
        torch.cuda.set_per_process_memory_fraction(0.9)

        # Print GPU inventory.
        print("GPU设备信息:")
        print(f"  设备数量: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(i)
            print(f"  GPU {i}: {props.name}")
            print(f"    内存: {props.total_memory / 1024**3:.1f} GB")
            print(f"    计算能力: {props.major}.{props.minor}")

        return True
    else:
        print("CUDA不可用，将使用CPU训练")
        return False

def get_optimal_workers():
    """
    Compute a reasonable DataLoader ``num_workers`` value.

    Uses half the logical CPU count, clamped to the range [1, 8].

    Returns:
        int: suggested number of worker processes (always >= 1).
    """
    # os.cpu_count() may return None on exotic platforms; the original
    # psutil call had the same hazard (None // 2 would raise TypeError).
    cpu_count = os.cpu_count() or 1
    # Rule of thumb: about half the cores, but never more than 8 workers.
    return max(1, min(cpu_count // 2, 8))

def setup_multi_gpu_model(model):
    """
    Wrap *model* in ``torch.nn.DataParallel`` when more than one CUDA
    device is visible; otherwise return the model untouched.

    Args:
        model: a ``torch.nn.Module`` to (optionally) parallelize.

    Returns:
        The original model, or a ``DataParallel`` wrapper around it.
    """
    gpu_count = torch.cuda.device_count()
    if gpu_count <= 1:
        return model
    print(f"检测到 {gpu_count} 个GPU，启用DataParallel")
    return torch.nn.DataParallel(model)

def get_batch_size_multiplier():
    """
    Batch-size scale factor derived from the visible GPU count.

    Returns:
        int: number of CUDA devices, or 1 when CUDA is unavailable.
    """
    return torch.cuda.device_count() if torch.cuda.is_available() else 1

def monitor_gpu_memory():
    """
    Print per-device GPU memory usage in GB.

    Each line shows allocated / reserved / total memory for one device.
    Silent no-op when CUDA is unavailable.
    """
    if not torch.cuda.is_available():
        return
    gib = 1024 ** 3
    for dev in range(torch.cuda.device_count()):
        allocated = torch.cuda.memory_allocated(dev) / gib
        cached = torch.cuda.memory_reserved(dev) / gib
        total = torch.cuda.get_device_properties(dev).total_memory / gib
        print(f"GPU {dev} 内存使用: {allocated:.2f}GB / {cached:.2f}GB / {total:.2f}GB")

# Preset DataLoader/GPU configurations, keyed by scenario name.
# Consumed by get_gpu_config(); values feed DataLoader kwargs
# (workers, pin_memory, persistent_workers) and batch sizing.
GPU_CONFIGS = {
    # One visible CUDA device (also the CPU/unknown-name fallback).
    'single_gpu': {
        'workers': 4,
        'batch_size_multiplier': 1,
        'pin_memory': True,
        'persistent_workers': True
    },
    # Two or more visible CUDA devices (DataParallel training).
    'multi_gpu': {
        'workers': 8,
        'batch_size_multiplier': 2,
        'pin_memory': True,
        'persistent_workers': True
    },
    # Large-memory GPU: bigger batches plus a higher memory cap.
    # NOTE(review): 'memory_fraction' is not applied anywhere in this
    # file — presumably a caller passes it to
    # torch.cuda.set_per_process_memory_fraction; verify.
    'high_memory': {
        'workers': 6,
        'batch_size_multiplier': 1.5,
        'pin_memory': True,
        'persistent_workers': True,
        'memory_fraction': 0.95
    }
}

def get_gpu_config(config_name='auto'):
    """
    Look up a preset GPU configuration.

    Args:
        config_name: a key of ``GPU_CONFIGS``, or ``'auto'`` to select
            a preset based on the number of visible CUDA devices.

    Returns:
        dict: the selected preset; unknown names fall back to the
        ``'single_gpu'`` preset.
    """
    if config_name != 'auto':
        return GPU_CONFIGS.get(config_name, GPU_CONFIGS['single_gpu'])
    # 'auto': the multi-GPU preset applies only when more than one
    # device is visible; CPU-only and single-GPU both map to 'single_gpu'.
    if torch.cuda.is_available() and torch.cuda.device_count() > 1:
        return GPU_CONFIGS['multi_gpu']
    return GPU_CONFIGS['single_gpu']