#!/usr/bin/env python3
"""
GPU内存管理工具模块

提供GPU内存清理和状态检查功能，用于解决vLLM内存管理问题
"""

import gc
import logging
import os
import signal
import subprocess
import time
from typing import Dict, List, Optional

logger = logging.getLogger(__name__)


def get_gpu_memory_info() -> Dict[int, Dict[str, int]]:
    """
    Query memory usage for all visible GPUs via ``nvidia-smi``.

    Returns:
        Dict[int, Dict[str, int]]: mapping of GPU index to a dict with
        ``'used'``, ``'total'`` and ``'free'`` memory in MiB. Returns an
        empty dict when ``nvidia-smi`` is unavailable or its output
        cannot be parsed.
    """
    try:
        result = subprocess.run(
            ['nvidia-smi', '--query-gpu=index,memory.used,memory.total',
             '--format=csv,noheader,nounits'],
            capture_output=True, text=True, check=True)

        gpu_info: Dict[int, Dict[str, int]] = {}
        for line in result.stdout.strip().split('\n'):
            if not line.strip():
                continue
            # Split on bare commas and strip each field so small format
            # variations in nvidia-smi output do not break parsing.
            parts = [p.strip() for p in line.split(',')]
            gpu_id = int(parts[0])
            used_mb = int(parts[1])
            total_mb = int(parts[2])
            gpu_info[gpu_id] = {'used': used_mb, 'total': total_mb,
                                'free': total_mb - used_mb}

        return gpu_info
    except (subprocess.CalledProcessError, FileNotFoundError,
            ValueError, IndexError) as e:
        # ValueError/IndexError cover malformed rows (e.g. "[N/A]" fields),
        # which previously escaped as an uncaught exception.
        logger.warning(f"无法获取GPU内存信息: {e}")
        return {}


def kill_gpu_processes(gpu_ids: Optional[List[int]] = None) -> bool:
    """
    Kill all compute processes running on the given GPUs.

    Args:
        gpu_ids: GPU indices whose processes should be killed; ``None``
            means processes on every GPU.

    Returns:
        bool: True when the cleanup pass completed (including the case
        of nothing to kill), False when nvidia-smi could not be queried
        or its output could not be parsed.
    """
    try:
        # List compute processes as "<gpu_uuid>, <pid>" rows.
        result = subprocess.run(
            ['nvidia-smi', '--query-compute-apps=gpu_uuid,pid',
             '--format=csv,noheader'],
            capture_output=True, text=True, check=True)

        if not result.stdout.strip():
            logger.info("没有发现GPU进程")
            return True

        # Resolve the UUIDs of the GPUs we are allowed to touch.
        target_uuids = set()
        if gpu_ids is not None:
            gpu_uuid_result = subprocess.run(
                ['nvidia-smi', '--query-gpu=index,uuid',
                 '--format=csv,noheader'],
                capture_output=True, text=True, check=True)
            for line in gpu_uuid_result.stdout.strip().split('\n'):
                if not line.strip():
                    continue
                parts = [p.strip() for p in line.split(',')]
                if int(parts[0]) in gpu_ids:
                    target_uuids.add(parts[1])

        # Collect PIDs that live on the targeted GPUs.
        pids_to_kill = []
        for line in result.stdout.strip().split('\n'):
            if not line.strip():
                continue
            parts = [p.strip() for p in line.split(',')]
            gpu_uuid = parts[0]
            pid = int(parts[1])
            if gpu_ids is None or gpu_uuid in target_uuids:
                pids_to_kill.append(pid)

        if pids_to_kill:
            logger.info(f"正在终止GPU进程: {pids_to_kill}")
            for pid in pids_to_kill:
                try:
                    # Named constant instead of the magic number 9.
                    os.kill(pid, signal.SIGKILL)
                    logger.info(f"已终止进程 {pid}")
                except ProcessLookupError:
                    logger.info(f"进程 {pid} 已不存在")
                except PermissionError:
                    logger.warning(f"无权限终止进程 {pid}")

        return True

    except (subprocess.CalledProcessError, FileNotFoundError, ValueError) as e:
        # ValueError covers malformed nvidia-smi rows that fail int().
        logger.error(f"清理GPU进程失败: {e}")
        return False


def clear_gpu_memory(gpu_ids: Optional[List[int]] = None, force: bool = True) -> bool:
    """
    Free GPU memory via garbage collection, CUDA cache release and
    (optionally) killing the processes that hold it.

    Args:
        gpu_ids: GPU indices to clean; ``None`` means every GPU.
        force: when True, also kill the processes occupying the GPUs.

    Returns:
        bool: always True; individual failures are only logged.
    """
    logger.info("开始清理GPU内存...")

    # Step 1: reclaim Python-side garbage first.
    gc.collect()

    # Step 2: drop cached CUDA allocations when torch is importable.
    try:
        import torch
    except ImportError:
        logger.info("PyTorch不可用，跳过CUDA缓存清理")
    else:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
            logger.info("已清理CUDA缓存")

    # Step 3: optionally kill lingering GPU processes.
    if force:
        if not kill_gpu_processes(gpu_ids):
            logger.warning("GPU进程清理失败")
        # Give the killed processes a moment to fully exit.
        time.sleep(2)

    # Step 4: report the resulting per-GPU usage for verification.
    for gpu_id, info in get_gpu_memory_info().items():
        if gpu_ids is None or gpu_id in gpu_ids:
            used_gb = info['used'] / 1024
            total_gb = info['total'] / 1024
            logger.info(f"GPU {gpu_id}: 使用 {used_gb:.1f}GB / {total_gb:.1f}GB")

    logger.info("GPU内存清理完成")
    return True


def check_memory_requirements(required_memory_gb: float, gpu_ids: Optional[List[int]] = None) -> bool:
    """
    Verify that each target GPU has at least the requested free memory.

    Args:
        required_memory_gb: free memory needed per GPU, in GB.
        gpu_ids: GPU indices to check; ``None`` means every visible GPU.

    Returns:
        bool: True when every target GPU exists and has enough free
        memory, False otherwise (including when GPU info is unavailable).
    """
    gpu_info = get_gpu_memory_info()
    if not gpu_info:
        logger.warning("无法获取GPU内存信息")
        return False

    targets = list(gpu_info) if gpu_ids is None else gpu_ids

    for gpu_id in targets:
        info = gpu_info.get(gpu_id)
        if info is None:
            logger.error(f"GPU {gpu_id} 不存在")
            return False

        free_gb = info['free'] / 1024
        if free_gb < required_memory_gb:
            logger.error(f"GPU {gpu_id} 可用内存不足: {free_gb:.1f}GB < {required_memory_gb:.1f}GB")
            return False

        logger.info(f"GPU {gpu_id} 可用内存: {free_gb:.1f}GB")

    return True


def setup_gpu_environment() -> None:
    """
    Export environment variables that tune CUDA / vLLM memory behaviour.

    Mutates ``os.environ`` in place; call this before CUDA or vLLM is
    initialized so the settings take effect.
    """
    env_settings = {
        # Let the CUDA caching allocator grow segments on demand.
        'PYTORCH_CUDA_ALLOC_CONF': 'expandable_segments:True',
        # NOTE(review): '1' ENABLES synchronous (blocking) kernel launches —
        # a debugging aid that slows execution; confirm this is intended.
        'CUDA_LAUNCH_BLOCKING': '1',
        # vLLM tweaks.
        'VLLM_TORCH_COMPILE_LEVEL': '0',
        'VLLM_DISABLE_CUSTOM_ALL_REDUCE': '1',
        # vLLM V1 engine memory-management settings (memory-check workaround).
        'VLLM_ALLOW_RUNTIME_LORA_UPDATING': '1',
        'VLLM_DISABLE_STRICT_MEMORY_CHECK': '1',
        # Conservative GPU memory budget.
        'VLLM_GPU_MEMORY_UTILIZATION': '0.85',
    }
    os.environ.update(env_settings)

    logger.info("已设置GPU环境变量")


def wait_for_gpu_ready(max_wait_seconds: int = 30) -> bool:
    """
    Poll ``nvidia-smi`` once per second until it succeeds or time runs out.

    Args:
        max_wait_seconds: maximum number of 1-second polling attempts.

    Returns:
        bool: True once ``nvidia-smi`` exits successfully, False when the
        timeout elapses or the binary is not installed at all.
    """
    logger.info("等待GPU准备就绪...")

    for _ in range(max_wait_seconds):
        try:
            # check=True raises on a non-zero exit, so reaching the next
            # line already means the driver responded — the previous
            # `returncode == 0` re-check was dead code.
            subprocess.run(['nvidia-smi'], capture_output=True, check=True)
            logger.info("GPU已准备就绪")
            return True
        except FileNotFoundError:
            # nvidia-smi is missing entirely; retrying cannot help.
            # Previously this escaped as an uncaught exception.
            logger.error("未找到 nvidia-smi，无法检测GPU状态")
            return False
        except subprocess.CalledProcessError:
            pass

        time.sleep(1)

    logger.error(f"GPU在 {max_wait_seconds} 秒内未准备就绪")
    return False


def reset_gpu_state(gpu_ids: Optional[List[int]] = None, wait_time: int = 5) -> bool:
    """
    Fully reset GPU state to work around vLLM memory-check failures.

    Args:
        gpu_ids: GPU indices to reset; ``None`` resets every GPU.
        wait_time: seconds to pause after the reset so state settles.

    Returns:
        bool: always True; failures along the way are only logged.
    """
    logger.info("开始重置GPU状态...")

    # Kill any processes still holding GPU memory.
    if not kill_gpu_processes(gpu_ids):
        logger.warning("GPU进程清理失败")

    # Let the killed processes actually exit before touching the devices.
    time.sleep(2)

    # Flush CUDA caches globally and per device when torch is importable.
    try:
        import torch
    except ImportError:
        logger.info("PyTorch不可用，跳过CUDA设备重置")
    else:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()

            n_devices = torch.cuda.device_count()
            devices = range(n_devices) if gpu_ids is None else gpu_ids
            for dev in devices:
                # Skip indices beyond the devices torch can see.
                if dev < n_devices:
                    with torch.cuda.device(dev):
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()

            logger.info("已重置CUDA设备状态")

    # Reclaim Python-side garbage as well.
    gc.collect()

    logger.info(f"等待GPU状态稳定 ({wait_time}秒)...")
    time.sleep(wait_time)

    # Log the post-reset memory picture for the targeted GPUs.
    for gpu_id, info in get_gpu_memory_info().items():
        if gpu_ids is None or gpu_id in gpu_ids:
            used_gb = info['used'] / 1024
            total_gb = info['total'] / 1024
            free_gb = info['free'] / 1024
            logger.info(f"GPU {gpu_id}: 使用 {used_gb:.1f}GB, 可用 {free_gb:.1f}GB / {total_gb:.1f}GB")

    logger.info("GPU状态重置完成")
    return True