#!/usr/bin/env python3
"""
GPU重置脚本
在运行vLLM之前彻底清理GPU状态，解决内存检查问题
"""

import gc
import os
import signal
import subprocess
import sys
import time

# 添加项目根目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

def kill_all_gpu_processes():
    """
    Terminate every process currently holding GPU compute resources.

    Queries `nvidia-smi` for compute-app PIDs and sends SIGKILL to each one,
    skipping this script's own PID so the reset can never kill itself.
    All failures (query errors, vanished processes, missing permissions) are
    reported and tolerated — this is best-effort cleanup, not a hard step.
    """
    print("正在终止所有GPU进程...")
    try:
        # Ask nvidia-smi for the PIDs of all compute processes on the GPUs.
        result = subprocess.run(['nvidia-smi', '--query-compute-apps=pid', '--format=csv,noheader'],
                              capture_output=True, text=True, check=True)

        pids = []
        own_pid = os.getpid()
        for line in result.stdout.strip().split('\n'):
            line = line.strip()
            if not line:
                continue
            try:
                pid = int(line)
            except ValueError:
                # Skip malformed entries (e.g. "[N/A]" placeholders).
                continue
            # Never kill this script itself, even if it shows up on the GPU.
            if pid != own_pid:
                pids.append(pid)

        if pids:
            print(f"发现GPU进程: {pids}")
            for pid in pids:
                try:
                    # SIGKILL: forceful, cannot be caught or ignored.
                    os.kill(pid, signal.SIGKILL)
                    print(f"已终止进程 {pid}")
                except ProcessLookupError:
                    print(f"进程 {pid} 已不存在")
                except PermissionError:
                    print(f"无权限终止进程 {pid}")
        else:
            print("没有发现GPU进程")

    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        print(f"获取GPU进程信息失败: {e}")

    # Give the killed processes time to fully release their GPU memory.
    time.sleep(3)

def reset_cuda_context():
    """
    Flush CUDA caches on every visible GPU device via PyTorch.

    Best-effort: prints and skips when PyTorch is not installed or CUDA is
    unavailable; any other failure is reported instead of raised.
    """
    print("正在重置CUDA上下文...")
    try:
        import torch

        # Guard clause: nothing to do without a usable CUDA runtime.
        if not torch.cuda.is_available():
            print("CUDA不可用")
            return

        n_devices = torch.cuda.device_count()
        print(f"检测到 {n_devices} 个GPU设备")

        # Empty the allocator cache and drain pending work on each device.
        for idx in range(n_devices):
            with torch.cuda.device(idx):
                torch.cuda.empty_cache()
                torch.cuda.synchronize()
            print(f"已清理GPU {idx} 的CUDA缓存")

        # One final pass on the current device for good measure.
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
        print("CUDA上下文重置完成")
    except ImportError:
        print("PyTorch不可用，跳过CUDA重置")
    except Exception as e:
        print(f"CUDA重置失败: {e}")

def cleanup_system_memory():
    """
    Release host-side memory: run a Python GC pass and sync the filesystem.
    """
    print("正在清理系统内存...")

    # Drop unreferenced Python objects immediately.
    gc.collect()

    # Flush filesystem buffers to disk; purely best-effort, so a missing
    # or failing `sync` binary is ignored silently.
    try:
        subprocess.run(['sync'], check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        return
    print("已同步文件系统")

def wait_for_gpu_stable(wait_seconds=10):
    """
    Pause for `wait_seconds`, then print a per-GPU memory summary.

    Args:
        wait_seconds: Seconds to sleep before sampling GPU state (default 10).

    Robustness fix: the original crashed with an uncaught ValueError /
    IndexError when nvidia-smi emitted non-numeric fields (e.g. "[N/A]")
    or a short row; such lines are now reported and skipped.
    """
    print(f"等待GPU状态稳定 ({wait_seconds}秒)...")
    time.sleep(wait_seconds)

    # Sample and report the final GPU memory state.
    try:
        result = subprocess.run(['nvidia-smi', '--query-gpu=index,memory.used,memory.total',
                               '--format=csv,noheader,nounits'],
                              capture_output=True, text=True, check=True)

        print("当前GPU内存状态:")
        for line in result.stdout.strip().split('\n'):
            if not line.strip():
                continue
            # Split on bare commas and strip each field — more tolerant
            # than the exact ", " separator nvidia-smi usually emits.
            parts = [p.strip() for p in line.split(',')]
            try:
                gpu_id = int(parts[0])
                used_mb = int(parts[1])
                total_mb = int(parts[2])
            except (ValueError, IndexError):
                # Malformed or "[N/A]" fields — report raw line, keep going.
                print(f"  无法解析GPU状态行: {line.strip()}")
                continue

            used_gb = used_mb / 1024
            total_gb = total_mb / 1024
            free_gb = (total_mb - used_mb) / 1024

            print(f"  GPU {gpu_id}: 使用 {used_gb:.1f}GB, 可用 {free_gb:.1f}GB / {total_gb:.1f}GB")

    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        print(f"无法获取GPU状态: {e}")

def main():
    """
    Run the complete GPU reset pipeline: kill GPU processes, reset the
    CUDA context, clean host memory, then wait for the GPUs to stabilize.
    """
    banner = "=" * 60
    print(banner)
    print("开始GPU重置流程")
    print(banner)

    # Step 1: kill every process holding GPU memory.
    kill_all_gpu_processes()

    # Step 2: drop cached CUDA allocations.
    reset_cuda_context()

    # Step 3: reclaim host-side memory.
    cleanup_system_memory()

    # Step 4: let the driver settle, then report the final state.
    wait_for_gpu_stable(10)

    print(banner)
    print("GPU重置完成")
    print(banner)

# Execute the full GPU reset pipeline when run directly as a script.
if __name__ == "__main__":
    main() 