#!/usr/bin/env python3
"""
GPU状态检查工具
检查GPU可用性和内存使用情况
"""

import sys
import subprocess


def check_nvidia_smi():
    """Probe the NVIDIA driver tooling by running ``nvidia-smi``.

    Prints the command's stdout on success, or a failure notice when the
    binary is missing or exits non-zero.

    Returns:
        bool: True if ``nvidia-smi`` ran successfully, False otherwise.
    """
    try:
        proc = subprocess.run(
            ['nvidia-smi'], capture_output=True, text=True, check=True
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Driver utility absent or it reported an error — GPU tooling unusable.
        print("❌ nvidia-smi 不可用")
        return False
    print("=== NVIDIA-SMI 输出 ===")
    print(proc.stdout)
    return True


def check_pytorch_cuda():
    """Report PyTorch's CUDA support and per-GPU memory statistics.

    When CUDA is available, prints name, total/allocated/reserved/free
    memory (in GB) for every visible device, and warns when the free
    amount drops below 2 GB.

    Returns:
        bool: True if ``torch`` imports successfully, False if it is
        not installed.
    """
    try:
        import torch
    except ImportError:
        print("❌ PyTorch 未安装")
        return False

    print(f"✅ PyTorch 版本: {torch.__version__}")
    print(f"✅ CUDA 可用: {torch.cuda.is_available()}")

    if torch.cuda.is_available():
        print(f"✅ CUDA 版本: {torch.version.cuda}")
        print(f"✅ GPU 数量: {torch.cuda.device_count()}")

        gib = 1024 ** 3  # bytes per GiB, hoisted out of the loop
        for idx in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(idx)
            total = props.total_memory / gib
            allocated = torch.cuda.memory_allocated(idx) / gib
            reserved = torch.cuda.memory_reserved(idx) / gib
            # "Free" here means not reserved by the caching allocator.
            free = total - reserved

            print(f"  GPU {idx}: {props.name}")
            print(f"    总内存: {total:.2f} GB")
            print(f"    已分配: {allocated:.2f} GB")
            print(f"    已保留: {reserved:.2f} GB")
            print(f"    可用: {free:.2f} GB")

            if free < 2.0:
                print(f"    ⚠️  可用内存不足 ({free:.2f}GB < 2GB)")
            else:
                print(f"    ✅ 内存充足")
    return True


def check_totalsegmentator():
    """Check whether the TotalSegmentator CLI is installed and runnable.

    Runs ``TotalSegmentator --help`` and only inspects the exit status;
    on failure an install hint is printed.

    Returns:
        bool: True if the command exists and exits successfully,
        False otherwise.
    """
    try:
        # Fix: drop the unused `result` binding — only the exit status
        # matters here (check=True raises on non-zero exit).
        subprocess.run(['TotalSegmentator', '--help'],
                       capture_output=True, text=True, check=True)
        print("✅ TotalSegmentator 已安装")
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("❌ TotalSegmentator 未安装")
        print("   安装命令: pip install TotalSegmentator")
        return False


def get_gpu_processes():
    """Print the compute processes currently running on the GPU.

    Queries ``nvidia-smi`` in CSV mode and prints one line per process
    (PID, name, memory in MB). Best-effort: prints a notice and returns
    when ``nvidia-smi`` is unavailable or fails. Returns None.
    """
    query = [
        'nvidia-smi',
        '--query-compute-apps=pid,process_name,used_memory',
        '--format=csv,noheader,nounits',
    ]
    try:
        proc = subprocess.run(query, capture_output=True, text=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("\n❌ 无法获取GPU进程信息")
        return

    output = proc.stdout.strip()
    if not output:
        print("\n✅ 没有GPU进程在运行")
        return

    print("\n=== GPU 进程信息 ===")
    for row in output.split('\n'):
        fields = row.split(', ')
        # Skip malformed rows that do not carry all three columns.
        if len(fields) >= 3:
            pid, name, memory = fields[0], fields[1], fields[2]
            print(f"  PID {pid}: {name} (使用 {memory} MB)")


def main():
    """Entry point: run every environment check and print a summary.

    Executes the NVIDIA driver, PyTorch CUDA, and TotalSegmentator
    checks in order, lists GPU processes when the driver works, then
    prints an overall verdict with remediation hints for anything that
    failed.
    """
    print("🔍 GPU状态检查工具")
    print("=" * 50)

    nvidia_ok = check_nvidia_smi()
    print()

    pytorch_ok = check_pytorch_cuda()
    print()

    totalseg_ok = check_totalsegmentator()
    print()

    # Process listing needs a working nvidia-smi, so gate it on that check.
    if nvidia_ok:
        get_gpu_processes()
        print()

    print("=" * 50)
    print("📋 检查结果总结:")
    results = {
        'NVIDIA 驱动': nvidia_ok,
        'PyTorch CUDA': pytorch_ok,
        'TotalSegmentator': totalseg_ok,
    }
    for label, ok in results.items():
        print(f"  {label}: {'✅' if ok else '❌'}")

    if nvidia_ok and pytorch_ok and totalseg_ok:
        print("\n🎉 所有组件正常，可以使用GPU进行分割!")
        return

    print("\n⚠️  存在问题，建议:")
    if not nvidia_ok:
        print("  1. 检查NVIDIA驱动是否正确安装")
    if not pytorch_ok:
        print("  2. 安装支持CUDA的PyTorch版本")
    if not totalseg_ok:
        print("  3. 安装TotalSegmentator: pip install TotalSegmentator")
    print("  4. 或者使用CPU进行分割: --device cpu")


# Run the checks only when executed as a script, not when imported.
if __name__ == "__main__":
    main()