import torch
import subprocess
import re

def _nvcc_cuda_version():
    """Return the CUDA toolkit version string reported by ``nvcc``, or 'N/A'.

    Returns 'N/A' when nvcc is not installed, exits non-zero, or its
    output does not contain a ``V<digits.digits>`` version token.
    """
    try:
        result = subprocess.run(['nvcc', '--version'], capture_output=True, text=True)
    except (FileNotFoundError, OSError):
        # nvcc not on PATH (common on machines with only the runtime installed).
        return 'N/A'
    if result.returncode != 0:
        return 'N/A'
    match = re.search(r'V([\d.]+)', result.stdout)
    return match.group(1) if match else 'N/A'


def check_gpu_info():
    """Print GPU, CUDA, and cuDNN diagnostics to stdout.

    Reports GPU availability and count, per-device memory statistics,
    the CUDA toolkit version (probed via ``nvcc``), the cuDNN version
    reported by PyTorch, and a rough major-version comparison between
    the two. Returns None; all output goes to stdout.
    """
    # Bail out early when no CUDA device is visible to PyTorch.
    if not torch.cuda.is_available():
        print("GPU Available: False")
        return

    print("GPU Available: True")
    gpu_count = torch.cuda.device_count()
    print(f"GPU Count: {gpu_count}")

    # Per-device name and memory statistics (all converted to MB).
    for i in range(gpu_count):
        device_name = torch.cuda.get_device_name(i)
        memory_allocated = torch.cuda.memory_allocated(i) / (1024 ** 2)  # MB
        memory_cached = torch.cuda.memory_reserved(i) / (1024 ** 2)      # MB
        total_memory = torch.cuda.get_device_properties(i).total_memory / (1024 ** 2)  # MB

        print(f"Device {i}: {device_name}")
        print(f"  Memory Allocated (MB): {memory_allocated:.2f}")
        print(f"  Memory Cached (MB): {memory_cached:.2f}")
        print(f"  Total Memory (MB): {total_memory:.2f}")

    # CUDA toolkit version; 'N/A' when nvcc is missing or unparsable
    # (previously an uncaught FileNotFoundError / AttributeError).
    cuda_version_str = _nvcc_cuda_version()
    print(f"CUDA Version: {cuda_version_str}")

    # cuDNN version; initialize to 'N/A' so the comparison below never
    # hits an unbound name when the lookup fails (previously a NameError).
    cudnn_version = 'N/A'
    try:
        import torch.backends.cudnn as cudnn
        cudnn_version = cudnn.version()
        print(f"cuDNN Version: {cudnn_version}")
    except Exception:
        print("cuDNN Version: N/A")

    # Compare major versions of CUDA and cuDNN.
    # NOTE(review): cuDNN's integer encoding varies by release (e.g. 8902
    # vs 90100), so ``// 1000`` may not isolate the true major version,
    # and CUDA/cuDNN majors are not expected to be equal in practice —
    # treat this match flag as informational only.
    if cuda_version_str != 'N/A' and cudnn_version != 'N/A':
        cuda_major = int(cuda_version_str.split('.')[0])
        cudnn_major = int(cudnn_version // 1000)
        if cuda_major != cudnn_major:
            print("CUDA/cuDNN Version Match: False")
        else:
            print("CUDA/cuDNN Version Match: True")
    else:
        print("CUDA/cuDNN Version Match: N/A")

# Script entry point: run the diagnostics when executed directly.
if __name__ == "__main__":
    check_gpu_info()
