import torch

def _cuda_cudnn_compatible(cuda_version, cudnn_version):
    """Rough CUDA/cuDNN pairing check based on NVIDIA's support matrix.

    Args:
        cuda_version: CUDA toolkit version string, e.g. "11.8".
        cudnn_version: cuDNN version as an int, e.g. 8200 (torch's encoding).

    Returns:
        True if the pair looks compatible, False if it looks mismatched,
        None if the CUDA major version is unknown/unparseable.
    """
    try:
        major = int(str(cuda_version).split('.')[0])
    except (ValueError, AttributeError):
        return None
    # Minimum cuDNN version (torch's integer encoding) per CUDA major release.
    if major == 10:
        return cudnn_version >= 7600
    if major == 11:
        return cudnn_version >= 8000
    if major >= 12:
        # CUDA 12.x requires cuDNN 8.9+ (encoded as 8900) or newer.
        return cudnn_version >= 8900
    return None


def check_gpu_info():
    """Print GPU availability, per-device name/memory, and CUDA/cuDNN versions.

    Purely informational: prints to stdout and returns None. Safe to call on
    CPU-only machines (prints "GPU is not available").
    """
    if not torch.cuda.is_available():
        print("GPU is not available")
        return

    print("GPU is available")
    gpu_count = torch.cuda.device_count()
    print(f"Number of GPUs: {gpu_count}")

    # Report name and total memory for each visible device.
    for i in range(gpu_count):
        print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
        total_memory = torch.cuda.get_device_properties(i).total_memory
        # total_memory is in bytes; convert to GiB for readability.
        print(f"Total memory of GPU {i}: {total_memory / (1024 ** 3):.2f} GB")

    # CUDA toolkit version torch was built against (string like "11.8", or None).
    cuda_version = torch.version.cuda
    print(f"CUDA version: {cuda_version}")

    # cuDNN version as an int (e.g. 8902), or None if cuDNN is unavailable.
    cudnn_version = torch.backends.cudnn.version()
    print(f"cuDNN version: {cudnn_version}")

    if cuda_version and cudnn_version:
        compatible = _cuda_cudnn_compatible(cuda_version, cudnn_version)
        if compatible:
            print("CUDA and cuDNN versions are compatible.")
        else:
            print("Warning: CUDA and cuDNN versions may not be compatible.")
    else:
        print("Warning: CUDA or cuDNN version information is missing.")

# Run the report only when executed as a script, not on import.
if __name__ == "__main__":
    check_gpu_info()



