# @FileName  : test6.py
# @Time      : 2025/2/19 10:18
# @Author    : LuZhaoHui
# @Software  : PyCharm

# import torch

# 输出带CPU，表示torch是CPU版本的，否则会是+cuxxx
# print(f'torch的版本是：{torch.__version__}')
#
# print(f'torch是否能使用cuda：{torch.cuda.is_available()}')

# import subprocess
#
# try:
#     # 执行nvidia-smi命令并获取输出
#     result = subprocess.check_output('nvidia-smi', shell=True, text=True)
#     print(result)
# except subprocess.CalledProcessError:
#     print("nvidia-smi命令可能未找到，请确保安装了NVIDIA驱动和CUDA Toolkit。")

import torch

# Diagnostic script: report CUDA availability and per-GPU memory usage.
if torch.cuda.is_available():
    print("CUDA可用")
    # Number of visible CUDA devices.
    num_gpus = torch.cuda.device_count()
    print(f"检测到 {num_gpus} 个GPU设备。")

    # Report name and memory stats for EACH GPU individually.
    for i in range(num_gpus):
        # Bug fix: pass the loop index `i` explicitly. The original called
        # memory_allocated() with no argument and get_device_properties(
        # torch.cuda.current_device()), so every iteration reported the
        # current default device's stats instead of GPU i's.
        print(f"GPU {i}: {torch.cuda.get_device_name(i)} allocated {torch.cuda.memory_allocated(i)}")
        device_props = torch.cuda.get_device_properties(i)
        total_memory = device_props.total_memory / 1024 ** 2  # bytes -> MB
        allocated_memory = torch.cuda.memory_allocated(i) / 1024 ** 2  # bytes -> MB
        # NOTE: "free" here is total minus tensors allocated by this process;
        # it ignores the CUDA caching allocator's reserved-but-unused memory
        # and other processes (use torch.cuda.mem_get_info for true free VRAM).
        free_memory = total_memory - allocated_memory
        print(f"Total memory: {total_memory} MB")
        print(f"Allocated memory: {allocated_memory} MB")
        print(f"Free memory: {free_memory} MB")
else:
    print("CUDA不可用，请检查CUDA安装和驱动。")