import torch.utils
import torchvision
import torch.utils.cpp_extension

# Environment diagnostics: print a report of the local PyTorch / CUDA setup.
# Safe on CPU-only machines: every query that requires an initialized CUDA
# context is guarded, instead of crashing the report halfway through.

print("PyTorch version:", torch.__version__)
# torchvision is only needed for its version string; don't let a missing
# install abort the whole diagnostic run.
try:
    import torchvision
    print("Torchvision version:", torchvision.__version__)
except ImportError:
    print("Torchvision version:", "\n", ">>>>>", "not installed")

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(type(device))
print('Using device:', "\n", ">>>>>", device)

# Whether a CUDA-capable GPU is visible to this build of PyTorch.
cuda_ok = torch.cuda.is_available()
print("torch.cuda.is_available:", "\n", ">>>>>", cuda_ok)
# CUDA version PyTorch was compiled against (None for CPU-only builds).
print("torch.version.cuda : ", "\n", ">>>>>", torch.version.cuda)
# CUDA toolkit root PyTorch is linked against (None if not found).
print("torch.utils.cpp_extension.CUDA_HOME : ", "\n", ">>>>>", torch.utils.cpp_extension.CUDA_HOME)
# Number of visible GPUs (0 on CPU-only machines).
num = torch.cuda.device_count()
print("torch.cuda.device_count:", "\n", ">>>>>", num)

if cuda_ok:
    # These queries raise on CPU-only machines, so keep them guarded.
    # GPU model name of device 0.
    print("torch.cuda.get_device_name(0):", "\n", ">>>>>", torch.cuda.get_device_name(0))
    # Index of the currently selected device.
    print("torch.cuda.current_device():", "\n", ">>>>>", torch.cuda.current_device())
    # Compute capability (major, minor) of device 0.
    print("torch.cuda.get_device_capability(device=0):", "\n", ">>>>>", torch.cuda.get_device_capability(device=0))
    # Currently active CUDA stream on device 0.
    print("torch.cuda.current_stream(device=0):", "\n", ">>>>>", torch.cuda.current_stream(device=0))

# cuDNN availability and version (version() returns None when absent).
print("torch.backends.cudnn.is_available() : ", "\n", ">>>>>", torch.backends.cudnn.is_available())
print("torch.backends.cudnn.version() : ", "\n", ">>>>>", torch.backends.cudnn.version())

# Per-device properties; the list is simply empty without GPUs.
infos = [torch.cuda.get_device_properties(i) for i in range(num)]
for info in infos:
    print("info :", "\n", ">>>>>", info)

# Additional info when using cuda
if device.type == 'cuda':
    # Free/total device memory in bytes — needs an active CUDA context.
    print("torch.cuda.mem_get_info : ", "\n", ">>>>>", torch.cuda.mem_get_info())
    print("Device name : ", "\n", ">>>>>", torch.cuda.get_device_name(0))
    print('Memory Usage:', "\n")
    print('Allocated:', "\n", ">>>>>", round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1), 'GB')
    print('Cached:   ', "\n", ">>>>>", round(torch.cuda.memory_reserved(0) / 1024 ** 3, 1), 'GB')