import torch
import torch.nn as nn
import torchvision

if __name__ == '__main__':
    # Environment diagnostics: print PyTorch / cuDNN / CUDA versions and,
    # when a GPU is actually available, per-device details and memory usage.
    print(torch.__version__)
    # cudnn.version() returns None on builds without cuDNN — safe to call anywhere.
    print(torch.backends.cudnn.version())

    cuda_available = torch.cuda.is_available()
    print(cuda_available)

    # torch.version.cuda is None on CPU-only builds; device_count() is 0 —
    # both are safe to print unconditionally.
    print(f'Cuda version: {torch.version.cuda}')
    print(f'GPU num: {torch.cuda.device_count()}')

    if cuda_available:
        # These calls raise on machines without CUDA, so they must be
        # guarded — otherwise the CPU fallback below is unreachable.
        print(f'GPU name: {torch.cuda.get_device_name()}')
        print('Current device:', torch.cuda.current_device())
        print(torch.cuda.get_device_capability())

    device = torch.device('cuda') if cuda_available else torch.device('cpu')
    # Allocate a ~1 GiB float32 tensor (1*1024*1024*256 elements * 4 bytes)
    # so the memory stats below show a non-trivial allocation.
    x = torch.empty([1, 1024, 1024, 2**8]).to(device)

    if cuda_available:
        # memory_allocated/memory_reserved raise on CPU-only setups and are
        # meaningless without a GPU, so only report them when CUDA is present.
        print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024**3, 1), 'GB')
        print('Cached:   ', round(torch.cuda.memory_reserved(0) / 1024**3, 1), 'GB')
    print('Done')