import torch
from torch import nn
import torch.functional as F
import math


def test():
    """Print a CUDA/GPU diagnostic report and demonstrate a CPU->GPU tensor move.

    Side effects (prints only, returns ``None``):
      * CUDA availability and GPU count
      * current device index (``None`` when CUDA is unavailable)
      * per-GPU name and allocated/reserved memory in bytes
      * the device of a sample tensor before and after ``.to('cuda')``
    """
    # Hoist the availability check — it is needed in three places below.
    cuda_ok = torch.cuda.is_available()

    # Check CUDA availability
    print(f"CUDA available: {cuda_ok}")

    # Check the number of GPUs
    print(f"Number of GPUs: {torch.cuda.device_count()}")

    # Check the currently active device; current_device() raises without CUDA,
    # so guard it and fall back to None.
    current_device = torch.cuda.current_device() if cuda_ok else None
    print(f"Current device: {current_device}")

    # If GPUs are present, show per-device details
    if cuda_ok:
        for i in range(torch.cuda.device_count()):
            print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
            print(f"  Memory allocated: {torch.cuda.memory_allocated(i)} bytes")
            print(f"  Memory cached: {torch.cuda.memory_reserved(i)} bytes")

    # Sample tensor — created on the CPU by default
    X = torch.randn(128, 64, 512)
    print(f"Tensor X device: {X.device}")

    # Move the tensor to the GPU (if available)
    if cuda_ok:
        X = X.to('cuda')
        print(f"Tensor X device after moving to GPU: {X.device}")

    print(X.shape)


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    test()