import torch
import torch.nn as nn
import torch.optim as optim

def main(batch_size=64, input_size=1000, hidden_size=100, num_classes=10, lr=0.01):
    """Smoke-test a forward/backward pass on the best available device.

    Builds a small two-layer MLP, runs one forward pass, one SGD step,
    and re-evaluates the loss to confirm the parameters were updated.

    Args:
        batch_size: number of random samples in the batch.
        input_size: dimensionality of each input vector.
        hidden_size: width of the hidden layer.
        num_classes: number of target classes.
        lr: SGD learning rate.

    Returns:
        The post-update loss as a Python float (handy for verification;
        previous callers that ignored the return value are unaffected).
    """
    # Prefer CUDA when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    if device.type == 'cuda':
        # Bug fix: this line previously printed torch.version.cuda, duplicating
        # the line below. Report the PyTorch build version instead.
        print(f"PyTorch 版本: {torch.__version__}")
        print(f"PyTorch CUDA 版本: {torch.version.cuda}")
        print(f"GPU 数量: {torch.cuda.device_count()}")
        print(f"当前使用的 GPU: {torch.cuda.current_device()} - {torch.cuda.get_device_name(torch.cuda.current_device())}")

    # A minimal MLP: Linear -> ReLU -> Linear.
    class SimpleNet(nn.Module):
        def __init__(self, input_size=1000, hidden_size=100, num_classes=10):
            super().__init__()
            self.fc1 = nn.Linear(input_size, hidden_size)
            self.relu = nn.ReLU()
            self.fc2 = nn.Linear(hidden_size, num_classes)

        def forward(self, x):
            out = self.fc1(x)
            out = self.relu(out)
            out = self.fc2(out)
            return out

    # Instantiate the network and move it to the device.
    model = SimpleNet(input_size, hidden_size, num_classes).to(device)
    print("网络已移动到设备.")

    # Random inputs and integer class targets, created directly on the
    # device (avoids a host-to-device copy versus .to(device) afterwards).
    inputs = torch.randn(batch_size, input_size, device=device)
    targets = torch.randint(0, num_classes, (batch_size,), device=device)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr)

    # Forward pass.
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    print(f"前向传播的损失: {loss.item()}")

    # Backward pass and a single optimizer step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print("反向传播和优化已完成.")

    # Re-evaluate on the same batch to confirm the parameters changed
    # (the loss should differ from the pre-update value).
    with torch.no_grad():
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        print(f"更新后的前向传播的损失: {loss.item()}")
    return loss.item()

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
