import torch
import torch.nn as nn
from perceptron_model import Perceptron
from net_model import Net

# Select the compute device: prefer the GPU (CUDA) when one is available,
# otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Training data, shaped (N, 1) and created directly on the chosen device.
# The targets follow y = 2x - 1 for each input x.
X = torch.tensor([[-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]], device=device)
y = torch.tensor([[-3.0], [-1.0], [1.0], [3.0], [5.0], [7.0]], device=device)

# Train each model on the same (X, y) data, then save its learned weights.
# The models are moved to `device` once, at construction time.
models = [Perceptron().to(device), Net().to(device)]
for model in models:
    # Mean-squared-error loss, averaged over the batch.
    loss_fnc = nn.MSELoss(reduction='mean')
    # Plain SGD over all of this model's parameters.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    print(f"开始在{device}上训练: {model.__class__.__name__}")
    for epoch in range(500):
        # Forward pass. X and y are already shaped (N, 1); the previous
        # extra .unsqueeze(1) produced spurious (N, 1, 1) tensors, which
        # only worked by accident for purely linear models.
        y_pred = model(X)
        loss = loss_fnc(y_pred, y)
        # Standard backprop step: clear stale gradients, backpropagate, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Report progress every 50 epochs.
        if (epoch + 1) % 50 == 0:
            print(f'Epoch [{epoch+1}/500], Loss: {loss.item():.4f}')

    # Persist the trained weights as <ClassName>_model.pth.
    # Save BEFORE announcing success so the log cannot claim a save
    # that never happened.
    params_path = f'{model.__class__.__name__}_model.pth'
    torch.save(model.state_dict(), params_path)
    print(f"Saved {model.__class__.__name__} model to {params_path}")