import torch
import torch.nn as nn
from perceptron_model import Perceptron
from net_model import Net

# Select the compute device: prefer CUDA when available, else fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Training data for the linear relation y = 2x - 1, as (6, 1) column vectors
# moved onto the selected device.
x_values = [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]
y_values = [-3.0, -1.0, 1.0, 3.0, 5.0, 7.0]
X = torch.tensor(x_values).unsqueeze(1).to(device)
y = torch.tensor(y_values).unsqueeze(1).to(device)

# Instantiate both models and move them onto the selected device.
model1 = Perceptron().to(device)  # perceptron model instance
model2 = Net().to(device)         # second model instance

# Mean-squared-error loss; 'mean' is MSELoss's default reduction, stated
# explicitly here for clarity.
loss_fnc = nn.MSELoss(reduction='mean')

# One plain SGD optimizer per model, sharing the same learning rate.
learning_rate = 0.01
optimizer1 = torch.optim.SGD(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.SGD(model2.parameters(), lr=learning_rate)

# 训练
# Train model1 with 500 epochs of full-batch gradient descent.
for epoch in range(500):
    optimizer1.zero_grad()       # clear gradients left over from the last step
    pred = model1(X)             # forward pass over the whole dataset
    loss1 = loss_fnc(pred, y)    # MSE against the targets
    loss1.backward()             # backpropagate
    optimizer1.step()            # apply the parameter update

    # Report progress every 50 epochs.
    if (epoch + 1) % 50 == 0:
        print(f'Epoch [{epoch+1}/500], Loss1: {loss1.item():.4f}')

# Switch to inference mode, then persist the learned parameters
# (state_dict only — the model class is needed to reload).
model1.eval()
torch.save(model1.state_dict(), 'perceptron_model.pth')
print("Saved perceptron model to perceptron_model.pth")

# Train model2 with the same full-batch SGD schedule.
for epoch in range(500):
    output = model2(X)             # forward pass
    loss2 = loss_fnc(output, y)    # MSE loss
    optimizer2.zero_grad()         # reset gradients
    loss2.backward()               # backpropagate
    optimizer2.step()              # update parameters
    if (epoch + 1) % 50 == 0:      # periodic progress report
        print(f'Epoch [{epoch+1}/500], Loss2: {loss2.item():.4f}')

# Inference mode, then save the trained weights (state_dict only).
model2.eval()
torch.save(model2.state_dict(), 'net_model.pth')
print("Saved net model to net_model.pth")