import torch
import torch.nn as nn
import torch.optim as optim
from perceptron_model import Perceptron
from net_model import Net

# Training hyperparameters
learning_rate = 0.01
num_epochs = 500

# Training data: six points lying exactly on the line y = 2x - 1
x_train = torch.tensor([[-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]])
y_train = torch.tensor([[-3.0], [-1.0], [1.0], [3.0], [5.0], [7.0]])

# Use CUDA for acceleration when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)
x_train = x_train.to(device)
y_train = y_train.to(device)

# Create the single-layer perceptron model and its optimizer.
# NOTE: the model is moved to the target device BEFORE the optimizer is
# constructed — the torch.optim docs require this ordering so the optimizer
# holds references to the device-resident parameters.
perceptron_model = Perceptron().to(device)
perceptron_optimizer = optim.SGD(perceptron_model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()

# Train the single-layer perceptron model with full-batch gradient descent
print("Training Perceptron model...")
for epoch in range(num_epochs):
    # Forward pass: predictions and MSE loss on the full training set
    outputs = perceptron_model(x_train)
    loss = criterion(outputs, y_train)

    # Backward pass and parameter update
    perceptron_optimizer.zero_grad()
    loss.backward()
    perceptron_optimizer.step()

    # Report progress every 50 epochs
    if (epoch + 1) % 50 == 0:
        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

# Save the trained perceptron weights (state_dict only, not the full module)
perceptron_model_path = "perceptron_model.pth"
torch.save(perceptron_model.state_dict(), perceptron_model_path)
print("Saved perceptron model to", perceptron_model_path)

# Create the multi-layer network model and its optimizer.
# NOTE: move the model to the device (CUDA acceleration when available)
# BEFORE constructing the optimizer, as the torch.optim docs recommend,
# so the optimizer references the device-resident parameters.
net_model = Net().to(device)
net_optimizer = optim.SGD(net_model.parameters(), lr=learning_rate)

# Train the multi-layer network model with full-batch gradient descent
print("Training Net model...")
for epoch in range(num_epochs):
    # Forward pass: predictions and MSE loss on the full training set
    outputs = net_model(x_train)
    loss = criterion(outputs, y_train)

    # Backward pass and parameter update
    net_optimizer.zero_grad()
    loss.backward()
    net_optimizer.step()

    # Report progress every 50 epochs
    if (epoch + 1) % 50 == 0:
        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

# Save the trained network weights (state_dict only, not the full module)
net_model_path = "net_model.pth"
torch.save(net_model.state_dict(), net_model_path)
print("Saved net model to", net_model_path)
