import torch  
import torch.nn as nn 
# Pick the GPU when one is available, otherwise fall back to the CPU.
# NOTE(review): `device` is never used to move the models or tensors below —
# everything actually runs on CPU; confirm whether .to(device) was intended.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Training data for the line y = 2x - 1, shaped (N, 1) so it feeds
# directly into the nn.Linear layers defined later in this file.
X = torch.tensor([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]).reshape(-1, 1)
y = torch.tensor([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0]).reshape(-1, 1)
# Single-layer perceptron: one Linear(1 -> 1) unit, i.e. y = w*x + b.
class Perceptron1(nn.Module):
    """Minimal linear model mapping a (N, 1) input to a (N, 1) output."""

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(1, 1)

    def forward(self, x):
        # Call the submodule itself instead of `self.fc.forward(x)`:
        # invoking .forward() directly bypasses nn.Module.__call__ and
        # therefore skips any registered hooks.
        return self.fc(x)
# Train the single-layer perceptron with mean-squared-error loss and SGD.
perceptron_model = Perceptron1()
perceptron_loss_fn = nn.MSELoss(reduction='mean')
perceptron_optimizer = torch.optim.SGD(perceptron_model.parameters(), lr=0.01)
for epoch in range(500):
    # X and y are already shaped (N, 1); the original added a second
    # .unsqueeze(1) here, producing (N, 1, 1) tensors — the loss value is
    # the same, but the extra dimension is pointless, so feed them as-is.
    y_pred = perceptron_model(X)
    loss = perceptron_loss_fn(y_pred, y)
    perceptron_optimizer.zero_grad()
    loss.backward()
    perceptron_optimizer.step()
    if (epoch+1) % 50 == 0:
        print(f'Epoch [{epoch+1}/500], Loss: {loss.item():.4f}')
# Persist the learned parameters for later reloading.
perceptron_path = 'perceptron_model.pth'
torch.save(perceptron_model.state_dict(), perceptron_path)
print("Saved perceptron model to perceptron_model.pth")

# Multi-layer perceptron: Linear(1 -> 2) -> ReLU -> Linear(2 -> 1).
class Perceptron2(nn.Module):
    """Small two-layer MLP mapping a (N, 1) input to a (N, 1) output."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(1, 2)
        self.fc2 = nn.Linear(2, 1)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        return self.fc2(hidden)
# Train the two-layer MLP with mean-squared-error loss and SGD.
net_model = Perceptron2()
net_loss_fn = nn.MSELoss(reduction='mean')
net_optimizer = torch.optim.SGD(net_model.parameters(), lr=0.01)
for epoch in range(500):
    # X and y are already (N, 1); drop the redundant extra .unsqueeze(1)
    # the original applied here (it produced (N, 1, 1) tensors for no gain).
    y_pred = net_model(X)
    loss = net_loss_fn(y_pred, y)
    net_optimizer.zero_grad()
    loss.backward()
    net_optimizer.step()
    if (epoch+1) % 50 == 0:
        print(f'Epoch [{epoch+1}/500], Loss: {loss.item():.4f}')
# Persist the learned parameters for later reloading.
net_path = 'net_model.pth'
torch.save(net_model.state_dict(), net_path)
print("Saved net model to net_model.pth")