import torch
import torch.nn as nn
import torch.optim as optim

# Model definitions
class Perceptron(nn.Module):
    """Single-layer perceptron: one linear map with no activation.

    Args:
        in_features: input width (default 1, matching the original
            hard-coded size, so ``Perceptron()`` behaves as before).
        out_features: output width (default 1).
    """

    def __init__(self, in_features: int = 1, out_features: int = 1):
        super().__init__()
        self.fc = nn.Linear(in_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the single linear layer to ``x``."""
        return self.fc(x)

class MLP(nn.Module):
    """Two-layer perceptron: Linear -> ReLU -> Linear.

    Args:
        in_features: input width (default 1, matching the original
            hard-coded size, so ``MLP()`` behaves as before).
        hidden_features: hidden-layer width (default 64).
        out_features: output width (default 1).
    """

    def __init__(self, in_features: int = 1, hidden_features: int = 64,
                 out_features: int = 1):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_features)
        # Kept as a module attribute (not functional F.relu) so existing
        # code that inspects ``self.relu`` keeps working.
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """fc1 -> ReLU -> fc2."""
        return self.fc2(self.relu(self.fc1(x)))

# Pick the GPU when available and build the toy dataset (y = 2x - 1)
# as column vectors directly on that device.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
X = torch.tensor([[-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]],
                 dtype=torch.float32, device=device)
y = torch.tensor([[-3.0], [-1.0], [1.0], [3.0], [5.0], [7.0]],
                 dtype=torch.float32, device=device)

# Instantiate both models and move them to the same device as the data.
model_perceptron = Perceptron().to(device)
model_mlp = MLP().to(device)

# Shared MSE loss; one SGD optimizer per model.
criterion = nn.MSELoss()
optimizer_perceptron = optim.SGD(model_perceptron.parameters(), lr=0.01)
optimizer_mlp = optim.SGD(model_mlp.parameters(), lr=0.01)

def _train_and_save(model, optimizer, inputs, targets, loss_fn, tag, path,
                    epochs=1000, log_every=100):
    """Run a full-batch training loop, log progress, and save the weights.

    Deduplicates the two originally copy-pasted training loops; the
    console output and saved files are identical to the originals.

    Args:
        model: module to train (updated in place).
        optimizer: optimizer bound to ``model``'s parameters.
        inputs: full-batch input tensor.
        targets: full-batch target tensor.
        loss_fn: criterion mapping ``(outputs, targets)`` to a scalar loss.
        tag: label used in the progress printout (e.g. ``'Perceptron'``).
        path: destination file for ``torch.save(model.state_dict(), ...)``.
        epochs: number of full-batch updates (default 1000, as before).
        log_every: print the loss every this many epochs (default 100).

    Returns:
        The final loss as a Python float (``nan`` if ``epochs`` is 0).
    """
    loss = None
    for epoch in range(epochs):
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), targets)
        loss.backward()
        optimizer.step()
        if (epoch + 1) % log_every == 0:
            print(f'{tag} - Epoch [{epoch + 1}/{epochs}], Loss: {loss.item()}')
    torch.save(model.state_dict(), path)
    print(f'Saved model to {path}')
    return loss.item() if loss is not None else float('nan')

# Train and save the single-layer perceptron, then the MLP.
_train_and_save(model_perceptron, optimizer_perceptron, X, y, criterion,
                'Perceptron', 'model_perceptron.pth')
_train_and_save(model_mlp, optimizer_mlp, X, y, criterion,
                'MLP', 'model_mlp.pth')