import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt

# Build the training data: 10,000 points in [0, 10) and their sine values.
# Select the device once so the script also runs on CPU-only machines
# (the original hard-coded .cuda() and crashed without a GPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Inputs as an (N, 1) float32 tensor — the shape nn.Linear(1, ...) expects.
X = torch.tensor(np.arange(0, 10, 0.001), dtype=torch.float32).view(-1, 1)
# Targets: element-wise sine of the inputs, same (N, 1) shape.
# Computed directly in torch instead of the numpy round trip.
Y_tensor = torch.sin(X)

X = X.to(device)
Y_tensor = Y_tensor.to(device)
# 定义多层感知器模型

class MLP(nn.Module):
    """Fully-connected regressor mapping a scalar input to a scalar output.

    Architecture: 1 -> 64 -> 64 -> 32 -> 1, with a ReLU after every hidden
    layer and a purely linear output layer (no activation), as is usual
    for regression.
    """

    def __init__(self):
        super(MLP, self).__init__()
        # Attribute names kept as fc1..fc4 so checkpoints stay compatible.
        self.fc1 = nn.Linear(1, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 32)
        self.fc4 = nn.Linear(32, 1)

    def forward(self, x):
        """Run the forward pass on an (N, 1) batch; returns an (N, 1) tensor."""
        h = x
        # ReLU after each hidden layer; the output layer stays linear.
        for hidden in (self.fc1, self.fc2, self.fc3):
            h = torch.relu(hidden(h))
        return self.fc4(h)

# Initialize model, loss function, and optimizer.
# Place the model on the same device as the data instead of hard-coding
# .cuda(), so the script works with or without a GPU (identical behavior
# when X is already a CUDA tensor).
model = MLP().to(X.device)
criterion = nn.MSELoss()  # mean-squared-error loss for regression
optimizer = optim.AdamW(model.parameters(), lr=0.001)

# Full-batch training: the whole dataset fits in one forward pass.
num_epochs = 1000
for epoch in range(num_epochs):
    model.train()

    optimizer.zero_grad()  # clear gradients accumulated by the last step
    outputs = model(X)  # forward pass
    loss = criterion(outputs, Y_tensor)  # compute loss
    loss.backward()  # back-propagate
    optimizer.step()  # update parameters

    if (epoch+1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# Evaluate the trained network: eval mode, no gradient tracking.
model.eval()
with torch.no_grad():
    predicted = model(X)

# Bring everything back to host memory so matplotlib/numpy can consume it.
X = X.cpu()
predicted = predicted.cpu()
Y_tensor = Y_tensor.cpu()

# Plot the ground truth against the network's fit.
x_np, y_np, pred_np = X.numpy(), Y_tensor.numpy(), predicted.numpy()
plt.scatter(x_np, y_np, label='真实数据', s=1)
plt.plot(x_np, pred_np, color='red', label='预测数据')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()
plt.title('MLP Regression Example')
plt.show()