from numpy import *
from matplotlib.pyplot import *
import torch


"""
使用 Pytorch 搭建一个 8/16/16/1 的四层神经网络，使用 Sigmoid 激活函数和 MSE 损失函数，以学习率 0.05 训练 1000 步，并调用 Adam 优化计算方法
"""


def func(x):
    """Target curve 0.4*sin(2*pi*x)*cos(4*pi*x) + 0.5; maps [0, 1] into (0, 1)."""
    phase = 2 * pi * x
    return 0.5 + 0.4 * sin(phase) * cos(2 * phase)


# Synthetic datasets: inputs uniform on [0, 1), targets are func(x) plus
# uniform noise drawn from [-0.05, 0.05).  func is built from NumPy ufuncs,
# so it evaluates elementwise on whole arrays — no per-sample loop needed.
N_t = 200  # number of training samples
N_v = 50   # number of validation samples

X_t = random.rand(N_t)
y_t = func(X_t) + (random.rand(N_t) - 0.5) * 0.1
X_v = random.rand(N_v)
y_v = func(X_v) + (random.rand(N_v) - 0.5) * 0.1

# %%
# Wrap the NumPy arrays as float32 torch tensors.  Inputs get a trailing
# feature dimension of size 1 (shape (N, 1)); targets stay one-dimensional.
# .float() converts float64 -> float32, producing an independent copy.
X_train = torch.from_numpy(X_t.reshape(-1, 1)).float()
y_train = torch.from_numpy(y_t).float()
X_val = torch.from_numpy(X_v.reshape(-1, 1)).float()
y_val = torch.from_numpy(y_v).float()

# Fully-connected 1 -> 8 -> 16 -> 16 -> 1 network with a Sigmoid after every
# Linear layer — including the output, which is therefore confined to (0, 1).
_widths = [1, 8, 16, 16, 1]
_layers = []
for _n_in, _n_out in zip(_widths[:-1], _widths[1:]):
    _layers.append(torch.nn.Linear(_n_in, _n_out))
    _layers.append(torch.nn.Sigmoid())
DNN = torch.nn.Sequential(*_layers)

optimizer = torch.optim.Adam(DNN.parameters(), lr=0.05)
loss_func = torch.nn.MSELoss()

# Dense grid on [0, 1] used both as the plotting x-axis and, as a tensor,
# for snapshotting the network's current fit.  (The original code plotted
# against an undefined name `x`, which raised NameError on the first
# logging iteration.)
x_grid = linspace(0, 1, 100)
X_test = torch.tensor(x_grid.reshape(-1, 1)).float()

train_loss_list = []
val_loss_list = []
pred_list = []

for epoch in range(1000):
    # One full-batch gradient step on the training set.
    y_train_pred = DNN(X_train).squeeze()
    train_loss = loss_func(y_train_pred, y_train)
    optimizer.zero_grad()
    train_loss.backward()
    optimizer.step()

    if epoch % 10 == 0:
        # Validation and snapshotting are monitoring only — no gradients
        # needed, so run them under no_grad (the original tracked grads for
        # the validation forward on every epoch even though the value was
        # only consumed here).
        with torch.no_grad():
            val_loss = loss_func(DNN(X_val).squeeze(), y_val)
            pred = DNN(X_test).numpy()
        train_loss_list.append(train_loss.item())
        val_loss_list.append(val_loss.item())
        pred_list.append(pred)
        print('Epochs: %d, train loss: %f, val loss: %f'
              % (epoch, train_loss.item(), val_loss.item()))
        # Live plot: data, target curve, and the current network fit.
        cla()
        scatter(X_t, y_t, color='red', s=10, label='Training data')
        scatter(X_v, y_v, color='blue', s=10, label='Validation data')
        plot(x_grid, func(x_grid), '--', color='black', label='Target curve')
        plot(x_grid, pred, color='green', label='Trained curve')
        legend()
        pause(0.1)
