import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader

class MyNet(nn.Module):
    """Fully-connected regression net mapping a scalar input to a scalar output.

    Hidden widths are 10-50-100-50-10 with ReLU activations between layers
    (no activation after the final Linear). Parameters are converted to
    float64 so the model matches the double-precision training data.
    """

    def __init__(self):
        super().__init__()
        widths = [1, 10, 50, 100, 50, 10, 1]
        modules = []
        for i in range(len(widths) - 1):
            modules.append(nn.Linear(widths[i], widths[i + 1]))
            if i < len(widths) - 2:  # no activation on the output layer
                modules.append(nn.ReLU())
        self.layer = nn.Sequential(*modules)
        # Run in float64 to match the float64 inputs built by the script.
        self.double()

    def forward(self, x):
        return self.layer(x)

# Target function: y = 2*x**8 + 3*x**6 + sqrt(x), sampled on [0, 1].
step = 200
x = torch.linspace(0, 1, steps=step, dtype=torch.float64)
y = 2 * x**8 + 3 * x**6 + torch.sqrt(x)
# Add a trailing feature dimension: (200,) -> (200, 1), as the net expects.
X = x.unsqueeze(1)
Y = y.unsqueeze(1)

net = MyNet()

# Mean-squared-error loss for the regression task.
loss_fun = nn.MSELoss()
# Plain stochastic gradient descent over all network parameters.
optimizer = optim.SGD(net.parameters(), lr=0.005)

# Pair inputs with targets and wrap them in a shuffling mini-batch loader.
batchSize = 20
data = list(zip(X, Y))
train = DataLoader(data, batch_size=batchSize, shuffle=True)

ls = []  # per-epoch average batch loss, kept for the loss-curve plot below
epoch_num = 1000

for epoch in range(epoch_num):
    total = 0.0  # sum of per-batch losses for this epoch
    for xx, yy in train:
        # Call the module itself (not .forward()) so registered hooks run.
        yy_predict = net(xx)
        loss = loss_fun(yy_predict, yy)
        total += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # BUG FIX: average over the number of batches, len(train), not the batch
    # size. The original divided by batchSize (20) instead of the 10 batches
    # per epoch, misreporting the mean loss by a factor of 2.
    avg_loss = total / len(train)
    if (epoch + 1) % 10 == 0:
        print(f'epoch:{epoch+1}, loss:{avg_loss:.9f}')
    ls.append(avg_loss)

# Optional diagnostics: fitted curve vs. ground truth, and the loss curve.
# plt.plot(X.numpy(), net(X).detach().numpy(), 'r')
# plt.plot(x, y, 'b')
# plt.show()
#
# plt.plot(range(len(ls)), ls)
# plt.show()

# Evaluate on [1, 2] -- NOTE(review): this is extrapolation outside the
# [0, 1] training range, so predictions here are not expected to be accurate.
a = torch.linspace(1, 2, 20, dtype=torch.float64).unsqueeze(1)
# Call the module (not .forward()) and disable autograd for inference.
with torch.no_grad():
    print(net(a).numpy())

