import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn

# Build the training set: 300 samples of cos(x) on [0, 2*pi],
# shaped (300, 1) and stored as float32 tensors for the network.
num_points = 300
x = np.linspace(0, 2 * np.pi, num_points).reshape(-1, 1)
y = np.cos(x)
x = torch.tensor(x, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32)

# Fully-connected regression network (1 -> 50 -> 200 -> 300 -> 200 -> 50 -> 1).
# The interleaved nn.ReLU() activations provide the non-linearity needed to
# fit cos(x); without them the stack would collapse to a single linear map.
# (Removed the commented-out alternative layer sizes that were left behind
# from earlier experiments — dead code should not ship.)
net = nn.Sequential(
    nn.Linear(1, 50), nn.ReLU(),
    nn.Linear(50, 200), nn.ReLU(),
    nn.Linear(200, 300), nn.ReLU(),
    nn.Linear(300, 200), nn.ReLU(),
    nn.Linear(200, 50), nn.ReLU(),
    nn.Linear(50, 1),
)

# Objective: mean-squared error between predictions and targets —
# it tells the optimizer which direction is "better".
loss_fun = nn.MSELoss()
# Optimizer: plain SGD over the network's parameters;
# lr is the step size taken along the gradient each update.
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

num_epoch = 5000

ls = []  # per-epoch training loss, kept for plotting the loss curve later
# Full-batch gradient descent on the MSE objective.
for epoch in range(num_epoch):
    optimizer.zero_grad()            # clear gradients from the previous step
    y_predicted = net(x)             # forward pass
    loss = loss_fun(y_predicted, y)  # distance between prediction and target
    loss.backward()                  # back-propagate
    optimizer.step()                 # apply the parameter update
    ls.append(loss.item())
    if (epoch + 1) % 100 == 0:
        print(f'epoch:{epoch + 1}, train_loss:{loss.item():.6f}')
# detach() strips the autograd bookkeeping so the tensor can be converted
# to a NumPy array for plotting.
fitted = net(x).detach()
plt.plot(x.numpy(), fitted.numpy(), 'r')  # fitted curve
plt.plot(x.numpy(), y.numpy(), 'b')       # ground truth
plt.show()

# Loss curve over the whole training run.
plt.plot(range(num_epoch), ls)
plt.show()

# a = torch.tensor(5.0, requires_grad=True)
# print(a.requires_grad)
# a.grad.zero_()
# b = a*a
# b.backward()
# print(b)
# print(a.grad)

# c=torch.tensor(2.0, requires_grad=True)
# # # raises an error: a tensor that requires grad cannot be converted directly
# # d=c.numpy()
# # calling detach() first removes the grad tracking, so .numpy() then works
# d = c.detach().numpy()

# First way to construct a dataset -- list(zip())
