import torch
from torch import nn
from matplotlib import pyplot as plt

# Synthetic time series: a slow sine wave plus Gaussian noise (std 0.2).
T = 1000
time = torch.arange(1, T + 1, dtype=torch.float32)
x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))


# Autoregressive setup: the target is y_t = x_t, and each sample's
# features are the `tau` observations immediately preceding it,
# so there are T - tau usable (feature, label) pairs.
tau = 4
features = torch.zeros((T - tau, tau))  # [T - tau, tau] = [996, 4]
for i in range(tau):
    # Column i holds x shifted by i, so row j ends up as x[j : j + tau].
    features[:, i] = x[i:T - tau + i]
labels = x[tau:].reshape((-1, 1))  # [996, 1]

# 996 samples in total.
batch_size = 16
# Intended size of the training split (first 600 samples).
# Fixed typo: was `n_tarin`. NOTE(review): this value is currently
# unused — the training loop below iterates over ALL samples; confirm
# whether a train/test split was intended.
n_train = 600

# Batch 0 -> features[0:batch_size]
# Batch 1 -> features[batch_size : batch_size * 2], etc.





def init_weights(m):
    """Xavier-uniform initialize the weights of a Linear layer.

    Meant to be passed to ``nn.Module.apply``; non-Linear modules
    are left untouched.

    Args:
        m: a submodule visited by ``apply``.
    """
    # isinstance (not `type(m) ==`) is the idiomatic check and also
    # covers subclasses of nn.Linear.
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)

def get_net(in_features=4, hidden=10):
    """Build a small MLP regressor: Linear -> ReLU -> Linear(-> 1).

    Args:
        in_features: number of input features per sample
            (defaults to 4, matching ``tau`` above).
        hidden: width of the hidden layer (defaults to 10).

    Returns:
        An ``nn.Sequential`` whose Linear layers are Xavier-initialized.
    """
    net = nn.Sequential(
        nn.Linear(in_features, hidden),
        nn.ReLU(),
        nn.Linear(hidden, 1),
    )
    net.apply(init_weights)
    return net


# Mean-squared-error loss for the regression target.
loss = nn.MSELoss()

net = get_net()
trainer = torch.optim.Adam(net.parameters(), 0.01)
# Derive the batch count from the data instead of hard-coding 996;
# samples beyond the last full batch are dropped (996 // 16 = 62).
num_batches = len(features) // batch_size
for epoch in range(7):
    # Accumulate the loss as a Python float: `err += l` (a tensor with
    # grad_fn) would keep every batch's autograd graph alive for the
    # whole epoch and waste memory.
    err = 0.0
    for i in range(num_batches):
        X = features[batch_size * i: batch_size * (i + 1)]
        y = labels[batch_size * i: batch_size * (i + 1)]
        trainer.zero_grad()
        l = loss(net(X), y)
        err += l.item()
        l.backward()
        trainer.step()
    print("第{}轮，loss = {}".format(epoch + 1, err))

# Inference over the whole series; no_grad avoids building an autograd
# graph we would never use.
with torch.no_grad():
    # reshape(-1) flattens [T - tau, 1] -> [T - tau] without
    # hard-coding the sample count (was view(1, 996)[0]).
    pred = net(features).reshape(-1)
print(pred.shape)
print(time[tau:].shape)

plt.scatter(time, x, color='g')
# `time` carries no gradients, so detach() was a no-op; predictions are
# already detached by the no_grad block.
plt.scatter(time[tau:].numpy(), pred.numpy(), color='r')
plt.show()
