import torch
from torch import nn
import matplotlib.pyplot as plt
from d2l import torch as d2l

# Generate a synthetic time series: a sine wave plus Gaussian noise,
#   x_t = sin(0.01 * t) + eps_t,  eps_t ~ N(0, 0.2),  t = 1..T
T = 1000
time = torch.arange(1, T + 1, dtype=torch.float32)
value = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T, ))
# To inspect the raw series, uncomment (xlim pins the x-axis to [1, 1000]
# so there is no blank margin past t = 1000):
# d2l.plot(time, [value], xlabel="time", ylabel="value", xlim=[1, 1000], figsize=(6, 3))
# d2l.plt.show()

# Embedding dimension: predict x_t from the previous tau observations.
tau = 4

# Row i of `features` is the window (x_i, ..., x_{i+tau-1}); equivalently,
# column j is the series shifted left by j. Filling column-by-column with
# slices replaces the original O(T*tau) element-wise Python loop (which
# also printed a debug line on every single element).
features = torch.zeros((T - tau, tau))
for j in range(tau):
    features[:, j] = value[j: T - tau + j]
# Label for row i is the next observation, x_{i+tau}.
labels = value[tau:].reshape((-1, 1))

batch_size = 16
n_train = 600  # only the first 600 (feature, label) pairs are used for training
# Wrap the training split in a shuffling minibatch DataLoader.
train_iter = d2l.load_array((features[:n_train], labels[:n_train]),
                            batch_size=batch_size, is_train=True)
# Sanity check: one minibatch should have shapes (16, 4) and (16, 1).
for X, y in train_iter:
    print(X.shape, y.shape)
    break


def init_weights(m):
    """Xavier-normal-initialize the weights of every ``nn.Linear`` submodule.

    Meant to be passed to ``net.apply``. PyTorch's default Linear
    initialization is a Kaiming-style uniform; this swaps it for
    ``xavier_normal_``. Non-Linear modules are left untouched.

    Args:
        m: a submodule handed in by ``nn.Module.apply``.
    """
    # isinstance is the idiomatic type check (and also covers subclasses),
    # unlike the exact `type(m) == nn.Linear` comparison.
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)


def get_net():
    """Build a small MLP (4 -> 10 -> 1, ReLU hidden layer), Xavier-initialized."""
    model = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1))
    model.apply(init_weights)
    return model


# 开始训练
def fit(net, train_iter, loss_fn, optimizer, epochs):
    """Train `net` for `epochs` passes over `train_iter`.

    Runs a standard minibatch loop (zero grads, forward, backward, step)
    and, after each epoch, prints the average loss over the whole
    training iterator as computed by ``d2l.evaluate_loss``.
    """
    for epoch in range(epochs):
        for X, y in train_iter:
            optimizer.zero_grad()
            batch_loss = loss_fn(net(X), y)
            batch_loss.backward()
            optimizer.step()
        epoch_loss = d2l.evaluate_loss(net, train_iter, loss_fn)
        print(f"epoch{epoch + 1}", f"loss:{epoch_loss:f}")


net = get_net()
mse_loss = nn.MSELoss()
mes_loss = mse_loss  # backward-compatible alias for the old typo'd name
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)

fit(net, train_iter, mse_loss, optimizer, 5)

# One-step-ahead prediction: every input window consists of real observations.
# no_grad: this is pure inference, so skip building the autograd graph.
with torch.no_grad():
    onestep_preds = net(features)
# Plot predictions and ground truth on one figure.
d2l.plot([time, time[tau:]],
         [value.detach().numpy(), onestep_preds.numpy()],
         "time", "x", legend=["value", "1-step preds"],
         xlim=[1, 1000], figsize=(6, 3))
# d2l.plt.show()

# Multi-step prediction: beyond t = n_train + tau, each input window is made
# of the model's own earlier predictions, so errors compound. Without
# no_grad, every iteration would chain onto the previous one's graph and
# memory would grow with the loop.
multistep_preds = torch.zeros(T)
multistep_preds[:n_train + tau] = value[:n_train + tau]  # seed with observed data
with torch.no_grad():
    for i in range(n_train + tau, T):
        multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))
# d2l.plot([time, time[tau:], time[n_train + tau:]],
#          [value.detach().numpy(), onestep_preds.numpy(),
#           multistep_preds[n_train + tau:].numpy()],
#          "time", "value", legend=["data", "1-step", "multistep"],
#          xlim=[1, 1000], figsize=(6, 3))
# d2l.plt.show()

# Inspect k-step-ahead predictions for k = 1 .. max_steps.
max_steps = 64
features = torch.zeros((T - tau, tau + max_steps))
print(features.shape)

# Columns 0..tau-1 hold observed values: column j is the series shifted by j
# (vectorized column slices instead of an O(T*tau) element-wise loop).
for j in range(tau):
    features[:, j] = value[j: T - tau + j]

# Columns tau..tau+max_steps-1 hold predictions; each column is produced
# from the preceding tau columns, so later columns rest on earlier
# predictions. no_grad: inference only, no autograd graph needed.
with torch.no_grad():
    for k in range(tau, tau + max_steps):
        features[:, k] = net(features[:, k - tau:k]).reshape(-1)

steps = (1, 4, 16, 64)
# Column tau + k - 1 of row i is the k-step-ahead prediction for its window.
d2l.plot([time[tau:] + i - 1 for i in steps],
         [features[:, (tau + i - 1)].numpy() for i in steps],
         "time", "value",
         legend=[f"{i}-step preds" for i in steps],
         xlim=[0, T + max_steps], figsize=(6, 3))
d2l.plt.show()



















