import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import os
# Fix RNG seeds so the run is reproducible.
np.random.seed(0)
torch.manual_seed(0)

'''Load the dataset'''
# The CSV (one sine-wave value per line) is expected next to this script.
current_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(current_dir, "sindata_1000.csv"), delimiter=',')
num_data = len(data)
split = int(0.8 * num_data)
print(f'数据集大小：{num_data}')
'''Split into train / test'''
data_train = np.array(data[:split])
data_test = np.array(data[split:])


# A whole window of the time series is one training sample.
# Input sequence length:
seq_len = 20
# Chop the training data into chunks of length (seq_len + 1); the leftover
# tail that does not fill a full chunk is discarded.
num_train = len(data_train) // (seq_len + 1) * (seq_len + 1)
data_train = np.array(data_train[:num_train]).reshape(-1, seq_len + 1, 1)
# Convert to PyTorch tensors of shape (num_sequences, seq_len, input_size):
# y is the same window as x shifted one step ahead (next-value targets).
x_train = torch.from_numpy(data_train[:, :seq_len]).to(torch.float32)
y_train = torch.from_numpy(data_train[:, 1: seq_len + 1]).to(torch.float32)
# For the test split: predict each value from its predecessor.
x_test = torch.from_numpy(data_test[:-1]).to(torch.float32)
y_test = torch.from_numpy(data_test[1:]).to(torch.float32)
print(f'data_train.shape = {data_train.shape}')
# BUG FIX: the sequence count is shape[0]; shape[1] is seq_len + 1.
print(f'训练序列数：{data_train.shape[0]}')
print(f'x_train.shape = {x_train.shape}')
print(f'y_train.shape = {y_train.shape}')
print(f'x_test.shape = {x_test.shape}')
print(f'y_test.shape = {y_test.shape}')


'''
Gated recurrent unit (GRU).
The reset gate helps capture short-term dependencies in the sequence:
  when r_t is close to 1, the unit reduces to a vanilla recurrent network;
  when r_t is close to 0, the candidate hidden state is the output of an
  MLP that sees only x_t.
The update gate helps capture long-term dependencies:
  when z_t is close to 1, the new hidden state h_t stays close to h_{t-1};
  when z_t is close to 0, h_t follows the candidate \tilde{h}_t, which
  carries the information of x_t.
'''
class GRU(nn.Module):
    """A PyTorch GRU followed by a linear read-out layer."""

    def __init__(self, input_size, output_size, hidden_size):
        super().__init__()
        # BUG FIX: remember the hidden size on the instance so forward()
        # no longer depends on a same-named module-level global.
        self.hidden_size = hidden_size
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size)
        # Linear map from the hidden state to the prediction.
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        # x has shape (batch_size, seq_len, input_size); nn.GRU expects
        # (seq_len, batch_size, input_size), so swap the first two axes.
        # out has shape (seq_len, batch_size, hidden_size).
        out, hidden = self.gru(torch.transpose(x, 0, 1), hidden)
        # Flatten all time steps and feed every hidden state through the
        # read-out layer (note: all steps, not just the last one).
        out = self.linear(out.view(-1, self.hidden_size))
        return out, hidden


'''GRU超参数'''
# GRU hyperparameters and model/optimizer construction.
input_size = 1 # input feature dimension
output_size = 1 # output dimension
hidden_size = 16 # hidden-state dimension
learning_rate = 5e-4
gru = GRU(input_size, output_size, hidden_size)
gru_optim = torch.optim.Adam(gru.parameters(), lr=learning_rate)

'''GRU测试函数'''
def test_gru(gru, x, hidden, pred_steps):
    # x 初始的输入
    # hidden 中间变量
    pred = []
    x_input = x.view(-1, input_size)
    # print(f'x.shape = {x.shape}') # torch.Size([])
    # print(f'x_input.shape = {x_input.shape}') # torch.Size([1, 1])
    for i in range(pred_steps):
        gru_pred, hidden = gru(x_input, hidden)
        pred.append(gru_pred.detach())
        # 将自己预测的 gru_pred 作为输入 x_input，再预测之后的 gru_pred
        x_input = gru_pred 
    return torch.concat(pred).reshape(-1)



'''MLP的超参数'''
# MLP hyperparameters — the baseline model for comparison with the GRU.
hidden_1 = 32
hidden_2 = 16
mlp = nn.Sequential(
    nn.Linear(input_size, hidden_1),
    nn.ReLU(),
    nn.Linear(hidden_1, hidden_2),
    nn.ReLU(),
    nn.Linear(hidden_2, output_size)
)
mlp_optim = torch.optim.Adam(mlp.parameters(), lr=learning_rate)

'''MLP测试函数'''
def test_mlp(mlp, x, pred_steps):
    # x 初始的输入
    # 相比于GRU少了中间变量
    pred = []
    x_input = x.view(-1, input_size)
    for i in range(pred_steps):
        mlp_pred = mlp(x_input)
        pred.append(mlp_pred.detach())
        x_input = mlp_pred
    return torch.concat(pred).reshape(-1)






'''training'''
max_epoch = 150
print_step = 10 # epochs between progress printouts
# Criterion used for both training loss and test evaluation.
loss_criterion = nn.functional.mse_loss
hidden = None # GRU hidden state, carried across sequences and epochs
gru_losses = []
mlp_losses = []
gru_test_losses = []
mlp_test_losses = []
for epoch in range(max_epoch):
    gru_loss = 0.0
    mlp_loss = 0.0
    # Stochastic gradient descent: one update per training sequence.
    # X, y each have shape (seq_len, input_size); y is X shifted one step.
    for X, y in zip(x_train, y_train):

        '''更新GRU模型'''
        # Update the GRU.  Detach the carried hidden state in place so
        # gradients do not flow back into earlier sequences
        # (truncated backpropagation through time).
        if hidden is not None:
            hidden.detach_()
        gru_pred, hidden = gru(X[None, ...], hidden)
        gru_train_loss = loss_criterion(gru_pred.view(y.shape), y)
        gru_optim.zero_grad()
        gru_train_loss.backward()
        gru_optim.step()
        gru_loss += gru_train_loss.item()

        '''更新MLP模型'''
        # Update the MLP baseline; reshape input to (seq_len, input_size).
        mlp_pred = mlp(X.view(-1, input_size))
        mlp_train_loss = loss_criterion(mlp_pred.view(y.shape), y)
        mlp_optim.zero_grad()
        mlp_train_loss.backward()
        mlp_optim.step()
        mlp_loss += mlp_train_loss.item()

    # Average the accumulated losses over the training sequences.
    gru_loss /= len(x_train)
    mlp_loss /= len(x_train)
    gru_losses.append(gru_loss)
    mlp_losses.append(mlp_loss)

    '''testing'''
    # Evaluate after every epoch.  Training feeds windows of length
    # seq_len; testing feeds one step at a time, reusing the last hidden
    # state (hidden[:, -1] drops the batch axis for unbatched stepping).
    gru_pred = test_gru(gru, x_test[0], hidden[:, -1], len(y_test))
    mlp_pred = test_mlp(mlp, x_test[0], len(y_test))
    gru_test_loss = loss_criterion(gru_pred, y_test).item()
    mlp_test_loss = loss_criterion(mlp_pred, y_test).item()
    gru_test_losses.append(gru_test_loss)
    mlp_test_losses.append(mlp_test_loss)

    if epoch % print_step == 0 or epoch == max_epoch - 1:
        print(f'Epoch: {epoch}/{max_epoch}, GRU loss: {gru_loss:.4f}, MLP loss: {mlp_loss:.4f}, GRU test loss: {gru_test_loss:.4f}, MLP test loss: {mlp_test_loss:.4f}')

        


'''testing'''
# Final evaluation: roll both models autoregressively over the test range.
gru_preds = test_gru(gru, x_test[0], hidden[:, -1], len(y_test)).numpy()
mlp_preds = test_mlp(mlp, x_test[0], len(y_test)).numpy()

plt.figure(figsize=(13, 5))

'''绘制训练过程中的损失'''
# Left panel: per-epoch training and test losses for both models.
plt.subplot(121)
x_plot = np.arange(len(gru_losses)) + 1
plt.plot(x_plot, gru_losses, color='blue', label='GRU training loss')
plt.plot(x_plot, mlp_losses, color='red', ls='-.', label='MLP training loss')
plt.plot(x_plot, gru_test_losses, color='blue', ls='--', label='GRU test loss')
plt.plot(x_plot, mlp_test_losses, color='red', ls=':', label='MLP test loss')
plt.xlabel('Training step')
plt.ylabel('Loss')
plt.legend(loc='best')

'''绘制训练数据和不同模型的测试结果'''
# Right panel: the raw series plus each model's predictions.
plt.subplot(122)
plt.scatter(np.arange(split), data[:split], color='blue', s=10, label='training set')
plt.scatter(np.arange(split, num_data), data[split:], color='none', edgecolor='orange', s=10, label='test set')
# BUG FIX: the predictions target y_test = data[split+1:], so they belong at
# x positions split+1 .. num_data-1 (the old range was shifted left by one).
plt.scatter(np.arange(split + 1, num_data), mlp_preds, color='violet', marker='x', alpha=0.4, s=20, label='MLP preds')
plt.scatter(np.arange(split + 1, num_data), gru_preds, color='green', marker='*', alpha=0.4, s=20, label='GRU preds')
plt.legend(loc='best')
plt.xlabel('X axis')
plt.ylabel('Y axis')
plt.show()
