import numpy as np 
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence

import matplotlib.pyplot as plt

def load_data():
    """Return the airline-passenger dataset as a z-scored (144, 3) array.

    Column 0 is the monthly passenger count (1949-01 .. 1960-12); columns
    1 and 2 are the year index (0-11) and month index (0-11) of each row.
    Every column is normalized to zero mean and unit variance.
    """
    # monthly international airline passenger counts, 1949-01 .. 1960-12
    passengers = np.array(
        [112., 118., 132., 129., 121., 135., 148., 148., 136., 119., 104.,
         118., 115., 126., 141., 135., 125., 149., 170., 170., 158., 133.,
         114., 140., 145., 150., 178., 163., 172., 178., 199., 199., 184.,
         162., 146., 166., 171., 180., 193., 181., 183., 218., 230., 242.,
         209., 191., 172., 194., 196., 196., 236., 235., 229., 243., 264.,
         272., 237., 211., 180., 201., 204., 188., 235., 227., 234., 264.,
         302., 293., 259., 229., 203., 229., 242., 233., 267., 269., 270.,
         315., 364., 347., 312., 274., 237., 278., 284., 277., 317., 313.,
         318., 374., 413., 405., 355., 306., 271., 306., 315., 301., 356.,
         348., 355., 422., 465., 467., 404., 347., 305., 336., 340., 318.,
         362., 348., 363., 435., 491., 505., 404., 359., 310., 337., 360.,
         342., 406., 396., 420., 472., 548., 559., 463., 407., 362., 405.,
         417., 391., 419., 461., 472., 535., 622., 606., 508., 461., 390.,
         432.], dtype=np.float32)
    passengers = passengers[:, np.newaxis]  # (144,) -> (144, 1)

    # (year, month) index pair for every row: (0,0), (0,1), ..., (11,11) —
    # the Cartesian product of 12 years and 12 months in row-major order
    step = np.arange(len(passengers))
    year_month = np.stack((step // 12, step % 12), axis=1)

    # assemble [passengers, year, month] -> (144, 3), then z-score each column
    table = np.concatenate((passengers, year_month), axis=1)
    return (table - table.mean(axis=0)) / table.std(axis=0)

'''
    **模型的搭建 **
    希望输入前九年的客流数据，预测后三年的客流
    可先在前九年的数据上，训练LSTM根据前几个月的数据预测下一个月的客流
    然后根据前九年的数据预测出下一个月的客流
    input.size() = (seq_len, batch_size, inp_dim)
    seq_len: 时间序列的长度 前九年共 9*12=108 所以 seq_len=108
    batch_size: 同个批次中输入的序列的条数
    inp_dim: 输入数据的维度， [客流量、年份、月份]三维数据，所以inp_dim=3
    mid_dim: LSTM三个门(gate)的网络宽度 其实就是hidden_size
    在LSTM后面接上两层全连接层 output_dim=1,看作是一个回归操作
'''
class RegLSTM(nn.Module):
    """LSTM regressor: an nn.LSTM backbone followed by a two-layer MLP head.

    Input is shaped (seq_len, batch_size, inp_dim); the output is
    (seq_len, batch_size, out_dim), one regression value per time step.
    """

    def __init__(self, inp_dim, out_dim, mid_dim, mid_layers):
        super(RegLSTM, self).__init__()

        # recurrent backbone: hidden width mid_dim, mid_layers stacked layers
        self.rnn = nn.LSTM(inp_dim, mid_dim, mid_layers)
        # regression head (Linear -> Tanh -> Linear), applied to every step
        self.reg = nn.Sequential(
            nn.Linear(mid_dim, mid_dim),
            nn.Tanh(),
            nn.Linear(mid_dim, out_dim)
        )

    def forward(self, x):
        # nn.LSTM starts from an all-zero (h_0, c_0) when none is supplied;
        # keep only the per-step outputs, drop the final (h_n, c_n)
        features, _ = self.rnn(x)
        steps, batches, width = features.shape
        # fold (seq, batch) into one axis so the head sees a plain 2-D matrix
        flat = self.reg(features.reshape(steps * batches, width))
        return flat.reshape(steps, batches, -1)

    def output_y_hc(self, x, hc):
        # like forward(), but starts from the caller-supplied hc = (h_0, c_0)
        # and also returns the final (h_n, c_n) so state can be rolled forward
        features, hc = self.rnn(x, hc)
        steps, batches, width = features.shape
        flat = self.reg(features.reshape(steps * batches, width))
        return flat.reshape(steps, batches, -1), hc

'''
    **训练**
    同一批次中序列长度不同，需要使用 from torch.nn.utils.rnn import pad_sequence
'''
def run_train_lstm():
    """Train RegLSTM on the airline-passenger series, then evaluate it.

    Trains on the first 75% of months using a single batch of overlapping,
    variable-length suffix sequences (zero-padded to equal length), saves
    the weights to ./net.pth, then autoregressively predicts the held-out
    months and plots prediction vs. ground truth to lstm_reg.png.
    """
    inp_dim = 3 # input features per step: [passenger count, year, month]
    out_dim = 1 # regression target: next month's passenger count
    mid_dim = 8 # LSTM hidden size
    mid_layers = 1 # number of stacked LSTM layers
    batch_size = 12*5 # number of overlapping suffix sequences in the single training batch
    mod_dir = '.' # directory for the saved checkpoint

    ''' load data'''
    data = load_data()
    data_x = data[:-1, :] # inputs: rows 0 .. n-2
    data_y = data[1:, 0] # labels: passenger column shifted one step ahead (rows 1 .. n-1)
    assert data_x.shape[1] == inp_dim # sanity-check the feature dimension

    train_size = int(len(data_x) * 0.75) # size of the training split

    train_x = data_x[:train_size]
    train_y = data_y[:train_size]
    train_x = train_x.reshape((train_size, inp_dim))
    train_y = train_y.reshape((train_size, out_dim))

    ''' build model'''
    # run on GPU when available, otherwise CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = RegLSTM(inp_dim, out_dim, mid_dim, mid_layers).to(device)

    criterion = nn.MSELoss() # NOTE(review): defined but never used below — the weighted loss is computed by hand
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)

    '''train'''
    # convert the numpy arrays to tensors so autograd can track operations
    var_x = torch.tensor(train_x, dtype=torch.float32, device=device)
    var_y = torch.tensor(train_y, dtype=torch.float32, device=device)

    batch_var_x = list()
    batch_var_y = list()

    # collect batch_size suffixes of the training series, lengths 0..batch_size-1
    # NOTE(review): i == 0 gives j == train_size, so the first suffix
    # var_x[train_size:] is empty and its batch column is pure padding —
    # presumably unintended; confirm.
    for i in range(batch_size):
        j = train_size - i
        batch_var_x.append(var_x[j:])
        batch_var_y.append(var_y[j:])

    # a = batch_var_x.copy()
    # a.sort(key=lambda i:len(i), reverse=True)
    # batch_var_x.sort(key=lambda i:len(i), reverse=True)

    # pad_sequence stacks the variable-length suffixes into one tensor of
    # shape (max_seq_len, batch_size, feature_dim); batch_first is left at
    # its default (False) and padding_value=0 fills the short sequences.
    # pack_padded_sequence (commented out below) would let the LSTM skip the
    # padded steps given each original length, and pad_packed_sequence would
    # restore the padded layout afterwards; per the original author it was
    # abandoned here because these sequences share their *tail* rather than
    # their head, so the usual long-to-short packing layout does not apply.
    batch_var_x = pad_sequence(batch_var_x) # (seq_len, batch_size, feature_dim)
    batch_var_y = pad_sequence(batch_var_y)

    # a = pad_sequence(a)

    # batch_var_x = pack_padded_sequence(batch_var_x, lengths=[len(a[i]) for i in range(len(a))])
    # print(batch_var_x[0].shape) # (sum of lengths, feature_dim)

    # no_grad: the weights are fixed constants, not parameters to optimize.
    # Intent (per the original comment): give earlier/shorter positions a
    # smaller error weight via a tanh ramp.
    # NOTE(review): weights has shape (train_size,) while the squared error
    # below has shape (seq_len, batch_size, 1); the product broadcasts the
    # weights into a new trailing axis rather than over time steps — this
    # looks like a broadcasting bug; confirm the intended weighting.
    with torch.no_grad():
        weights = np.tanh(np.arange(len(train_y)) * (np.e / len(train_y)))
        weights = torch.tensor(weights, dtype=torch.float32, device=device)

    print('Training Start')
    # the full (single-batch) dataset is trained for 384 epochs;
    # with only one batch there is no inner per-batch loop
    for epoch in range(384):
        out = net(batch_var_x)

        # loss = criterion(out, batch_var_y)
        loss = (out - batch_var_y) **2 * weights
        loss = loss.mean() # average the weighted squared error to a scalar

        # gradients accumulate by default: clear them so this step's grad
        # does not mix with the previous mini-batch's
        optimizer.zero_grad()
        loss.backward()
        # optimizer.step() belongs inside every per-batch iteration
        optimizer.step()
        # (a scheduler.step(), if one were used, is typically called per epoch)

        if epoch % 64 == 0:
            print('Epoch: {:4}, Loss: {:.5f}'.format(epoch, loss.item()))
    # torch.save(model.state_dict()) saves/restores only the parameters;
    # torch.save(model) would save the parameters plus the module structure
    torch.save(net.state_dict(), '{}/net.pth'.format(mod_dir))
    print("Save in:", '{}/net.pth'.format(mod_dir))

    ''' eval '''
    '''
    使用前9年的数据作为输入，预测得到下一个月的客流，并将此预测结果加到输入序列中，从而逐步预测后三年的客流
    '''
    # Autoregressive rollout: warm up on the training months, then feed each
    # one-step prediction back in as input to walk through the test months.
    net.load_state_dict(torch.load('{}/net.pth'.format(mod_dir), map_location=lambda storage, loc: storage)) # Load all tensors onto the CPU, using a function
    '''
    is a kind of switch for some specific layers/parts of the model that behave differently during training and inference (evaluating) time. 
    For example, Dropouts Layers, BatchNorm Layers etc. You need to turn off them during model evaluation, and .eval() will do it for you. 
    In addition, the common practice for evaluating/validation is using torch.no_grad() in pair with model.eval() to turn off gradients computation
    '''
    net.eval()

    test_x = data_x.copy()
    test_x[train_size:,0] = 0 # blank out the ground-truth passenger counts over the test span
    test_x = test_x[:, np.newaxis, :] # insert a batch dim: (seq_len, 1, inp_dim)
    test_x = torch.tensor(test_x, dtype=torch.float32, device=device)

    eval_size = 1 # a single sequence at evaluation time
    #  **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`, the same with **c_n**
    zero_ten = torch.zeros((mid_layers, eval_size, mid_dim), dtype=torch.float32, device=device)
    # warm up the hidden state on the training span; keep hc for the rollout
    test_y, hc = net.output_y_hc(test_x[:train_size], (zero_ten, zero_ten))
    # NOTE(review): the last output here predicts the value at index
    # train_size, but it is written to index train_size + 1 — looks off by
    # one (and index train_size itself stays zeroed); confirm.
    test_x[train_size+1, 0, 0] = test_y[-1]
    for i in range(train_size + 1, len(data)-2):
        # one step at a time, carrying (h, c) forward between steps
        test_y, hc = net.output_y_hc(test_x[i:i+1], hc)
        test_x[i+1, 0, 0] = test_y[-1]
    pred_y = test_x[1:, 0, 0] # drop step 0 so predictions align with data_y
    pred_y = pred_y.cpu().data.numpy()

    diff_y = pred_y[train_size:] - data_y[train_size : -1] # errors on the held-out span
    l1_loss = np.mean(np.abs(diff_y))
    l2_loss = np.mean(diff_y ** 2)
    print("L1: {:.3f}    L2: {:.3f}".format(l1_loss, l2_loss))

    ''' 
    plot results
    '''
    plt.plot(pred_y, 'r', label='pred')
    plt.plot(data_y, 'b', label='real', alpha=0.3) # alpha sets the transparency
    plt.plot([train_size, train_size], [-1, 2], color = 'k', label='train | pred') # vertical train/test split marker
    plt.legend(loc = 'best')
    plt.savefig('lstm_reg.png')
    # plt.pause(4)
    plt.show()

# entry point: train, evaluate, and plot when run as a script
if __name__ == '__main__':
    run_train_lstm()










    






