import torch.nn as nn
import torch
import numpy as np
import math
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import time

# ------------------------------------ data loading ------------------------------
# Sliding window over a short sequence
def sliding_window(seq, window_size):
    """Split a 1-D sequence into overlapping windows of length window_size.

    Returns a list of ``len(seq) - window_size + 1`` windows, each the slice
    ``seq[i:i + window_size]``. Returns [] when the sequence is shorter than
    the window.
    """
    # Stop at len(seq) - window_size + 1 so the final window (ending at the
    # last element) is included — the original range dropped it (off-by-one).
    return [seq[i: i + window_size] for i in range(len(seq) - window_size + 1)]

# Scaler shared by get_data(): fitted on the training windows, then reused to
# transform the test windows with the same min/max statistics.
mm = MinMaxScaler()

# Load and prepare the usable data
def get_data(data_path, train_set_proportion):
    """Load the PEMS .npz traffic data and build normalized train/test window sets.

    Args:
        data_path: path to an .npz archive whose 'data' entry has shape
            (time, num_nodes, num_features) — e.g. (16992, 307, 3) for PEMS04,
            where time = 59 days * 24 h * 12 samples/h.
        train_set_proportion: fraction of the timeline used for training.

    Returns:
        (train_set, test_set): 2-D arrays of 13-step sliding windows,
        min-max scaled with statistics fitted on the training windows only.
    """
    raw_df = np.array(np.load(data_path)['data'])
    # Keep only feature 0 (traffic flow) of sensor/node 0 -> 1-D series (time,).
    raw_df = raw_df[:, :, 0]
    raw_df = raw_df[:, 0]
    total_len = int(raw_df.shape[0])
    val_test_split = int(total_len * train_set_proportion)
    # Chronological split: earlier portion trains the model, the remainder is
    # held out for evaluation during/after training.
    train_seq, test_seq = raw_df[0: val_test_split], \
                          raw_df[val_test_split: total_len]
    # 13-step windows: 12 history steps plus 1 prediction target.
    train_set = np.array(sliding_window(train_seq, window_size=13))
    test_set = np.array(sliding_window(test_seq, window_size=13))

    print("train_set.shape {}".format(train_set.shape))
    print("test_set.shape {}".format(test_set.shape))
    # Fit the scaler on the training windows only, then apply the same
    # transform to the test windows (no test-set statistics leak in).
    return mm.fit_transform(train_set), mm.transform(test_set)

# ------------------------------------ RNN model ------------------------------
# Single-layer vanilla RNN over a 1-feature input; batch_first, so inputs are
# (batch, seq_len, 1) and outputs (batch, seq_len, 64).
rnn = nn.RNN(input_size=1, hidden_size=64, num_layers=1, batch_first=True)
# Output head: map the final hidden state to one predicted value.
out_linear = nn.Sequential(nn.Linear(64, 1), nn.LeakyReLU())

# --------------------------------------- loss function ---------------------------------------------
loss = nn.MSELoss()

# --------------------------------------- optimizer ---------------------------------------------
lr = 0.00005
optimizer = torch.optim.Adam(list(rnn.parameters()) + list(out_linear.parameters()), lr=lr)

# --------------------------------------- metric functions ---------------------------------------------
def mape(y_true, y_pred):
    """Mean absolute percentage error (in percent), ignoring non-positive targets.

    Entries where y_true <= 0 are dropped so the division is well defined.
    Returns 0.0 when no positive targets remain (the original produced NaN).
    """
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    non_zero_index = (y_true > 0)
    y_true = y_true[non_zero_index]
    y_pred = y_pred[non_zero_index]
    if y_true.size == 0:
        return 0.0
    # y_true > 0 everywhere after the mask, so the ratio is always finite;
    # the original's isinf scrub was dead code. (Also renamed the local so it
    # no longer shadows the function name.)
    errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(errors) * 100

def mse(y_true, y_pred):
    # NOTE(review): despite the name, this returns the ROOT mean squared
    # error (a sqrt is applied) — callers rely on that, so it is preserved.
    diff = np.array(y_true) - np.array(y_pred)
    return np.sqrt(np.square(diff).mean())

def mae(y_true, y_pred):
    """Mean absolute error between two equal-length sequences."""
    deviations = np.abs(np.array(y_true) - np.array(y_pred))
    return deviations.mean()

# --------------------------------------- batch iterator ---------------------------------------------
def next_batch(data, batch_size):
    """Yield consecutive row-chunks of `data`, each with at most batch_size rows."""
    total = len(data)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        yield data[start:stop, :]
        start = stop

# -------------------------------------- loss-curve plotting -------------------------------------------
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Plot one series (optionally a dotted second one with a legend) on a
    semilog-y axis and display the figure."""
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    second_series_given = x2_vals and y2_vals
    if second_series_given:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()

# -------------------------------------- training loop ---------------------------------------------
def train(epochs, batch_size, train_set, test_set):
    """Train the module-level RNN + linear head on sliding-window data.

    Args:
        epochs: number of full passes over train_set.
        batch_size: rows per mini-batch.
        train_set: (N, 13) array — 12 history steps + 1 target per row.
        test_set: (M, 13) array evaluated via test_data() every 100 batches.
    """
    loss_log, score_log, trained_batches = [], [], 0
    for epoch in range(epochs):
        # Per-epoch running loss. The original summed across ALL epochs and
        # divided by the global batch count, so the logged "epoch loss" was a
        # cumulative average, not the loss of that epoch.
        epoch_loss_sum, epoch_batches = 0.0, 0
        for batch in next_batch(shuffle(train_set), batch_size):
            batch = torch.from_numpy(batch).float()
            # First 12 steps are history, the 13th is the prediction target.
            x, target = batch[:, :12], batch[:, -1]
            # out: (batch_size, seq_len, hidden_size)
            out, hidden = rnn(x.unsqueeze(-1))
            out = out_linear(out[:, -1, :])
            prediction = out.squeeze(-1)  # (batch,)
            l = loss(prediction, target)
            # .item() detaches the scalar: summing raw loss tensors (as the
            # original did) retains every batch's autograd graph — a leak.
            epoch_loss_sum += l.item()
            epoch_batches += 1
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            trained_batches += 1
            if trained_batches % 100 == 0:
                # Periodic held-out evaluation.
                rmse_score, mae_score, mape_score = test_data(batch_size, test_set)
                score_log.append([rmse_score, mae_score, mape_score])
                print('RMSE: %.4f, MAE: %.4f, MAPE: %.4f' % (rmse_score, mae_score, mape_score))

        loss_log.append(epoch_loss_sum / max(epoch_batches, 1))

    # Training-loss curve (log-scaled y axis).
    loss_curve(range(1, epochs + 1), loss_log, "epochs", "loss")

    # Metric curves, one subplot each.
    score_log = np.array(score_log)
    if score_log.size == 0:
        # Fewer than 100 total batches: no snapshots to plot (the original
        # crashed here on score_log[:, 0]).
        return
    plt.figure(figsize=(10, 6), dpi=300)
    for idx, (label, color) in enumerate((('RMSE', '#d28ad4'),
                                          ('MAE', '#e765eb'),
                                          ('MAPE', '#6b016d'))):
        plt.subplot(2, 2, idx + 1)
        plt.plot(score_log[:, idx], c=color)
        plt.ylabel(label)
    plt.show()

def test_data(batch_size, test_set):
    """Evaluate the current model on test_set; return (RMSE, MAE, MAPE).

    Each row of test_set is a 13-step window: the first 12 steps are fed to
    the RNN and the prediction is compared with the 13th value.
    """
    all_prediction = []
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for batch in next_batch(test_set, batch_size):
            batch = torch.from_numpy(batch).float()
            # Use only the 12 history steps as input. The original fed the
            # whole 13-step window, leaking the target value into the model
            # input at the final timestep.
            out, hidden = rnn(batch[:, :12].unsqueeze(-1))
            out = out_linear(out[:, -1, :])
            prediction = out.squeeze(-1)  # (batch,)
            all_prediction.append(prediction.cpu().numpy())
    all_lable = test_set[:, -1]
    all_prediction = np.concatenate(all_prediction)
    # mse() already applies the square root (it returns RMSE despite its
    # name); the original wrapped it in math.sqrt again and reported
    # sqrt(RMSE) instead of RMSE.
    rmse_score = mse(all_lable, all_prediction)
    mae_score = mae(all_lable, all_prediction)
    mape_score = mape(all_lable, all_prediction)
    return rmse_score, mae_score, mape_score


if __name__ == '__main__':
    # Wall-clock timing of the whole run.
    startTime = time.time()
    # NOTE(review): non-raw Windows path — it works only because none of the
    # backslash + following-character pairs form a recognized escape; consider
    # a raw string r"..." or pathlib.
    data_path = "F:\交大\学习资料\深度学习\作业\实验4-数据\高速公路传感器数据\PEMS04\PEMS04.npz"
    # Fraction of the timeline used for training.
    train_set_proportion = 0.8
    # Build normalized sliding-window train/test sets.
    train_set, test_set = get_data(data_path, train_set_proportion)
    # Training epochs and mini-batch size.
    epochs, batch_size = 50, 128
    # Train (and periodically evaluate) the model.
    train(epochs, batch_size, train_set, test_set)
    # End time.
    endTime = time.time()
    print("RNN 50轮次训练时间为 {}".format(endTime - startTime))
