import torch.nn as nn
import torch
import numpy as np
import math
from sklearn.utils import shuffle
import matplotlib.pyplot as plt

# ------------------------------------ Data preparation ------------------------------
# Cut a long sequence into overlapping fixed-length windows.
def sliding_window(seq, window_size):
    """Return consecutive sub-sequences of length *window_size* from *seq*.

    Mirrors the original behaviour: iteration stops at
    ``len(seq) - window_size``, so the final window ending on the last
    element is not emitted.
    """
    return [seq[start: start + window_size]
            for start in range(len(seq) - window_size)]

# Z-score normalisation
def feature_normalize(data):
    """Standardise *data* to zero mean and unit variance along axis 0."""
    mean = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    normalized = (data - mean) / sigma
    return normalized

# Load, select, normalise and window the raw sensor data.
def get_data(data_path, train_set_proportion):
    """Build windowed train/test sets from the PEMS04 archive.

    The archive holds an array of shape [time, node, feature]
    (e.g. [16992, 307, 3]); only feature 0 (flow) of sensor 0 is used.
    Each set consists of length-13 sliding windows.

    Returns (train_set, test_set) as numpy arrays.
    """
    # raw archive -> ndarray [time, node, feature]
    raw = np.array(np.load(data_path)['data'])
    # keep only feature 0 of sensor 0 -> 1-D time series, then z-score it
    series = feature_normalize(raw[:, :, 0][:, 0])

    # split the long series: the first train_set_proportion for training,
    # the remainder for held-out evaluation
    total_len = int(series.shape[0])
    split_at = int(total_len * train_set_proportion)
    train_seq = series[0: split_at]
    test_seq = series[split_at: total_len]

    # window both halves into 13-step samples (12 history + 1 target)
    train_set = np.array(sliding_window(train_seq, window_size=13))
    test_set = np.array(sliding_window(test_seq, window_size=13))

    print("train_set.shape {}".format(train_set.shape))
    print("test_set.shape {}".format(test_set.shape))
    return train_set, test_set

# ------------------------------------ Recurrent network ------------------------------
class My_RNN(nn.Module):
    """Minimal Elman-style RNN built from raw parameter matrices.

    Args:
        input_size: feature dimension of each timestep.
        hidden_size: dimension of the hidden state.
        output_size: feature dimension of each per-step output.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size

        # recurrent-cell parameters: h_t = tanh(x_t W_h + h_{t-1} U_h + b_h)
        self.w_h = nn.Parameter(torch.rand(input_size, hidden_size))
        self.u_h = nn.Parameter(torch.rand(hidden_size, hidden_size))
        self.b_h = nn.Parameter(torch.zeros(hidden_size))
        # per-step output projection: y_t = leaky_relu(h_t W_y + b_y)
        self.w_y = nn.Parameter(torch.rand(hidden_size, output_size))
        self.b_y = nn.Parameter(torch.rand(output_size))

        # activation functions
        self.tanh = nn.Tanh()
        self.leaky_relu = nn.LeakyReLU()

        # Xavier-initialise every weight matrix; 1-D biases keep their
        # initial values (b_h zeros, b_y uniform random, as before)
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_normal_(param)

    def forward(self, x):
        """Run the RNN over x of shape (batch, seq_len, input_size).

        Returns:
            (h, y): final hidden state of shape (batch, hidden_size) and
            the stacked per-step outputs of shape (batch, seq_len, output_size).
        """
        batch_size = x.size(0)
        seq_len = x.size(1)

        # BUGFIX: allocate the initial hidden state on the same device and
        # dtype as the input (torch.zeros hard-coded CPU float32, which
        # broke forward() for GPU inputs or non-default dtypes).
        h = x.new_zeros(batch_size, self.hidden_size)
        # iterate over the sequence, emitting one output per timestep
        y_list = []
        for t in range(seq_len):
            h = self.tanh(torch.matmul(x[:, t, :], self.w_h) + torch.matmul(h, self.u_h) + self.b_h)
            y = self.leaky_relu(torch.matmul(h, self.w_y) + self.b_y)
            y_list.append(y)
        return h, torch.stack(y_list, dim=1)

# --------------------------------------- Model construction ---------------------------------------------
# Single-sensor flow predictor: 1 input feature -> 32 hidden units -> 1 output.
model = My_RNN(input_size=1, hidden_size=32, output_size=1)

# --------------------------------------- Loss function ---------------------------------------------
# Mean-squared-error regression loss.
loss = nn.MSELoss()

# --------------------------------------- Optimizer ---------------------------------------------
# Adam over all model parameters with a fixed learning rate.
lr = 0.0005
optimizer = torch.optim.Adam(model.parameters(), lr)

# --------------------------------------- Metric functions ---------------------------------------------
def mape(y_true, y_pred):
    """Mean absolute percentage error (in %) over strictly positive targets."""
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    # only targets > 0 contribute, so the ratio is always well defined
    positive = y_true > 0
    ratios = np.abs((y_true[positive] - y_pred[positive]) / y_true[positive])
    # defensive: zero out any infinities (cannot occur after the filter)
    ratios[np.isinf(ratios)] = 0
    return np.mean(ratios) * 100

def mse(y_true, y_pred):
    """Return the mean squared error between *y_true* and *y_pred*.

    BUGFIX: despite its name, this function originally returned
    ``sqrt(mean((y_true - y_pred)**2))`` — i.e. the RMSE. Its only caller,
    ``test_data``, then applied ``math.sqrt`` again, so the reported
    "rmse_score" was actually MSE**0.25. Returning the plain MSE makes the
    function match its name and fixes the caller's RMSE computation.
    """
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.square(y_true - y_pred))

def mae(y_true, y_pred):
    """Return the mean absolute error between *y_true* and *y_pred*."""
    diff = np.array(y_true) - np.array(y_pred)
    return np.abs(diff).mean()

# --------------------------------------- Mini-batch iterator ---------------------------------------------
def next_batch(data, batch_size):
    """Yield successive row-slices of the 2-D array *data*.

    Every batch has *batch_size* rows except possibly the last,
    which holds the remainder.
    """
    total = len(data)
    for start in range(0, total, batch_size):
        stop = min(start + batch_size, total)
        yield data[start:stop, :]

# -------------------------------------- Curve plotting -------------------------------------------
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Plot y_vals against x_vals on a log-scaled y axis and display it.

    When a second series (x2_vals, y2_vals) is supplied it is drawn dotted
    and *legend* is attached.
    """
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    has_second_curve = bool(x2_vals) and bool(y2_vals)
    if has_second_curve:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()

# -------------------------------------- Training loop ---------------------------------------------
def train(epochs, batch_size, train_set, test_set):
    """Train the global `model` on *train_set* and plot loss/metric curves.

    Each 13-step window is split into 12 history steps (model input) and
    the final step (regression target). After every epoch the model is
    evaluated on *test_set* via test_data(); the printed/logged loss is the
    cumulative average over all batches seen so far (original behaviour).
    """
    loss_log, mape_log, mse_log, mae_log, trained_batches = [], [], [], [], 0
    loss_sum = 0.0
    for epoch in range(epochs):
        for batch in next_batch(shuffle(train_set), batch_size):
            batch = torch.from_numpy(batch).float()
            # first 12 points are history, the 13th is the prediction target
            x, label = batch[:, :12], batch[:, -1]
            hidden, out = model(x.unsqueeze(-1))
            # use the output at the last timestep as the prediction
            prediction = out[:, -1, :].squeeze(-1)
            l = loss(prediction, label)
            # BUGFIX: accumulate a Python float, not the loss tensor —
            # `loss_sum += l` chained every iteration's tensor into an
            # autograd expression (growing memory) and made loss_log a
            # list of tensors instead of plain numbers.
            loss_sum += l.item()
            # standard step: clear grads, backprop, update parameters
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            trained_batches += 1

        rmse_score, mae_score, mape_score = test_data(batch_size, test_set)

        loss_log.append(loss_sum / trained_batches)
        mape_log.append(mape_score)
        mse_log.append(rmse_score)
        mae_log.append(mae_score)
        print("loss %.4f, rmse_score %.4f, mae_score %.4f, mape_score %.4f" %
              (loss_sum / trained_batches, rmse_score, mae_score, mape_score))

    # per-epoch training curves
    loss_curve(range(1, epochs + 1), loss_log, "epochs", "loss")
    loss_curve(range(1, epochs + 1), mape_log, "mape_log", "mape")
    loss_curve(range(1, epochs + 1), mse_log, "mse_log", "mse")
    loss_curve(range(1, epochs + 1), mae_log, "mae_log", "mae")

def test_data(batch_size, test_set):
    """Evaluate the global `model` on *test_set*; return (rmse, mae, mape).

    Mirrors train(): only the first 12 steps of each 13-step window are
    fed to the model, and the 13th step is the ground-truth target.
    """
    all_prediction = []
    for batch in next_batch(test_set, batch_size):
        batch = torch.from_numpy(batch).float()
        # BUGFIX: feed only the 12 history steps. The original passed the
        # whole 13-step window, so the model saw the target value itself
        # at the final input step — label leakage and a train/test
        # input-length mismatch.
        hidden, out = model(batch[:, :12].unsqueeze(-1))
        prediction = out[:, -1, :].squeeze(-1)
        all_prediction.append(prediction.detach().cpu().numpy())
    all_prediction = np.concatenate(all_prediction)
    all_lable = test_set[:, -1]
    # NOTE(review): keep this consistent with mse() — as originally written
    # mse() already applied sqrt, making this expression MSE**0.25; with
    # mse() returning the plain MSE this is the true RMSE.
    rmse_score = math.sqrt(mse(all_lable, all_prediction))
    mae_score = mae(all_lable, all_prediction)
    mape_score = mape(all_lable, all_prediction)
    return rmse_score, mae_score, mape_score


if __name__ == '__main__':
    # NOTE(review): hard-coded Windows path to the PEMS04 archive — adjust locally.
    data_path = "F:\交大\学习资料\深度学习\作业\实验4-数据\高速公路传感器数据\PEMS04\PEMS04.npz"
    # fraction of the long series used for training (the remainder is test)
    train_set_proportion = 0.8
    # build windowed train/test sets
    train_set, test_set = get_data(data_path, train_set_proportion)
    # number of training epochs and mini-batch size
    epochs, batch_size = 1000, 128
    # run training (also evaluates and plots curves)
    train(epochs, batch_size, train_set, test_set)
















