# common file
import math
from matplotlib import pyplot as plt
import numpy as np
# plotting
from sklearn.preprocessing import MinMaxScaler


# Shared Min-Max scaler used by data_set() to rescale the window matrices to [0, 1].
mm = MinMaxScaler()


def loss_draw(loss_log, score_log):
    """Plot the training loss curve and the per-evaluation metric curves.

    Args:
        loss_log: sequence of loss values, one per batch.
        score_log: sequence of (RMSE, MAE, MAPE) triples, one per evaluation.
    """
    # Figure 1: loss value over batches.
    plt.figure(figsize=(10, 5), dpi=100)
    plt.plot(loss_log, linewidth=1)
    plt.title('Loss Value')
    plt.xlabel('Number of batches')

    scores = np.array(score_log)

    # Figure 2: one subplot per metric column of score_log.
    plt.figure(figsize=(10, 6), dpi=100)
    metric_specs = [('RMSE', '#d28ad4'), ('MAE', '#e765eb'), ('MAPE', '#6b016d')]
    for column, (label, color) in enumerate(metric_specs):
        plt.subplot(2, 2, column + 1)
        plt.plot(scores[:, column], c=color)
        plt.ylabel(label)

    plt.show()


# Fixed-length sliding window
def sliding_window(seq, window_size, feature_dim, node_limit=10):
    """Cut fixed-length windows of one feature over the leading nodes.

    Generalization: the node count was hard-coded as ``:10``; it is now the
    ``node_limit`` parameter with the same default, so existing callers are
    unaffected.

    Args:
        seq: array-like of shape (time, nodes, features).
        window_size: number of consecutive time steps per window.
        feature_dim: index of the feature to keep.
        node_limit: number of leading nodes to keep (default 10, the
            previously hard-coded value).

    Returns:
        List of ``len(seq) - window_size`` arrays, each of shape
        (window_size, min(nodes, node_limit)).
    """
    return [seq[i:i + window_size, :node_limit, feature_dim]
            for i in range(len(seq) - window_size)]


def sliding_window_(seq, window_size):
    """Return every contiguous slice of length ``window_size`` from ``seq``.

    Produces ``len(seq) - window_size`` windows, i.e. the final element of
    ``seq`` never starts a window (matches the original behavior).
    """
    window_count = len(seq) - window_size
    return [seq[start:start + window_size] for start in range(window_count)]


def show_data(datas, node_id):
    """Save the first 24*12 time steps of each of the three features of one node.

    Bug fix: the original drew all three plots on the same implicit figure,
    so the 2nd and 3rd saved images also contained the earlier curves.
    Each feature now gets its own fresh figure, and figures are closed
    after saving to avoid accumulating open figures.

    Args:
        datas: array of shape (time, nodes, features) with >= 3 features.
        node_id: index of the sensor/node to plot.
    """
    for feature_idx in range(3):
        plt.figure()
        plt.plot(datas[:24 * 12, node_id, feature_idx])
        # Filenames keep the original "{:3d}" (space-padded) convention.
        plt.savefig("node_{:3d}_{}.png".format(node_id, feature_idx + 1))
        plt.close()


# Z-score standardization
def feature_normalize(data):
    """Z-score standardize ``data`` per column (axis 0): (x - mean) / std."""
    column_mean = np.mean(data, axis=0)
    column_std = np.std(data, axis=0)
    standardized = (data - column_mean) / column_std
    return standardized


def data_set(_path, train_proportion):
    """Load a PEMS .npz dataset and build normalized train/test window sets.

    Args:
        _path: path to an .npz file whose "data" entry has shape
            (time, nodes, features) — e.g. PEMS04: 16992 x 307 x 3.
        train_proportion: fraction of the time axis used for training.

    Returns:
        (train_set, test_set): 2-D window matrices scaled to [0, 1].

    Bug fixes vs. the original:
      * the local variable shadowing the function name (``data_set``) was
        unused and has been removed;
      * the scaler is now fitted on the training windows only and reused
        (``transform``) on the test windows — re-fitting on the test set
        leaked test statistics into the normalization.
    """
    raw = np.load(_path)["data"]
    print(raw.shape)
    raw = np.array(raw)
    # For now only the traffic-flow feature (index 0) is predicted.
    dim_size = 1
    if dim_size == 1:
        # Keep only the first sensor's flow readings -> 1-D series.
        raw = raw[:, :, 0]
        raw = raw[:, 0]
    else:
        raw = raw[:, :dim_size, 0]

    total_len = raw.shape[0]
    train_val_split = int(total_len * train_proportion)
    # Long (train) and short (test) sub-sequences along the time axis.
    train_seq, test_seq = raw[:train_val_split], raw[train_val_split:]

    # Fixed-length windows of 13 steps each.
    train_set = sliding_window_(train_seq, window_size=13)
    if dim_size != 1:
        # Stack along rows and transpose to (windows, window_size).
        train_set = np.concatenate(train_set, axis=1).transpose()
    train_set = np.array(train_set)

    test_set = sliding_window_(test_seq, window_size=13)
    if dim_size != 1:
        test_set = np.concatenate(test_set, axis=1).transpose()
    test_set = np.array(test_set)

    # Fit on train only; apply the fitted scaler to test (no leakage).
    return mm.fit_transform(train_set), mm.transform(test_set)


def next_batch(data, batch_size):
    """Yield consecutive full batches of ``data``, dropping a trailing partial one.

    Bug fix: the original used ``math.ceil(n / batch_size) - 1``, which also
    discards the FINAL batch when ``len(data)`` is an exact multiple of
    ``batch_size`` (e.g. n=10, batch=5 yielded only one batch). Floor
    division keeps every full batch and drops only the incomplete tail,
    matching the stated intent of the original comment.

    Args:
        data: any sliceable sequence.
        batch_size: number of items per batch (> 0).

    Yields:
        Slices of ``data`` of exactly ``batch_size`` items.
    """
    num_batches = len(data) // batch_size
    for batch_index in range(num_batches):
        start_index = batch_index * batch_size
        yield data[start_index:start_index + batch_size]


# Update the learning rate in every parameter group of the optimizer
def update_lr(optimizer, lr):
    """Overwrite the 'lr' entry of every parameter group of ``optimizer``.

    Args:
        optimizer: object exposing ``param_groups`` (a list of dicts),
            e.g. a torch optimizer.
        lr: new learning-rate value applied to all groups.
    """
    groups = optimizer.param_groups
    for group in groups:
        group['lr'] = lr


if __name__ == '__main__':
    # Smoke run: load the PEMS04 dataset and build the train/test window sets.
    _path = "data/PEMS04.npz"
    train_proportion = 0.7
    train_set, test_set = data_set(_path, train_proportion)
