# common file
import math
from matplotlib import pyplot as plt
import numpy as np
# plotting
from sklearn.preprocessing import MinMaxScaler


# Shared min-max scaler: fitted once on the training data in normalization()
# and reused by unnormalization() to invert the scaling. Re-fitting it
# overwrites the stored parameters, so only normalization() may fit it.
mm = MinMaxScaler()


def normalization(datas, test_datas):
    """Min-max scale the training set and apply the same scaling to the test set.

    NOTE: fit_transform() stores the scaling parameters inside the shared
    ``mm`` scaler, while transform() only applies them. The training data must
    therefore be fitted exactly once, and the test data must only ever be
    transform()-ed — a second fit_transform() would overwrite the stored
    parameters.

    :param datas: training array, 2-D (samples, features) or
                  3-D (samples, nodes, features)
    :param test_datas: test array with the same trailing dimensions
    :return: (scaled train, scaled test) tuple, or None when ndim > 3
    """
    if datas.ndim > 3:
        print("expected dim <= 3 ")
        return
    if datas.ndim == 3:
        # Fit ONCE on the flattened training data so every sample shares one
        # set of min/max parameters. (The previous per-sample fit_transform
        # loop left mm holding only the LAST sample's parameters, so the test
        # set was transformed with parameters that matched almost none of the
        # training data — exactly the mistake the note above warns about.)
        n_features = datas.shape[-1]
        datas = mm.fit_transform(
            datas.reshape(-1, n_features)).reshape(datas.shape)
        test_datas = mm.transform(
            test_datas.reshape(-1, n_features)).reshape(test_datas.shape)
    else:
        datas = mm.fit_transform(datas)
        test_datas = mm.transform(test_datas)

    return datas, test_datas


def unnormalization(datas):
    """Invert the min-max scaling using the parameters stored in ``mm``.

    The same global ``mm`` scaler that performed the forward scaling must be
    used here, and the input's last dimension has to match the number of
    feature columns the scaler was fitted on. 3-D inputs are inverted sample
    by sample (in place); 2-D inputs are inverted in one call.
    """
    if datas.ndim > 3:
        print("expected dim <= 3 ")
        return
    if datas.ndim == 3:
        for sample_idx in range(datas.shape[0]):
            datas[sample_idx] = mm.inverse_transform(datas[sample_idx])
        return datas
    return mm.inverse_transform(datas)


def loss_draw(loss_log=None, score_log=None):
    """Plot the training loss curve and the per-epoch evaluation scores.

    :param loss_log: sequence of per-batch loss values; skipped when None/empty
    :param score_log: sequence of (RMSE, MAE, ...) score rows; skipped when
                      None/empty (previously np.array(None)[:, 0] crashed)
    """
    if loss_log:
        plt.figure(figsize=(10, 5), dpi=100)
        plt.plot(loss_log, linewidth=1)
        plt.title('Loss Value')
        plt.xlabel('Number of batches')
        plt.show()

    # Guard against the default score_log=None, which the old code fed
    # straight into np.array(...)[:, 0] and crashed on.
    if score_log is None or len(score_log) == 0:
        return

    score_log = np.array(score_log)

    plt.figure(figsize=(10, 6), dpi=100)
    plt.subplot(2, 2, 1)
    plt.plot(score_log[:, 0], c='#d28ad4')
    plt.ylabel('RMSE')

    plt.subplot(2, 2, 2)
    plt.plot(score_log[:, 1], c='#e765eb')
    plt.ylabel('MAE')

    plt.show()


def sliding_window_(seq, window_size):
    """Return all length-``window_size`` windows over ``seq``.

    The last possible window (starting at len(seq) - window_size) is
    deliberately excluded, matching the original behavior.
    """
    return [seq[start: start + window_size]
            for start in range(len(seq) - window_size)]


def show_data(datas, node_id):
    """Save the two feature series of one node as separate PNG files.

    :param datas: array indexed as (time, node, feature) — assumes at least
                  2 features (e.g. in-flow / out-flow); TODO confirm
    :param node_id: index of the node/region to plot
    """
    # Give each feature its own fresh figure: previously both curves were
    # drawn on the same implicit figure, so the second PNG contained both,
    # and figures were never closed (they accumulate in memory).
    plt.figure()
    plt.plot(datas[:, node_id, 0])  # feature 0
    plt.savefig("node_{:3d}_1.png".format(node_id))
    plt.close()

    plt.figure()
    plt.plot(datas[:, node_id, 1])  # feature 1
    plt.savefig("node_{:3d}_2.png".format(node_id))
    plt.close()


# 归一化
def feature_normalize(data):
    """Standardize each column of ``data`` to zero mean and unit variance (z-score)."""
    centered = data - np.mean(data, axis=0)
    return centered / np.std(data, axis=0)


def data_set(train_path, test_path, train_proportion, window_size):
    """Load the traffic-volume data and build sliding-window data sets.

    The raw "volume" arrays are shaped (time, H, W, 2) — e.g. 1920 time steps
    for training (960 for test), an H*W grid of regions, and 2 features per
    region (in-flow and out-flow).

    :param train_path: path to a .npz file holding the training "volume" array
    :param test_path: path to a .npz file holding the test "volume" array
    :param train_proportion: fraction of the training file kept for training;
                             the remainder becomes the train-test split
    :param window_size: length of each sliding window
    :return: (train_set, train_test_set, test_set) numpy arrays of windows
    """
    volumeData = np.load(train_path)["volume"]
    volumeData_test = np.load(test_path)["volume"]
    print(volumeData.shape)
    # Merge the grid dimensions: (time, H, W, 2) -> (time, H*W, 2).
    # Derive the leading size from the data instead of the previous
    # hard-coded 1920/960 so files of other lengths also work.
    volumeData = volumeData.reshape(volumeData.shape[0], -1, 2)
    volumeData_test = volumeData_test.reshape(volumeData_test.shape[0], -1, 2)
    show_data(volumeData_test, 1)  # preview one region's flow
    total_len = volumeData.shape[0]
    volumeData, volumeData_test = normalization(volumeData, volumeData_test)
    train_val_split = int(total_len * train_proportion)
    # Split the long sequence into train / held-out segments.
    train_seq, test_seq = volumeData[:train_val_split], volumeData[train_val_split:]
    train_set = np.array(sliding_window_(train_seq, window_size=window_size))
    train_test_set = np.array(sliding_window_(test_seq, window_size=window_size))
    # The real test set may only be used for final result evaluation.
    test_set = np.array(sliding_window_(volumeData_test, window_size=window_size))

    return train_set, train_test_set, test_set


def next_batch(data, batch_size):
    """Yield consecutive full batches of ``data``; an incomplete tail is dropped.

    The previous ``math.ceil(len/batch_size) - 1`` count also discarded a
    COMPLETE final batch whenever len(data) was an exact multiple of
    batch_size; floor division keeps every full batch and drops only the
    genuinely incomplete remainder.

    :param data: any sliceable sequence
    :param batch_size: number of items per yielded batch
    """
    num_batches = len(data) // batch_size
    for batch_index in range(num_batches):
        start_index = batch_index * batch_size
        yield data[start_index: start_index + batch_size]


# 用于更新参数组中的学习率
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group in ``optimizer`` to ``lr``."""
    for group in optimizer.param_groups:
        group['lr'] = lr


if __name__ == '__main__':
    # Smoke test: build the sliding-window data sets from the bundled
    # traffic-volume files with a window size of 7.
    _path = "data/volume_train.npz"
    test_path = "data/volume_test.npz"
    train_proportion = 0.7  # 70% of the training file for training
    train_set, test_set, result_test_set = data_set(_path, test_path, train_proportion, 7)
