# common file
import math

from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import torch
import csv
import os
from torch.utils.data import Dataset, DataLoader


# Plotting
def loss_draw(loss_log, score_log):
    """Plot the per-batch loss curve and the RMSE/MAE/MAPE score curves.

    :param loss_log: sequence of loss values, one per batch.
    :param score_log: sequence of (RMSE, MAE, MAPE) triples, one per evaluation.
    """
    plt.figure(figsize=(10, 5), dpi=100)
    plt.plot(loss_log, linewidth=1)
    plt.title('Loss Value')
    plt.xlabel('Number of batches')
    # plt.show()

    scores = np.array(score_log)

    plt.figure(figsize=(10, 6), dpi=100)
    # One 2x2 subplot slot per metric: (colour, y-axis label) in column order.
    for col, (colour, label) in enumerate([('#d28ad4', 'RMSE'),
                                           ('#e765eb', 'MAE'),
                                           ('#6b016d', 'MAPE')]):
        plt.subplot(2, 2, col + 1)
        plt.plot(scores[:, col], c=colour)
        plt.ylabel(label)

    plt.show()


# Fixed-length sliding window over a 3-D (time, node, feature) sequence.
def sliding_window(seq, window_size, feature_dim, node_limit=10):
    """Cut ``seq`` into overlapping windows along the time axis.

    :param seq: array-like of shape (T, N, D) — time, node, feature.
    :param window_size: length of each window along the time axis.
    :param feature_dim: which feature channel to keep.
    :param node_limit: keep only the first ``node_limit`` nodes. This was a
        hard-coded ``10`` in the original; it is now a parameter, and the
        default preserves the original behavior.
    :return: list of arrays of shape (window_size, node_limit).
    """
    result = []
    for i in range(len(seq) - window_size):
        result.append(seq[i:i + window_size, :node_limit, feature_dim])
    return result


# Fixed-length sliding window that keeps all columns of the sequence.
def sliding_window_(seq, window_size):
    """Return every contiguous slice of length ``window_size`` from ``seq``.

    Produces ``len(seq) - window_size`` windows, so the window that would end
    exactly at the sequence end is excluded (same as the original loop).
    """
    num_windows = len(seq) - window_size
    return [seq[start: start + window_size] for start in range(num_windows)]


def show_data(datas, node_id):
    """Save one PNG per feature for the first day of data at ``node_id``.

    Bug fix: the original plotted all three features onto the same implicit
    figure, so ``node_..._2.png`` also contained the feature-0 curve and
    ``node_..._3.png`` contained all three. Each feature now gets its own
    figure, which is closed after saving to avoid leaking figures.

    :param datas: array of shape (T, N, D) — time, node, feature; at least
        3 feature channels are assumed (as in the original).
    :param node_id: index of the node (sensor) to plot.
    """
    for feature_idx in range(3):
        plt.figure()
        # First 24h of data at 5-minute intervals (24 * 12 steps).
        plt.plot(datas[:24 * 12, node_id, feature_idx])
        # Same file names as the original code emitted.
        plt.savefig("node_{:3d}_{:d}.png".format(node_id, feature_idx + 1))
        plt.close()


# Z-score normalization
def feature_normalize(data):
    """Standardize ``data`` to zero mean and unit variance along axis 0."""
    mean = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    centered = data - mean
    return centered / sigma


def data_set(_path, train_proportion, dim_size=10):
    """Load the PEMS04 ``.npz`` file and build train/test sliding-window sets.

    Fixes over the original:
    - the local variable no longer shadows the function name ``data_set``;
    - ``dim_size`` is a parameter (default 10 preserves original behavior);
    - the ``dim_size == 0`` branch used to slice the array to width 0 before
      selecting a sensor (which raised); it now selects the first sensor
      directly.

    :param _path: path to the ``.npz`` file; its "data" entry has shape
        (T, N, D), e.g. (16992, 307, 3).
    :param train_proportion: fraction of the time axis used for training.
    :param dim_size: number of sensors (nodes) to keep; 0 keeps only the
        first sensor's flow series.
    :return: (raw_data, train_set, test_set) where raw_data is the selected
        but *un-normalized* data.
    """
    pe4_data = np.load(_path)["data"]
    print(pe4_data.shape)
    # Only the traffic-flow feature (channel 0) is predicted for now.
    show_data(pe4_data, 10)

    if dim_size == 0:
        # Predict the first sensor only.
        pe4_data = pe4_data[:, 0, 0]
    else:
        pe4_data = pe4_data[:, :dim_size, 0]

    raw_data = pe4_data  # keep the un-normalized copy for the caller
    normalized = feature_normalize(pe4_data)

    total_len = normalized.shape[0]
    train_val_split = int(total_len * train_proportion)
    # Long (train) and short (test) sequences.
    train_seq, test_seq = normalized[:train_val_split], normalized[train_val_split:]

    def _windows_to_set(seq):
        # Stack the windows column-wise, then transpose so each row is one
        # window (matches the original concatenate/transpose handling).
        wins = sliding_window_(seq, window_size=13)
        if dim_size != 0:
            wins = np.concatenate(wins, axis=1).transpose()
        return np.array(wins)

    train_set = _windows_to_set(train_seq)
    test_set = _windows_to_set(test_seq)

    return raw_data, train_set, test_set


def get_data_set(_path, train_proportion):
    """Load the PEMS04 flow data and build train/test window sets.

    :param _path: path to the ``.npz`` file; its "data" entry has shape
        (T, N, D), e.g. (16992, 307, 3).
    :param train_proportion: fraction of the time axis used for training.
    :return: (raw_data, train_set, test_set)
    """
    raw = np.load(_path)["data"]  # (time, node, feature)
    print(raw.shape)
    show_data(raw, 10)

    feature_dim = 0   # predict the first feature channel
    window_size = 13
    # The time axis is perfectly regular, so all sensors can be windowed at once.

    split = int(raw.shape[0] * train_proportion)
    train_seq = raw[:split]
    test_seq = raw[split:]

    def _make_set(sequence):
        # Stack windows column-wise and transpose so rows are windows.
        windows = sliding_window(sequence, window_size=window_size, feature_dim=feature_dim)
        stacked = np.concatenate(windows, axis=1).transpose()
        return np.array(stacked)

    train_set = _make_set(train_seq)
    test_set = _make_set(test_seq)

    return raw, train_set, test_set


def next_batch(data, batch_size):
    """Yield successive full batches of ``batch_size`` items from ``data``.

    The trailing incomplete batch (fewer than ``batch_size`` items) is
    dropped, per the original intent. Bug fix: the original used
    ``math.ceil(n / bs) - 1``, which also dropped the final *complete* batch
    whenever ``n`` was an exact multiple of ``batch_size``; floor division
    keeps every full batch.
    """
    data_length = len(data)
    num_batches = data_length // batch_size  # count of complete batches only
    for batch_index in range(num_batches):
        start_index = batch_index * batch_size
        end_index = start_index + batch_size
        yield data[start_index: end_index]


# Overwrite the learning rate of every parameter group in an optimizer.
def update_lr(optimizer, lr):
    """Set ``lr`` as the learning rate on each of ``optimizer``'s param groups."""
    for group in optimizer.param_groups:
        group['lr'] = lr


def get_flow_data(flow_file: str) -> np.array:  # load the flow data as a numpy ndarray
    """
    :param flow_file: str, path of .npz file to save the traffic flow data
    :return:
        np.array(N, T, D)
    """
    archive = np.load(flow_file)

    # Move the node axis to the front: (T, N, D) -> (N, T, D), where N is the
    # number of nodes, T the time steps, D the feature channels.
    node_first = archive['data'].transpose([1, 0, 2])

    # Keep only the first feature, then restore a singleton feature axis so
    # downstream code can treat single- and multi-feature data uniformly.
    flow_data = node_first[:, :, 0][:, :, np.newaxis]

    return flow_data  # [N, T, D]


def load_data(_path):
    """Load the PEMS04 ``.npz`` archive and return flow data shaped [N, T, 1].

    Bug fix: the original called ``.keys()`` on the extracted numpy array
    (ndarray has no ``keys`` method, so the call raised AttributeError);
    the archive keys are now printed from the NpzFile before extraction.

    :param _path: path to the ``.npz`` file with a "data" entry of shape
        (T, N, D), e.g. (16992, 307, 3) — time (59*24*12), nodes, features.
    :return: np.array of shape [N, T, 1].
    """
    archive = np.load(_path)
    print([key for key in archive.keys()])

    flow = archive["data"]
    print(flow.shape)
    show_data(flow, 10)

    # [N, T, D]: put the node axis first, keep only feature 0, and restore a
    # singleton feature axis so generic [N, T, D] handling keeps working.
    flow = flow.transpose([1, 0, 2])[:, :, 0][:, :, np.newaxis]
    return flow


def get_adjacent_matrix(distance_file: str, num_nodes: int, id_file: str = None, graph_type="connect") -> np.array:
    """
    Build a symmetric adjacency matrix from a CSV edge list.

    Refactor: the original duplicated the entire edge-reading loop for the
    with- and without-``id_file`` cases; the two paths are merged, with the
    id remapping applied only when an ``id_file`` is given.

    :param distance_file: str, path of csv file with the distances between nodes
        (one header line, then rows of "i,j,distance"; malformed rows are skipped).
    :param num_nodes: int, number of nodes in the graph.
    :param id_file: str, optional path of a txt file listing the absolute node
        ids, one per line; when given, csv node ids are remapped to their line index.
    :param graph_type: str, ["connect", "distance"] — whether edge weights
        ignore distance (all 1) or are 1/distance.
    :return:
        np.array(N, N)
    :raises ValueError: if graph_type is neither "connect" nor "distance".
    """
    A = np.zeros([int(num_nodes), int(num_nodes)])  # all-zero adjacency matrix

    # Optional remapping from absolute node id -> matrix row/column index.
    node_id_dict = None
    if id_file:
        with open(id_file, "r") as f_id:
            node_id_dict = {int(node_id): idx
                            for idx, node_id in enumerate(f_id.read().strip().split("\n"))}

    with open(distance_file, "r") as f_d:
        f_d.readline()  # skip the header line
        for item in csv.reader(f_d):
            if len(item) != 3:  # a valid row has exactly 3 fields; skip otherwise
                continue
            i, j, distance = int(item[0]), int(item[1]), float(item[2])
            if node_id_dict is not None:
                i, j = node_id_dict[i], node_id_dict[j]
            if graph_type == "connect":
                weight = 1.  # unweighted: just mark the edge as present
            elif graph_type == "distance":
                weight = 1. / distance  # closer nodes get larger weights
            else:
                raise ValueError("graph type is not correct (connect or distance)")
            # Undirected graph: keep the matrix symmetric.
            A[i, j] = weight
            A[j, i] = weight

    return A


class LoadData(Dataset):  # Turns the raw graph/flow files into per-sample training and test data
    def __init__(self, data_path, num_nodes, divide_days, time_interval, history_length, train_mode):
        """
        :param data_path: list, ["graph file name" , "flow data file name"], path to save the data file names.
        :param num_nodes: int, number of nodes.
        :param divide_days: list, [ days of train data, days of test data], list to divide the original data.
        :param time_interval: int, time interval between two traffic data records (mins).---5 mins
        :param history_length: int, length of history data to be used.
        :param train_mode: list, ["train", "test"].
        """

        self.data_path = data_path
        self.num_nodes = num_nodes
        self.train_mode = train_mode
        self.train_days = divide_days[0]  # e.g. 59-14 = 45 days of train data
        self.test_days = divide_days[1]  # e.g. 7*2 = 14 days of test data
        self.history_length = history_length  # e.g. 30/5 = 6 history steps
        self.time_interval = time_interval  # 5 min

        self.one_day_length = int(24 * 60 / self.time_interval)  # number of records in one full day

        self.graph = get_adjacent_matrix(distance_file=data_path[0], num_nodes=num_nodes)

        self.flow_norm, self.flow_data = self.pre_process_data(data=get_flow_data(data_path[1]),
                                                               norm_dim=1)  # self.flow_norm is the normalization base

    def __len__(self):  # length of the dataset
        """
        :return: length of dataset (number of samples).
        """
        if self.train_mode == "train":
            return self.train_days * self.one_day_length - self.history_length
            # number of train samples = total train length - history length
        elif self.train_mode == "test":
            return self.test_days * self.one_day_length
            # every test step is a sample, so #samples = total test length
        else:
            raise ValueError("train mode: [{}] is not defined".format(self.train_mode))

    def __getitem__(self, index):
        # How one sample (x, y) is fetched; index ranges over [0, len - 1], set by __len__
        """
        :param index: int, range between [0, length - 1].
        :return:
            graph: torch.tensor, [N, N].
            data_x: torch.tensor, [N, H, D].
            data_y: torch.tensor, [N, 1, D].
        """
        if self.train_mode == "train":
            index = index
            # training data starts at time 0; this index addresses a raw flow
            # record, which is distinct from the (x, y) sample built from it
        elif self.train_mode == "test":
            index += self.train_days * self.one_day_length  # offset past the training portion
        else:
            raise ValueError("train mode: [{}] is not defined".format(self.train_mode))

        # build the (x, y) sample
        data_x, data_y = LoadData.slice_data(self.flow_data, self.history_length, index, self.train_mode)
        data_x = LoadData.to_tensor(data_x)  # [N, H, D] # convert to tensor
        data_y = LoadData.to_tensor(data_y).unsqueeze(1)  # [N, 1, D] # convert to tensor and add a time axis
        # return the sample as a dict
        return {"graph": LoadData.to_tensor(self.graph), "flow_x": data_x, "flow_y": data_y}

    @staticmethod
    def slice_data(data, history_length, index, train_mode):
        """
        Slice one (x, y) sample out of the data given the history length and time index.
        :param data: np.array, normalized traffic data.
        :param history_length: int, length of history data to be used.
        :param index: int, index on temporal axis.
        :param train_mode: str, ["train", "test"].
        :return:
            data_x: np.array, [N, H, D].
            data_y: np.array [N, D].
        """
        if train_mode == "train":
            start_index = index  # the time index itself (inclusive)
            end_index = index + history_length  # end of the history window (exclusive)
        elif train_mode == "test":
            start_index = index - history_length  # history window precedes the target step
            end_index = index  # target step
        else:
            raise ValueError("train model {} is not defined".format(train_mode))

        data_x = data[:, start_index: end_index]  # slice the time axis; end_index excluded
        data_y = data[:, end_index]  # the step at end_index is the prediction target

        return data_x, data_y

    @staticmethod
    def pre_process_data(data, norm_dim):  # pre-processing: normalization
        """
        :param data: np.array, raw traffic flow data
        :param norm_dim: int, dimension to normalize over; here dim=1, the time axis
        :return:
            norm_base: list, [max_data, min_data], the normalization base.
            norm_data: np.array, normalized traffic data.
        """
        # compute the normalization base
        norm_base = LoadData.normalize_base(data, norm_dim)
        # normalized flow data
        norm_data = LoadData.normalize_data(norm_base[0], norm_base[1], data)
        # the base is returned so the original scale can be recovered later
        return norm_base, norm_data

    @staticmethod
    def normalize_base(data, norm_dim):

        """
        Compute the normalization base (per-node, per-feature max/min over time).
        :param data: np.array, raw traffic flow data
        :param norm_dim: int, normalization dimension; here dim=1, the time axis
        :return:
            max_data: np.array
            min_data: np.array
        """
        # [N, T, D], norm_dim=1 -> [N, 1, D]; keepdims=True preserves the rank
        max_data = np.max(data, norm_dim, keepdims=True)
        min_data = np.min(data, norm_dim, keepdims=True)

        return max_data, min_data  # return the max and min

    @staticmethod
    def normalize_data(max_data, min_data, data):
        """
        Min-max normalize the flow data.
        :param max_data: np.array, max data.
        :param min_data: np.array, min data.
        :param data: np.array, original traffic data without normalization.
        :return:
            np.array, normalized traffic data.
        """
        mid = min_data
        base = max_data - min_data
        normalized_data = (data - mid) / base

        return normalized_data

    @staticmethod
    def recover_data(max_data, min_data, data):  # undo the normalization, for visual comparison
        """
        :param max_data: np.array, max data.
        :param min_data: np.array, min data.
        :param data: np.array, normalized data.
        :return:
            recovered_data: np.array, recovered data.
        """
        mid = min_data
        base = max_data - min_data

        recovered_data = data * base + mid
        # this is the data back on its original scale
        return recovered_data

    @staticmethod
    def to_tensor(data):
        # Convert a numpy array (or nested sequence) to a float32 torch tensor.
        return torch.tensor(data, dtype=torch.float)


def get_data(_path_npz, _path_csv, batch_size):
    """Create the train/test LoadData datasets and their DataLoaders.

    :param _path_npz: path to the .npz traffic-flow file.
    :param _path_csv: path to the .csv distance/graph file.
    :param batch_size: batch size for both loaders.
    :return: (train_dataset, train_loader, test_dataset, test_loader)
    """
    paths = [_path_csv, _path_npz]

    def _build(mode):
        # Everything besides the split mode is shared between the two datasets.
        return LoadData(data_path=paths, num_nodes=307, divide_days=[45, 14],
                        time_interval=5, history_length=6, train_mode=mode)

    train_d = _build("train")
    test_data = _build("test")

    # num_workers is the number of workers used to load each batch.
    train_loader = DataLoader(train_d, batch_size=batch_size, shuffle=True, num_workers=1)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=1)

    return train_d, train_loader, test_data, test_loader


if __name__ == '__main__':
    # Example of constructing a LoadData dataset directly (kept for reference):
    # train_data = LoadData(data_path=["/Users/ryantong/Documents/实验4-数据/高速公路传感器数据/PEMS04/PeMS04.csv",
    #                                  "/Users/ryantong/Documents/实验4-数据/高速公路传感器数据/PEMS04/PEMS04.npz"],
    #                       num_nodes=307, divide_days=[45, 14],
    #                       time_interval=5, history_length=6,
    #                       train_mode="train")
    #
    # print(len(train_data))
    # print(train_data[0]["flow_x"].size())
    # print(train_data[0]["flow_y"].size())

    # Smoke-test the sliding-window pipeline on a local PEMS04 file.
    # NOTE(review): the path is machine-specific — adjust before running.
    _path = "/Users/ryantong/Documents/实验4-数据/高速公路传感器数据/PEMS04/PEMS04.npz"
    train_proportion = 0.7
    dataset1, train_set, test_set = data_set(_path, train_proportion)
