# coding=utf-8
# @author:      ChengJing
# @name:        dataset.py
# @datetime:    2021/6/30 16:20
# @software:    PyCharm
# @description:

import wntr
import numpy as np
import pandas as pd
import networkx as nx
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset


class CreateGraph:
    """
        Build graph structures from a water-distribution-network (WDN) model.
    """

    def __init__(self, wn):
        """
        Args:
            wn: wntr EPANET hydraulic model (wntr.network.WaterNetworkModel)
        """
        self.wn = wn

    def reverse_pipes(self):
        """
        Collect the pipes of the network model whose flow is reversed.

        Returns:
            pipes: index of pipe ids whose simulated flowrate at time 0 is negative
        """
        # Temporarily shrink the simulation to a single period (duration = 0),
        # run it, then restore the original duration and reset model state so
        # later simulations are unaffected.
        time = self.wn.options.time.duration
        self.wn.options.time.duration = 0
        sim = wntr.sim.EpanetSimulator(self.wn)
        result = sim.run_sim()
        self.wn.options.time.duration = time
        self.wn.reset_initial_values()
        # Select link ids with negative flowrate in the first (only) timestep.
        pipes = result.link['flowrate'].iloc[0,
                                             :][result.link['flowrate'].iloc[0, :] < 0].keys()
        return pipes

    def wsn_graph(self, remove_reservoirs=True):
        """
        Build the directed graph of the network model.
        Args:
            remove_reservoirs: whether to remove reservoir nodes from the graph
        Returns:
            g: nx graph object, the directed graph of the network model
            adjacency_matrix: np.array, dtype float32, adjacency matrix of the
                directed graph
        """
        pipes = self.reverse_pipes()
        g = self.wn.get_graph()
        g_copy = self.wn.get_graph()
        # Flip edges carrying reversed flow so that edge direction matches
        # the actual flow direction.
        # NOTE(review): `edge` iterates as a (start_node, end_node) pair, so
        # `pipe in edge` compares a pipe id against *node* names — this only
        # behaves as intended if pipe ids coincide with endpoint node names
        # in this model; confirm against the .inp naming scheme.
        for edge in g_copy.edges:
            for pipe in pipes:
                if pipe in edge:
                    g.remove_edge(edge[0], edge[1])
                    g.add_edge(edge[1], edge[0])
        # Remove reservoir nodes from the graph.
        # Assumes the reservoirs are the last `num_reservoirs` entries of the
        # node list — TODO confirm this ordering holds for the loaded model.
        if remove_reservoirs:
            for i in range(-self.wn.num_reservoirs, 0, 1):
                g.remove_node(list(g.nodes)[i])
        return g, np.asarray(
            nx.adjacency_matrix(g).todense()).astype(
            np.float32)

    def knn_graph(self):
        """
        Build a knn-style graph from the network connectivity.
        Rules:
                1. every node on a downstream path of at most 2 pipes becomes a
                   neighbour (adds a connection);
                2. every node on an upstream path of at most 2 pipes becomes a
                   neighbour (adds a connection).
        Returns:
            g: nx graph object, the directed graph of the network model
            adjacency_matrix: np.array, dtype float32, knn adjacency matrix
        """
        g, adj = self.wsn_graph()
        for n, node in enumerate(g.nodes):
            for v in g.successors(node):  # downstream neighbours
                adj[n, list(g.nodes).index(v)] = 1
                for sv in g.successors(v):  # neighbours of the downstream node
                    adj[n, list(g.nodes).index(sv)] = 1
            for s in g.predecessors(node):  # upstream neighbours
                adj[n, list(g.nodes).index(s)] = 1
                for ss in g.predecessors(s):  # neighbours of the upstream node
                    adj[n, list(g.nodes).index(ss)] = 1
        return g, adj.astype(np.float32)

    def monitor_graph(self, moniters):
        """
        Build the directed adjacency matrix of the monitoring points.
        Args:
            moniters: collection of monitoring-point ids
        Returns:
            graph: np.array, dtype float32, directed adjacency matrix of the
                monitoring points
            ps: list, coordinates of the monitoring points
        """
        g, _ = self.wsn_graph()
        pos = nx.get_node_attributes(g, 'pos')
        # Collect the coordinates of the monitoring points.
        ps = []
        for node in moniters:
            ps.append(pos[node])
        # Build the directed adjacency matrix of the monitoring points:
        # connect s -> t when a path exists and no OTHER monitoring point
        # lies on that (shortest) path.
        graph = np.zeros((len(moniters), len(moniters)))
        for i, s in enumerate(moniters):
            for j, t in enumerate(moniters):
                if nx.has_path(g, s, t):  # is there any path between s and t?
                    path = nx.dijkstra_path(g, s, t)
                    if len(list(set(moniters) & set(path))
                           ) <= 2:  # at most s and t themselves on the path
                        graph[i, j] = 1
        return graph.astype(np.float32), ps

    def cal_g(self, flow_sensors, monitors):
        """
        Compute g: reachability between flow sensors and pressure monitors,
        shape N*M where N is the number of flow sensors and M the number of
        pressure monitors; g[n, m] = 1 iff a directed path exists.
        Args:
            flow_sensors: ids of the flow sensors
            monitors: ids of the pressure monitors
        """
        graph, _ = self.wsn_graph(remove_reservoirs=False)
        g = np.zeros((len(flow_sensors), len(monitors)), dtype=np.float32)
        for n, i in enumerate(flow_sensors):
            for m, j in enumerate(monitors):
                if nx.has_path(graph, i, j):
                    g[n, m] = 1
        return g

    def index2id(self, indexs):
        """
        Convert a collection of node indexes into node ids.
        Args:
            indexs: collection of node indexes
        """
        ids = []
        for i in indexs:
            ids.append(self.wn.node_name_list[i])
        return ids


class MyLocationData(Dataset):
    """
    Burst-localization dataset, fully loaded into memory.

    Each sample is a window of ``need_time_len`` consecutive rows taken from
    the centre of a ``time_serious_len``-row segment of the sensor data,
    paired with the 0-based burst-location label of that segment.
    """
    def __init__(self, data_file, label_file, need_time_len=60, time_serious_len=60, left_offset=0, right_offset=0, is_norm=False):
        """
        Set the basic parameters used to slice samples out of the data.

        Args:
            data_file: string, CSV file holding the burst data set (no header)
            label_file: string, CSV file holding the labels (no header, first
                column is the index)
            need_time_len: int, length of the time window required for
                training; must not exceed ``time_serious_len``
            time_serious_len: int, length of one time series per sample,
                determined by how the burst data set was generated
            left_offset: shift of the window start; negative moves it left,
                positive moves it right
            right_offset: shift of the window end; negative moves it left,
                positive moves it right
            is_norm: bool, whether to standardize the data (default False)
        """
        self.data = pd.read_csv(data_file, header=None).values
        if is_norm:
            normal = StandardScaler()
            self.data = normal.fit_transform(self.data)
        self.label = pd.read_csv(label_file, header=None, index_col=0).values
        self.need_time_len = need_time_len
        self.time_serious_len = time_serious_len
        self.left_offset = left_offset
        self.right_offset = right_offset

    def __getitem__(self, item):
        x = self.data[item * self.time_serious_len:(item + 1) * self.time_serious_len, :]
        y = self.label[item][0] - 1  # labels are 1-based on disk
        start = (self.time_serious_len - self.need_time_len) // 2
        # Window of exactly need_time_len rows (plus the offsets). The former
        # upper bound ``time_serious_len - start`` yielded need_time_len + 1
        # rows whenever time_serious_len - need_time_len was odd.
        end = start + self.need_time_len
        x = x[start + self.left_offset:end + self.right_offset, :]
        x = x.astype(np.float32)
        y = y.astype(np.int64)
        return x, y

    def __len__(self):
        return self.label.shape[0]


class MyLocationBigData(Dataset):
    """
    Burst-localization dataset streamed from disk, for data sets too large to
    load into memory at once.

    Note: the underlying CSV readers are iterators, so samples MUST be
    consumed sequentially (``item`` is ignored) — use a non-shuffling sampler.
    """

    def __init__(self, data_file, label_file, samples, need_time_len=60, time_serious_len=60, left_offset=0, right_offset=0):
        """
        Set the basic parameters used to slice samples out of the data.

        Args:
            data_file: string, CSV file holding the burst data set (no header)
            label_file: string, CSV file holding the labels (no header, first
                column is the index)
            samples: int, total number of samples (must be supplied up front
                because the data is consumed through iterators)
            need_time_len: int, length of the time window required for
                training; must not exceed ``time_serious_len``
            time_serious_len: int, length of one time series per sample,
                determined by how the burst data set was generated
            left_offset: shift of the window start; negative moves it left,
                positive moves it right
            right_offset: shift of the window end; negative moves it left,
                positive moves it right
        """
        self.data = pd.read_csv(data_file, header=None, iterator=True)
        self.label = pd.read_csv(label_file, header=None, index_col=0, iterator=True)
        self.samples = samples
        self.need_time_len = need_time_len
        self.time_serious_len = time_serious_len
        self.left_offset = left_offset
        self.right_offset = right_offset

    def __getitem__(self, item):
        # ``item`` is ignored: chunks are read sequentially from the CSVs.
        x = self.data.get_chunk(self.time_serious_len).values
        y = self.label.get_chunk(1).values[0] - 1  # labels are 1-based on disk
        start = (self.time_serious_len - self.need_time_len) // 2
        # Window of exactly need_time_len rows (plus offsets); the old upper
        # bound ``time_serious_len - start`` yielded one extra row whenever
        # time_serious_len - need_time_len was odd.
        end = start + self.need_time_len
        x = x[start + self.left_offset:end + self.right_offset, :]
        x = x.astype(np.float32)
        y = y.astype(np.int64)
        return x, y

    def __len__(self):
        return self.samples


class MyLocationDataQ(Dataset):
    """
    Burst-localization dataset (in memory) that additionally returns the
    per-sensor flow totals of the window alongside the pressure window.
    """
    def __init__(self, data_file, label_file, flow_file, need_time_len=60, time_serious_len=60, left_offset=0, right_offset=0, is_norm=False):
        """
        Set the basic parameters used to slice samples out of the data.

        Args:
            data_file: string, CSV file holding the burst data set (no header)
            label_file: string, CSV file holding the labels (no header, first
                column is the index)
            flow_file: string, CSV file holding the flow data (no header)
            need_time_len: int, length of the time window required for
                training; must not exceed ``time_serious_len``
            time_serious_len: int, length of one time series per sample,
                determined by how the burst data set was generated
            left_offset: shift of the window start; negative moves it left,
                positive moves it right
            right_offset: shift of the window end; negative moves it left,
                positive moves it right
            is_norm: bool, whether to standardize the pressure data
                (default False)
        """
        self.data = pd.read_csv(data_file, header=None).values
        if is_norm:
            normal = StandardScaler()
            self.data = normal.fit_transform(self.data)
        self.label = pd.read_csv(label_file, header=None, index_col=0).values
        self.qdata = pd.read_csv(flow_file, header=None).values
        self.need_time_len = need_time_len
        self.time_serious_len = time_serious_len
        self.left_offset = left_offset
        self.right_offset = right_offset

    def __getitem__(self, item):
        x = self.data[item * self.time_serious_len:(item + 1) * self.time_serious_len, :]
        y = self.label[item][0] - 1  # labels are 1-based on disk
        start = (self.time_serious_len - self.need_time_len) // 2
        # Window of exactly need_time_len rows (plus offsets); the old upper
        # bound ``time_serious_len - start`` yielded one extra row whenever
        # time_serious_len - need_time_len was odd. Applied to both x and q
        # so the two windows stay aligned.
        end = start + self.need_time_len
        x = x[start + self.left_offset:end + self.right_offset, :]
        q = self.qdata[item * self.time_serious_len:(item + 1) * self.time_serious_len, :]
        # Total flow per sensor over the same window.
        q = q[start + self.left_offset:end + self.right_offset, :].sum(0)
        x = x.astype(np.float32)
        y = y.astype(np.int64)
        q = q.astype(np.float32)
        return (x, q), y

    def __len__(self):
        return self.label.shape[0]


class MyLocationBigDataQ(Dataset):
    """
    Streamed burst-localization dataset with flow totals, for data sets too
    large to load into memory at once.

    Note: the underlying CSV readers are iterators, so samples MUST be
    consumed sequentially (``item`` is ignored) — use a non-shuffling sampler.
    """

    def __init__(self, data_file, label_file, flow_file, samples, need_time_len=60, time_serious_len=60, left_offset=0, right_offset=0):
        """
        Set the basic parameters used to slice samples out of the data.

        Args:
            data_file: string, CSV file holding the burst data set (no header)
            label_file: string, CSV file holding the labels (no header, first
                column is the index)
            flow_file: string, CSV file holding the flow data (no header)
            samples: int, total number of samples (must be supplied up front
                because the data is consumed through iterators)
            need_time_len: int, length of the time window required for
                training; must not exceed ``time_serious_len``
            time_serious_len: int, length of one time series per sample,
                determined by how the burst data set was generated
            left_offset: shift of the window start; negative moves it left,
                positive moves it right
            right_offset: shift of the window end; negative moves it left,
                positive moves it right
        """
        self.data = pd.read_csv(data_file, header=None, iterator=True)
        self.label = pd.read_csv(label_file, header=None, index_col=0, iterator=True)
        self.flow = pd.read_csv(flow_file, header=None, iterator=True)
        self.samples = samples
        self.need_time_len = need_time_len
        self.time_serious_len = time_serious_len
        self.left_offset = left_offset
        self.right_offset = right_offset

    def __getitem__(self, item):
        # ``item`` is ignored: chunks are read sequentially from the CSVs.
        x = self.data.get_chunk(self.time_serious_len).values
        q = self.flow.get_chunk(self.time_serious_len).values
        y = self.label.get_chunk(1).values[0] - 1  # labels are 1-based on disk
        start = (self.time_serious_len - self.need_time_len) // 2
        # Window of exactly need_time_len rows (plus offsets); the old upper
        # bound ``time_serious_len - start`` yielded one extra row whenever
        # time_serious_len - need_time_len was odd. Applied to both x and q
        # so the two windows stay aligned.
        end = start + self.need_time_len
        x = x[start + self.left_offset:end + self.right_offset, :]
        # Total flow per sensor over the same window.
        q = q[start + self.left_offset:end + self.right_offset, :].sum(0)
        x = x.astype(np.float32)
        q = q.astype(np.float32)
        y = y.astype(np.int64)
        return (x, q), y

    def __len__(self):
        return self.samples


class MyAlarmData(Dataset):
    """
    Burst-alarm (detection) dataset.

    Normal segments are labelled 0, burst segments are labelled 1; normal
    samples come first, followed by burst samples.
    """
    def __init__(self, norm_data_file, burst_data_file, need_time_len=60, time_serious_len=60, left_offset=0, right_offset=0, is_norm=False, is_balance=True):
        """
        note:
            label 0 = normal data
            label 1 = burst data
        Args:
            norm_data_file: string, CSV file holding the normal data (no header)
            burst_data_file: string, CSV file holding the burst data (no header)
            need_time_len: int, length of the time window required for
                training; must not exceed ``time_serious_len``
            time_serious_len: int, length of one time series per sample,
                determined by how the burst data set was generated
            left_offset: shift of the window start; negative moves it left,
                positive moves it right
            right_offset: shift of the window end; negative moves it left,
                positive moves it right
            is_norm: bool, whether to standardize the data (default False)
            is_balance: currently unused; kept for interface compatibility
        """
        norm = pd.read_csv(norm_data_file, header=None).values
        burst = pd.read_csv(burst_data_file, header=None).values
        samples_norm = norm.shape[0] // time_serious_len
        samples_burst = burst.shape[0] // time_serious_len
        # Per-sample labels: 0 for every normal segment, 1 for every burst segment.
        self.samples = np.array([0] * samples_norm + [1] * samples_burst).astype(np.int64)
        data = np.concatenate((norm, burst), axis=0)
        if is_norm:
            normal = StandardScaler()
            data = normal.fit_transform(data)
        # Cast AFTER the optional standardization: StandardScaler returns
        # float64, which previously leaked through when is_norm=True.
        self.data = data.astype(np.float32)
        self.need_time_len = need_time_len
        self.time_serious_len = time_serious_len
        self.left_offset = left_offset
        self.right_offset = right_offset

    def __getitem__(self, item):
        x = self.data[item * self.time_serious_len:(item + 1) * self.time_serious_len, :]
        y = self.samples[item]
        start = (self.time_serious_len - self.need_time_len) // 2
        # Window of exactly need_time_len rows (plus offsets); the old upper
        # bound ``time_serious_len - start`` yielded one extra row whenever
        # time_serious_len - need_time_len was odd.
        end = start + self.need_time_len
        x = x[start + self.left_offset:end + self.right_offset, :]
        return x, y

    def __len__(self):
        return len(self.samples)


class MyAlarmDataQ(Dataset):
    """
    Burst-alarm (detection) dataset that additionally returns the per-sensor
    flow totals of the window alongside the pressure window.

    Normal segments are labelled 0, burst segments are labelled 1; normal
    samples come first, followed by burst samples.
    """
    def __init__(self, norm_pdata_file, norm_qdata_file, burst_pdata_file, burst_qdata_file, need_time_len=60, time_serious_len=60, left_offset=0, right_offset=0, is_norm=False):
        """
        note:
            label 0 = normal data
            label 1 = burst data
        Args:
            norm_pdata_file: string, CSV file with normal pressure data (no header)
            norm_qdata_file: string, CSV file with normal flow data (no header)
            burst_pdata_file: string, CSV file with burst pressure data (no header)
            burst_qdata_file: string, CSV file with burst flow data (no header)
            need_time_len: int, length of the time window required for
                training; must not exceed ``time_serious_len``
            time_serious_len: int, length of one time series per sample,
                determined by how the burst data set was generated
            left_offset: shift of the window start; negative moves it left,
                positive moves it right
            right_offset: shift of the window end; negative moves it left,
                positive moves it right
            is_norm: bool, whether to standardize the pressure data
                (default False)
        """
        norm_pdata = pd.read_csv(norm_pdata_file, header=None).values
        norm_qdata = pd.read_csv(norm_qdata_file, header=None).values
        burst_pdata = pd.read_csv(burst_pdata_file, header=None).values
        burst_qdata = pd.read_csv(burst_qdata_file, header=None).values
        samples_norm = norm_pdata.shape[0] // time_serious_len
        samples_burst = burst_pdata.shape[0] // time_serious_len
        # Per-sample labels: 0 for every normal segment, 1 for every burst segment.
        self.samples = np.array([0] * samples_norm + [1] * samples_burst).astype(np.int64)
        data = np.concatenate((norm_pdata, burst_pdata), axis=0)
        if is_norm:
            normal = StandardScaler()
            data = normal.fit_transform(data)
        # Cast AFTER the optional standardization: StandardScaler returns
        # float64, which previously leaked through when is_norm=True.
        self.data = data.astype(np.float32)
        self.qdata = np.concatenate((norm_qdata, burst_qdata), axis=0).astype(np.float32)
        self.need_time_len = need_time_len
        self.time_serious_len = time_serious_len
        self.left_offset = left_offset
        self.right_offset = right_offset

    def __getitem__(self, item):
        x = self.data[item * self.time_serious_len:(item + 1) * self.time_serious_len, :]
        q = self.qdata[item * self.time_serious_len:(item + 1) * self.time_serious_len, :]
        y = self.samples[item]
        start = (self.time_serious_len - self.need_time_len) // 2
        # Window of exactly need_time_len rows (plus offsets); the old upper
        # bound ``time_serious_len - start`` yielded one extra row whenever
        # time_serious_len - need_time_len was odd. Applied to both x and q
        # so the two windows stay aligned.
        end = start + self.need_time_len
        x = x[start + self.left_offset:end + self.right_offset, :]
        # Total flow per sensor over the same window.
        q = q[start + self.left_offset:end + self.right_offset, :].sum(0)
        return (x, q), y

    def __len__(self):
        return len(self.samples)


if __name__ == '__main__':
    # Quick manual check: load the model and print the flow-sensor ->
    # pressure-monitor reachability matrix for the 6-monitor layout.
    wn = wntr.network.WaterNetworkModel(r'./inp/苏州60天的.inp')
    flow_sensors = ['11', '48', '53']
    # Candidate pressure-monitor index sets, keyed by layout size.
    monitor_sets = {
        6: [456, 349, 405, 130, 70, 220],
        8: [349, 220, 130, 70, 422, 405, 402, 452],
        10: [349, 220, 130, 70, 422, 405, 402, 452, 30, 272],
        12: [349, 220, 130, 70, 422, 405, 402, 452, 30, 272, 150, 156],
        20: [3, 120, 460, 156, 462, 291, 406, 338, 146, 144, 378, 379, 68, 272, 214, 233, 292, 423, 347, 75],
    }
    builder = CreateGraph(wn)
    monitor_ids = builder.index2id(monitor_sets[6])
    print(builder.cal_g(flow_sensors, monitor_ids))
