# coding=utf-8
# @author:      ChengJing
# @name:        process_data.py
# @datetime:    2021/12/14 17:38
# @software:    PyCharm
# @description:

import re
import os
import wntr
import numpy as np
import pandas as pd
from copy import deepcopy  # beware shallow- vs deep-copy bugs when passing the shared parameter dict around
from threading import Thread
from multiprocessing import Pool


def nodes_by_diameter(wn, diameter=400, operator='=='):
    """
    Collect the ids of nodes attached to pipes whose diameter satisfies a condition.

    Args:
        wn: wntr WaterNetworkModel (only ``pipe_name_list`` and ``get_link`` are used)
        diameter: pipe diameter threshold in mm (the model stores meters, hence /1000)
        operator: string, one of ['==', '<=', '>=']; any other value falls back
            to '>=' (this preserves the original branching behavior)
    Returns:
        nodes: list of node ids — the start and end node of every matching pipe;
            duplicates are kept and order follows pipe iteration order
    """
    # Dispatch table avoids repeating the append logic in three branches.
    comparators = {
        '==': lambda measured, target: measured == target,
        '<=': lambda measured, target: measured <= target,
    }
    # Unknown operators degrade to '>=' exactly like the original else branch.
    compare = comparators.get(operator, lambda measured, target: measured >= target)
    threshold = diameter / 1000  # mm -> m, the unit wntr stores internally
    nodes = []
    for pipe_name in wn.pipe_name_list:
        pipe = wn.get_link(pipe_name)
        if compare(pipe.diameter, threshold):
            nodes.append(pipe.start_node_name)
            nodes.append(pipe.end_node_name)
    return nodes


def partition(inp_file=r'./inp/tmodel24.inp', monitor_scheme=r"..\model\cluster\monitor-scheme.xlsx"):
    """
    Build a {node id -> partition id} mapping from the clustering result file.

    Args:
        inp_file: string, EPANET inp file used to resolve node indices to node ids
        monitor_scheme: Excel workbook holding (node index, partition id) rows
            in sheet 'monitor_20'
    Returns:
        dict mapping node id to its partition (cluster) id; if an index appears
        more than once the last row wins
    """
    model = wntr.network.WaterNetworkModel(inp_file)
    scheme = pd.read_excel(
        monitor_scheme,
        sheet_name='monitor_20')
    node_names = model.node_name_list
    # Column 0 = node index into the model's node list, column 1 = partition id.
    return {
        node_names[node_idx]: part_id
        for node_idx, part_id in zip(scheme.iloc[:, 0], scheme.iloc[:, 1])
    }


def process_norm_data(sensors, pfname, pname, qfname, qname, is_saveq=True,
                      n_samples=1381, window=60):
    """
    Slice normal-operation data into overlapping sliding windows and append
    each window to a merged output csv.

    Args:
        sensors: list of column indices of the monitored pressure sensors
        pfname: input csv with normal pressure data (header row + index column)
        pname: output csv the pressure windows are appended to (mode 'a')
        qfname: input csv with normal flow data (only read when is_saveq)
        qname: output csv the flow windows are appended to
        is_saveq: whether the flow data should be processed as well
        n_samples: number of sliding windows to extract (default keeps the
            original hard-coded 1381)
        window: rows per window (default keeps the original hard-coded 60)
    """
    pdf = pd.read_csv(pfname, header=0, index_col=0)
    # Only touch the flow file when it is actually needed.
    qdf = pd.read_csv(qfname, header=0, index_col=0) if is_saveq else None
    for k in range(n_samples):
        # Window k covers rows [k, k + window), restricted to sensor columns.
        pdf.iloc[k:k + window, sensors].to_csv(
            pname, header=False, index=False, mode='a')
        if is_saveq:
            qdf.iloc[k:k + window, :].to_csv(
                qname, header=False, index=False, mode='a')


def processnormal(sensors, pfname, pname, n_repeats=224, n_samples=1381,
                  window=60, noise_std=0.5):
    """
    Generate noisy copies of the normal pressure windows for training and
    append them to a merged output csv.

    Args:
        sensors: list of sensor column indices
        pfname: input csv with normal pressure data (header row + index column)
        pname: output csv the noisy windows are appended to (mode 'a')
        n_repeats: number of noisy passes over the data; the default 224
            keeps the original 14 * 16 nested-loop behavior (the original
            loop indices were never used)
        n_samples: sliding windows per pass (original hard-coded 1381)
        window: rows per window (original hard-coded 60)
        noise_std: standard deviation of the gaussian noise added to pressures
    """
    pdf = pd.read_csv(pfname, header=0, index_col=0)
    for _ in range(n_repeats):
        for k in range(n_samples):
            # Same window, fresh noise sample on every repeat.
            data = pdf.iloc[k:k + window, sensors] \
                + np.random.normal(0, noise_std, size=(window, len(sensors)))
            data.to_csv(pname, header=False, index=False, mode='a')


class ProcessData:
    """
    Reshape the generated pipe-burst data files into merged training files.
    """

    # Burst levels 0.0 .. 2.0 in 0.1 steps. Kept as an immutable class
    # constant so it can serve as a default safely (the original code used a
    # mutable list literal as a default argument).
    DEFAULT_BURST_LEVELS = tuple(round(0.1 * i, 1) for i in range(21))

    def __init__(self, sensors, partitions, data_folder_path, time_serious_len=60):
        """
        Cache the parameters shared by the processing methods.

        Args:
            sensors: list of monitored-node column indices
            partitions: dict mapping node id -> partition (cluster) id
            data_folder_path: string, folder containing the burst data files
            time_serious_len: int, number of time steps per burst sample
                (must match how the burst files were generated)
        """
        self.sensors = sensors
        self.partitions = partitions
        self.data_folder_path = data_folder_path
        self.time_serious_len = time_serious_len

    def _get_files(self, pattern=r'P_(\w*)_([0-9.]*)\.csv', burst_level=None, nodes_by_diameter=None):
        """
        List the data files that match the requested burst levels (and,
        optionally, a node set constrained by pipe diameter).

        Args:
            pattern: regex where group(1) is the node id and group(2) the burst level
            burst_level: list of burst levels to keep; None means all of
                DEFAULT_BURST_LEVELS (0.0 .. 2.0, step 0.1)
            nodes_by_diameter: list of node ids satisfying a diameter
                requirement; a falsy value (None/empty) disables the filter,
                matching the original truthiness check
        Returns:
            pfiles: matching pressure data files
            qfiles: corresponding monitored-flow files ('P' -> 'Q_all')
            lqfiles: corresponding leak-flow label files ('P' -> 'Q')
            nodes: burst node id for each returned file, aligned with pfiles
        """
        if burst_level is None:
            burst_level = self.DEFAULT_BURST_LEVELS
        # Round to absorb float noise (e.g. 0.30000000000000004 produced by
        # np.arange-based level lists) that would break exact `in` matching.
        wanted_levels = {round(float(b), 6) for b in burst_level}
        pfiles, qfiles, lqfiles, nodes = [], [], [], []
        for f in os.listdir(self.data_folder_path):
            match = re.match(pattern, f)
            if not match:
                continue
            if round(float(match.group(2)), 6) not in wanted_levels:
                continue
            node = match.group(1)
            if nodes_by_diameter and node not in nodes_by_diameter:
                continue
            pfiles.append(f)
            qfiles.append(f.replace('P', 'Q_all'))
            lqfiles.append(f.replace('P', 'Q'))
            nodes.append(node)
        return pfiles, qfiles, lqfiles, nodes

    def get_pressure(self, pfiles, nodes, fname, lname):
        """
        Append the sensor columns of every pressure file to one merged file,
        and write a (node id, partition label) row for each extracted sample.

        Args:
            pfiles: list of burst pressure data files
            nodes: list of burst node ids aligned with pfiles
            fname: string, merged pressure output file (appended to)
            lname: string, merged label output file (appended to)
        """
        for idx, fn in enumerate(pfiles):
            print(f'正在整理{fn}')
            frame = pd.read_csv(
                os.path.join(self.data_folder_path, fn),
                header=0,
                index_col=0)
            frame.iloc[:, self.sensors].to_csv(
                fname, header=False, index=False, mode='a')
            # One label row per time-series sample contained in the file.
            sample_count = int(frame.shape[0] / self.time_serious_len)
            burst_node = nodes[idx]
            label_rows = [[burst_node, self.partitions[burst_node]]] * sample_count
            pd.DataFrame(label_rows).to_csv(lname, header=False, index=False, mode='a')

    def get_flow(self, qfiles, qname, lqfiles=None, lqname=None, is_leak=False):
        """
        Append the flow data files to one merged file; optionally also merge
        the leak-flow label files.

        Args:
            qfiles: list of monitored-flow data files
            qname: string, merged monitored-flow output file (appended to)
            lqfiles: list of leak-flow data files (used only when is_leak)
            lqname: string, merged leak-flow output file (used only when is_leak)
            is_leak: bool, whether to merge the leak-flow files too (default False)
        """
        def _append(src, dst):
            # Shared read-then-append step for both file kinds.
            frame = pd.read_csv(
                os.path.join(self.data_folder_path, src),
                header=0,
                index_col=0)
            frame.to_csv(dst, header=False, index=False, mode='a')

        if is_leak:
            for f, lf in zip(qfiles, lqfiles):
                print(f'正在整理{f} & {lf}')
                _append(f, qname)
                _append(lf, lqname)
        else:
            for f in qfiles:
                print(f'正在整理{f}')
                _append(f, qname)

    def multi_thread_process(self, pfiles, nodes, fname, lname, qfiles, qname, lqfiles=None, lqname=None, is_leak=False):
        """
        Merge the pressure files and the flow files on two parallel threads.
        I/O-bound work, so threads overlap the waits despite the GIL.

        Args:
            pfiles: list of burst pressure data files
            nodes: list of burst node ids aligned with pfiles
            fname: string, merged pressure output file
            lname: string, merged label output file
            qfiles: list of monitored-flow data files
            qname: string, merged monitored-flow output file
            lqfiles: list of leak-flow data files
            lqname: string, merged leak-flow output file
            is_leak: bool, whether to merge the leak-flow files too
        """
        pressure_worker = Thread(
            target=self.get_pressure,
            args=(pfiles, nodes, fname, lname))
        flow_worker = Thread(
            target=self.get_flow,
            args=(qfiles, qname, lqfiles, lqname, is_leak))
        pressure_worker.daemon = True
        flow_worker.daemon = True
        pressure_worker.start()
        flow_worker.start()
        pressure_worker.join()
        flow_worker.join()
        print(f'文件合并成功，请在‘{fname}’查看合并的压力数据，在‘{qname}’查看合并的流量数据')


# Multiprocess data-processing entry point
def mulprocess(sensors, partitions, data_folder_path, fname, lname, qname, lqname,
               is_leak=False, time_serious_len=60, pattern=r'P_(\w*)_([0-9.]*)\.csv',
               burst_level=None, nodes_by_diameter=None):
    """
    Worker entry point so the file merging can be fanned out over processes.

    Args:
        sensors: list of monitored-node column indices
        partitions: dict mapping node id -> partition (cluster) id
        data_folder_path: string, folder containing the burst data files
        fname: string, merged pressure output file
        lname: string, merged label output file
        qname: string, merged monitored-flow output file
        lqname: string, merged leak-flow output file
        is_leak: bool, whether to merge the leak-flow files too (default False)
        time_serious_len: int, number of time steps per burst sample
        pattern: regex matching the pressure file names
        burst_level: list of burst levels to keep; None selects all levels
            0.0 .. 1.9 in 0.1 steps (the original default)
        nodes_by_diameter: list of node ids satisfying a diameter constraint;
            None disables the filter
    """
    if burst_level is None:
        # The original default was the mutable list(np.arange(0, 2, 0.1)),
        # whose float noise (e.g. 0.30000000000000004) could defeat exact
        # level matching; build clean values lazily instead.
        burst_level = [round(0.1 * i, 1) for i in range(20)]
    data = ProcessData(sensors, partitions, data_folder_path, time_serious_len)
    pfiles, qfiles, lqfiles, nodes = data._get_files(pattern, burst_level, nodes_by_diameter)
    data.multi_thread_process(pfiles, nodes, fname, lname, qfiles, qname, lqfiles, lqname, is_leak)


if __name__ == '__main__':
    # NOTE(review): the triple-quoted block below is a free-standing string
    # the author used as a planning note: burst data is organised by sensor
    # count, burst level and pipe diameter; the normal data set is sized so
    # that 1381 windows * 16 repeats approximate the 464 * 47 = 21808 burst
    # samples. Kept verbatim.
    '''
    爆管数据处理：
    |--控制变量：【监测点的个数】
        |--数据合集(包含各种程度)
        |--依据爆管程度的数据
        |--按照管径【>=400,300,<=200】：
            |--数据合集
            |--依据爆管程度
    正常数据处理：
    |--爆管数据大小：464*47=21808
    |--正常数据集大小：1381 ·· 21808/1381=16
    '''
    wn = wntr.network.WaterNetworkModel(r'./inp/tmodel24.inp')
    partitions_dict = partition()
    # Candidate monitoring schemes: node indices for 6/8/10/12/20 sensors.
    s6 = [456, 349, 405, 130, 70, 220]
    s8 = [349, 220, 130, 70, 422, 405, 402, 452]
    s10 = [349, 220, 130, 70, 422, 405, 402, 452, 30, 272]
    s12 = [349, 220, 130, 70, 422, 405, 402, 452, 30, 272, 150, 156]
    s20 = [3, 120, 460, 156, 462, 291, 406, 338, 146, 144, 378, 379, 68, 272, 214, 233, 292, 423, 347, 75]
    sens = [s6, s8, s10, s12, s20]

    # # Multiprocess generation of the early-warning (normal) data ---
    # pp = Pool(10)
    # for sen in sens:
    #     pp.apply_async(processnormal, args=(sen, r'./datas/normal_pressure.csv', f'./datas/process/normal_data/{len(sen)}_data_norm.csv'))
    # save = True
    # for sen in sens:
    #     pp.apply_async(process_norm_data, args=(sen, r'./datas/normal_pressure.csv', f'./datas/process/raw_data/{len(sen)}_p_norm.csv', r'./datas/normal_flow.csv', f'./datas/process/raw_data/q_norm.csv', save))
    #     save = False
    # pp.close()
    # pp.join()
    # # ------------------------

    # Node groups by pipe diameter: >= 400 mm, exactly 300 mm, <= 200 mm.
    nodes_400 = nodes_by_diameter(wn, 400, '>=')
    nodes_300 = nodes_by_diameter(wn, 300, '==')
    nodes_200 = nodes_by_diameter(wn, 200, '<=')
    nodes_diameter = [nodes_400, nodes_300, nodes_200]
    diameters = ['400', '300', '200']

    # Burst severity levels, split into train / test subsets.
    burst_level = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5]
    train_level = [0.2, 0.4, 0.5, 0.6, 0.7, 0.9, 1.0, 1.1, 1.3, 1.4, 1.5]
    test_level = [0.3, 0.8, 1.2]
    mode = [train_level, test_level]
    mode_name = ['train', 'test']

    p = Pool(10)

    # Template kwargs for mulprocess; the 0/''/None placeholder entries are
    # overwritten on a deep copy by every task submitted below.
    parameter = {
        'sensors': 0,  # placeholder, replaced per task
        'partitions': partitions_dict,
        'data_folder_path': r'./datas/burst',
        'fname': '',  # placeholder, replaced per task
        'lname': '',  # placeholder, replaced per task
        'qname': '',  # placeholder, replaced per task
        'lqname': None,
        'is_leak': False,
        'time_serious_len': 60,
        'pattern': r'P_(\w*)_([0-9.]*)\.csv',
        'burst_level': 0,  # placeholder, replaced per task
        'nodes_by_diameter': None  # placeholder, replaced per task
    }
    for s in sens:  # merge data per burst level
        for b in burst_level:
            fp = deepcopy(parameter)
            fp['sensors'] = s
            fp['burst_level'] = [b]
            fp['fname'] = f'./datas/process/data_level/P_s{len(s)}_{b}.csv'
            fp['lname'] = f'./datas/process/data_level/L_s{len(s)}_{b}.csv'
            fp['qname'] = f'./datas/process/data_level/Q_s{len(s)}_{b}.csv'
            fp['nodes_by_diameter'] = None
            p.apply_async(mulprocess, kwds=fp)

    for s in sens:  # merge data per diameter group and burst level
        for mm, node in zip(diameters, nodes_diameter):
            for b in burst_level:
                fp = deepcopy(parameter)
                fp['sensors'] = s
                fp['burst_level'] = [b]
                fp['fname'] = f'./datas/process/data_diameter/level/P_s{len(s)}_{mm}_{b}.csv'
                fp['lname'] = f'./datas/process/data_diameter/level/L_s{len(s)}_{mm}_{b}.csv'
                fp['qname'] = f'./datas/process/data_diameter/level/Q_s{len(s)}_{mm}_{b}.csv'
                fp['nodes_by_diameter'] = node
                p.apply_async(mulprocess, kwds=fp)

    for s in sens:  # train/test collections over all levels
        for bn, b in zip(mode_name, mode):
            fp = deepcopy(parameter)
            fp['sensors'] = s
            fp['burst_level'] = b
            fp['fname'] = f'./datas/process/data_collection/{bn}_P_s{len(s)}.csv'
            fp['lname'] = f'./datas/process/data_collection/{bn}_L_s{len(s)}.csv'
            fp['qname'] = f'./datas/process/data_collection/{bn}_Q_s{len(s)}.csv'
            fp['nodes_by_diameter'] = None
            p.apply_async(mulprocess, kwds=fp)

    for s in sens:  # train/test collections per diameter group
        for mm, node in zip(diameters, nodes_diameter):
            for bn, b in zip(mode_name, mode):
                fp = deepcopy(parameter)
                fp['sensors'] = s
                fp['burst_level'] = b
                fp['fname'] = f'./datas/process/data_diameter/collection/{bn}_P_s{len(s)}_{mm}.csv'
                fp['lname'] = f'./datas/process/data_diameter/collection/{bn}_L_s{len(s)}_{mm}.csv'
                fp['qname'] = f'./datas/process/data_diameter/collection/{bn}_Q_s{len(s)}_{mm}.csv'
                fp['nodes_by_diameter'] = node
                p.apply_async(mulprocess, kwds=fp)

    p.close()
    p.join()
    print('数据处理!')
