# coding=utf-8
# @author:      ChengJing
# @name:        run.py
# @datetime:    2021/7/11 20:05
# @software:    PyCharm
# @description:

import numpy as np
import pandas as pd
import networkx as nx
from collections import Counter
import wntr
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from model.cluster.SDCN import sdcn
from datas.dataset import CreateGraph
import matplotlib.pyplot as plt
# plt.rcParams['font.sans-serif'] = ['SimSun']  # set default font (needed for CJK labels)
# plt.rcParams['axes.unicode_minus'] = False    # fix minus-sign rendering with that font

class MyData:
    """
    Assemble clustering data sets (node features + graph adjacency) from a
    water network model and a CSV of node pressure time series.
    """

    def __init__(self, wn, fname):
        # wn:    wntr WaterNetworkModel
        # fname: path to a CSV whose columns are node ids and rows are samples
        self.wn = wn
        self.fname = fname

    def _cluster_data(self, is_knn=False, nodes=None):
        """
        Load the clustering features and the matching adjacency matrix.

        Args:
            is_knn: use a knn graph; by default the pipe-network structure
                graph is used instead
            nodes: list, node ids selecting a subgraph (None = whole network)
        Returns:
            feature: node feature matrix, float32
            adj: graph adjacency matrix
        """
        # BUG FIX: the original only bound ``g`` in the else branch, so
        # is_knn=True combined with a node subset raised NameError. Bind the
        # graph in both branches (knn_graph returns the same (graph, adj)
        # pair shape as wsn_graph -- confirm against CreateGraph).
        if is_knn:
            g, adj = CreateGraph(self.wn).knn_graph()
        else:
            g, adj = CreateGraph(self.wn).wsn_graph()
        data = pd.read_csv(self.fname, header=0, index_col=0)
        if nodes is not None:  # BUG FIX: identity test, not ``!= None``
            sg = g.subgraph(nodes)
            adj = np.asarray(nx.adjacency_matrix(sg).todense()).astype(np.float32)
            feature = data[nodes].values.astype(np.float32)
        else:
            feature = data.values.astype(np.float32)
        return feature, adj

    def mean_time(
            self,
            num_day=1440,
            start_day=10,
            end_day=-1,
            is_minmax=True,
            is_knn=False,
            nodes=None):
        """
        Average several days of data into one representative day.

        Args:
            num_day: number of samples contained in one day
            start_day: index of the first day to include, counted from 0
            end_day: index one past the last day to include; -1 means
                "through the last complete day in the file"
            is_minmax: min-max normalise the averaged data
            is_knn: use a knn graph; by default the pipe-network structure
                graph is used instead
            nodes: list, node ids selecting a subgraph
        Returns:
            mean_data: (num_day, n_nodes) float32 averaged day
            adj: graph adjacency matrix
        """
        data, adj = self._cluster_data(is_knn, nodes)
        num, n_nodes = data.shape  # renamed: no longer shadows ``nodes``
        n = int(num / num_day)
        if end_day == -1:
            end_day = n
        mean_data = np.zeros((num_day, n_nodes))
        for i in range(end_day):
            # BUG FIX: the original tested the loop-invariant ``n >= start_day``
            # and therefore summed either every day or none, while still
            # dividing by (end_day - start_day). The mean is intended over the
            # days from start_day (inclusive) to end_day (exclusive).
            if i >= start_day:
                mean_data += data[i * num_day:(i + 1) * num_day, :]
        mean_data /= (end_day - start_day)
        if is_minmax:
            process = MinMaxScaler()
            mean_data = process.fit_transform(mean_data)
        return mean_data.astype(np.float32), adj


def plot_area(wn, title, node_attribute, fname):
    """Draw the partition result on the network layout.

    Args:
        wn: wntr water network model
        title: figure title
        node_attribute: dict mapping node_id -> attribute value (area label)
        fname: file name for saving the figure (saving is currently disabled)
    """
    plot_kwargs = {
        'title': title,
        'node_attribute': node_attribute,
        'add_colorbar': True,
        'node_colorbar_label': 'Area',
        'node_size': 30,
    }
    wntr.graphics.plot_network(wn, **plot_kwargs)
    # plt.savefig(fname)
    plt.show()


def plot_infomap(wn, fname, figname):
    """Plot the infomap partition stored in a clu result file.

    Args:
        wn: wntr water network model
        fname: file holding the infomap result
            (column 0: node index, column 1: community label)
        figname: file name for the saved figure (forwarded to plot_area)
    """
    data = np.loadtxt(fname)
    # Map each node id to its infomap community label.
    # FIX: the original if/else branches were byte-identical duplicates of
    # this loop; only the reservoir back-fill below differed.
    atr = {}
    for i in range(data.shape[0]):
        atr[wn.node_name_list[int(data[i, 0])]] = int(data[i, 1])
    if data.shape[0] != wn.num_nodes:
        # The clu file does not cover every node; the missing trailing
        # entries are taken to be reservoirs and marked with -1
        # (assumes reservoirs sit at the end of node_name_list -- TODO confirm).
        for j in range(wn.num_reservoirs):
            atr[wn.node_name_list[-(j + 1)]] = -1
    plot_area(wn, 'infomap', atr, figname)


def adj2link(adj):
    """Convert an adjacency matrix into an edge list.

    Args:
        adj: square adjacency matrix where 1 marks an edge
    Returns:
        list of [i, j] pairs, one per entry equal to 1, scanned row by row
    """
    n_rows = adj.shape[0]
    return [[row, col]
            for row in range(n_rows)
            for col in np.where(adj[row, :] == 1)[0]]


def nodes_from_index(wn, fname, label_id):
    """Return the nodes belonging to one class of an infomap clu result.

    Args:
        wn: wntr water network model
        fname: file holding the infomap result (clu format; column 0 is the
            node index and column 2 is read as the class label here)
        label_id: int, class number to select
    Returns:
        nodes: list of node ids in that class
        indexs: list of the matching node indices
    """
    data = np.loadtxt(fname)
    mask = data[:, 2] == label_id
    indexs = [int(idx) for idx in data[mask, 0]]
    nodes = [wn.node_name_list[k] for k in indexs]
    return nodes, indexs


def nodes_lengths(nodes, network=None):
    """Total pipe length of the region formed by the given node set.

    Args:
        nodes: iterable of node ids
        network: wntr WaterNetworkModel to query; defaults to the
            module-level ``wn`` (the original silently depended on that
            global -- the parameter keeps old call sites working while
            making the dependency explicit)
    Returns:
        summed ``length`` of the selected links of all nodes
    """
    net = wn if network is None else network
    length = 0
    for node in nodes:
        # flag='OUTLET' presumably restricts to links leaving the node so
        # each pipe is counted once per region -- confirm against wntr docs.
        for link_name in net.get_links_for_node(node, flag='OUTLET'):
            length += net.get_link(link_name).length
    return length


def monitor(wn, nodes):
    """Pick a monitoring node for the given SDCN classes.

    Reads the class -> node-index mapping from area.xlsx, averages the
    normal-operation pressures of those nodes, fits a single-cluster KMeans
    and returns the node closest to the cluster centre.

    Args:
        wn: wntr water network model
        nodes: iterable of SDCN class labels selecting the area
    Returns:
        node id closest to the pressure-series centroid of the area
    """
    df = pd.read_excel('area.xlsx', sheet_name='area')
    ids = []
    for area in nodes:
        for j in df['node_index'][df['SDCN_class'] == area].values.reshape(-1):
            ids.append(wn.node_name_list[j])
    datas, adj = MyData(
        wn, fname='../../../datas/datas/normal_pressure.csv').mean_time(nodes=ids)
    data = datas.T
    model = KMeans(n_clusters=1, max_iter=1000, n_init=20, tol=1e-10)
    model.fit(data)
    # Distance of each node's pressure series to the single cluster centre;
    # computed once instead of three times as in the original.
    dists = np.linalg.norm(data - model.cluster_centers_, axis=1)
    min_index = dists.argmin()
    # BUG FIX: np.where returns a 1-tuple of index arrays, so the original
    # ``len(mins) > 1`` was always False and the tie warning never fired.
    ties = np.where(dists == dists[min_index])[0]
    if len(ties) > 1:
        print('不止一个中心%s' % nodes)
    return ids[min_index]


if __name__ == '__main__':
    # flag == 1 runs the 60-day Suzhou network model; any other value runs
    # the small benchmark example network (model1.inp).
    flag = 1
    if flag == 1:
        inp = '../../../datas/inp/model.inp'
        wn = wntr.network.WaterNetworkModel(inp)
        # plot_infomap(wn, 'infomap.txt', 'infomap.png')
        nodes, indexs = nodes_from_index(wn, 'infomap.txt', 10)
        # Average the normal-operation pressure series into one day of data;
        # the transpose makes rows correspond to nodes for clustering.
        datas, adj = MyData(
            wn, fname='../../../datas/datas/normal_pressure.csv').mean_time()
        data = datas.T
        # data = np.ones_like(datas.T, dtype=np.float32)
        # print(data)
    #     # link = adj2link(adj)
    #     # np.savetxt('link.txt', link, delimiter=' ', fmt='%d')
    #     adj = adj + np.eye(adj.shape[0])
    #     adj = adj.astype(np.float32)
    #
        # cluster_model = 'kmeans'
        cluster_model = 'sdcn'

        if cluster_model == 'kmeans':
            k_model = KMeans(n_clusters=6, max_iter=1000, n_init=30, tol=1e-10)
            k_model.fit(data)
            label = k_model.predict(data)
            label = list(label)
            print(Counter(label))
            print(label)
            # np.savetxt('12_area.csv', np.array(label).reshape(-1, 1), fmt='%d', delimiter=',')
            # NOTE(review): assumes the last 3 entries of node_name_list are
            # reservoirs/tanks left at label 0 -- confirm against the inp file.
            atr = np.zeros((wn.num_nodes,))
            atr[:-3] = label
            plot_area(wn, 'kmeans', dict(
                zip(wn.node_name_list, atr)), fname='kmeans.png')
        else:
            # SDCN hyper-parameters, in positional order:
            # data, adj, n_input, n_z, n_clusters,
            # n_enc_1,n_enc_2,n_enc_3
            # n_dec_1,n_dec_2,n_dec_3
            param = [1440, 128, 20, 512, 128, 128, 128, 128, 512]
            sdcn.train_sdcn(data, adj, param[0], param[1], param[2],
                            param[3], param[4], param[5],
                            param[6], param[7], param[8],
                            save_path=r'sdcn.pkl', lr=0.001, epoch=340, is_writer=False)
            pre, label = sdcn.predict(data, adj, model_path=r'sdcn.pkl')
            print(Counter(label))
            label = list(label)
            print(label)
    #         np.savetxt('12_area.csv',np.array(label).reshape(-1,1),fmt='%d',delimiter=',')
    #         for i in range(wn.num_reservoirs):
    #             label.append(-1)
            # NOTE(review): same hard-coded 3-reservoir assumption as the
            # kmeans branch above -- confirm against the inp file.
            atr = np.zeros((wn.num_nodes,))
            atr[:-3] = label
            plot_area(wn, 'sdcn', dict(
                zip(wn.node_name_list, atr)), fname='sdcn.png')
    else:
        # Benchmark example: simulate the network to generate the pressure
        # data instead of reading it from a CSV.
        wn = wntr.network.WaterNetworkModel('model1.inp')
        sim = wntr.sim.WNTRSimulator(wn)
        result = sim.run_sim()
        # Drop the last timestep and the last node column, then transpose so
        # rows are nodes (why the last column is dropped is not visible here;
        # presumably it is the reservoir -- TODO confirm).
        data = result.node['pressure'].iloc[:-1, :-1].values
        data = data.T
        data = data.astype(np.float32)
        pre = MinMaxScaler()
        data = pre.fit_transform(data)
        _, adj = CreateGraph(wn).wsn_graph()
        # link = adj2link(adj)
        # np.savetxt('model_links.txt', link, fmt='%d', delimiter=' ')
        plot_infomap(wn, 'model_infomap.txt', 'model_infomap.png')
        # Add self-loops, as required by the SDCN graph convolution input.
        adj = adj + np.eye(adj.shape[0])
        adj = adj.astype(np.float32)

        # cluster_model = 'kmeans'
        cluster_model = 'sdcn'

        if cluster_model == 'kmeans':
            k_model = KMeans(n_clusters=2, n_init=20)
            k_model.fit(data)
            label = k_model.predict(data)
            label = list(label)
            # Append -1 labels for reservoirs, which were excluded from the data.
            for i in range(wn.num_reservoirs):
                label.append(-1)
            plot_area(wn, 'kmeans', dict(
                zip(wn.node_name_list, label)), fname='model_kmeans.png')
        else:
            # SDCN hyper-parameters, in positional order:
            # data, adj, n_input, n_z, n_clusters,
            # n_enc_1,n_enc_2,n_enc_3
            # n_dec_1,n_dec_2,n_dec_3
            param = [24, 64, 2, 256, 128, 128, 128, 128, 256]
            sdcn.train_sdcn(data, adj, param[0], param[1], param[2],
                            param[3], param[4], param[5],
                            param[6], param[7], param[8],
                            save_path=r'sdcn.pkl', epoch=145)
            pre, label = sdcn.predict(data, adj, model_path=r'sdcn.pkl')
            print(Counter(label))
            label = list(label)
            #         np.savetxt('12_area.csv',np.array(label).reshape(-1,1),fmt='%d',delimiter=',')
            # Append -1 labels for reservoirs, which were excluded from the data.
            for i in range(wn.num_reservoirs):
                label.append(-1)
            # plot_area(wn, 'sdcn', dict(
            #     zip(wn.node_name_list, label)), fname='model_sdcn.png')  