# coding=utf-8
# @author:      ChengJing
# @name:        run_location_node.py
# @datetime:    2022/2/16 17:12
# @software:    PyCharm
# @description:

import os
from pickletools import optimize
import re
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from model.location.RNN_model import MyRNN
import torch
from torch.utils.data import DataLoader, TensorDataset
import wntr


class MyDataSet:
    """
    Dataset for localizing a pipe burst to a specific network node.

    Reads pressure time-series CSV files named ``P_<node>_<level>.csv`` from
    ``root`` and slices each file into fixed-length time windows labelled with
    the index of the burst node.
    """

    # Number of consecutive time steps forming one training sample.
    WINDOW = 60

    def __init__(self, root, partition, sensors, burst_level):
        """
        Args:
            root: str, directory holding the burst data CSV files
            partition: int, partition (cluster) id to select nodes from
            sensors: list, column indices of the monitoring sensors
            burst_level: list, burst severities to include (e.g. [0.2])
        """
        self.root = root
        self.partition = partition
        self.sensors = sensors
        self.burst_level = burst_level

    def _get_nodes(self, file=r".\cluster\monitor-scheme.xlsx"):
        """
        Return ``(node_ids, count)`` for the nodes in this partition.

        Looks up the node indices of the partition in the monitoring-scheme
        spreadsheet, then maps them to node names via the EPANET model.
        """
        df = pd.read_excel(file, sheet_name='monitor_20')
        index = df[df['SDCN_class'] == self.partition]['node_index'].values
        wn = wntr.network.WaterNetworkModel(r'../datas/inp/tmodel24.inp')
        nodes = [wn.node_name_list[i] for i in index]
        return nodes, len(nodes)

    def _iter_samples(self):
        """
        Yield ``(window, label)`` pairs shared by get_data / get_data_gru.

        ``window`` is a (WINDOW, len(sensors)) array of pressures; ``label``
        is the index of the burst node within the partition's node list.
        """
        files = os.listdir(self.root)
        nodes, _ = self._get_nodes()
        for label, node in enumerate(nodes):
            # FIX: escape the '.' before 'csv' — unescaped it matched any
            # character (e.g. 'P_n_0.2Xcsv' would also have matched).
            pattern = rf'P_{node}_([0-9.]*)\.csv'
            for f in files:
                match = re.match(pattern, f)
                if match and float(match.group(1)) in self.burst_level:
                    df = pd.read_csv(self.root + '/' + f, header=0, index_col=0)
                    # Floor division: drop any trailing partial window.
                    for j in range(df.shape[0] // self.WINDOW):
                        window = df.values[j * self.WINDOW:(j + 1) * self.WINDOW, self.sensors]
                        yield window, label

    def get_data(self):
        """
        Return (x, y) with each sample flattened to 1-D, for sklearn models.
        """
        x, y = [], []
        for window, label in self._iter_samples():
            x.append(window.reshape(-1))
            y.append(label)
        return x, y

    def get_data_gru(self):
        """
        Return (x, y, out): 2-D (time, sensors) samples for the RNN plus the
        number of output classes (nodes in the partition).
        """
        _, out = self._get_nodes()
        x, y = [], []
        for window, label in self._iter_samples():
            x.append(window)
            y.append(label)
        return x, y, out



class gru:
    """
    GRU-based classifier that locates a pipe burst to a network node.

    Wraps a ``MyRNN`` model trained on fixed-length pressure windows produced
    by ``MyDataSet.get_data_gru``.
    """

    def __init__(self, in_features) -> None:
        """
        Args:
            in_features: int, number of pressure sensors (features per time step)

        NOTE(review): relies on the module-level ``s6`` sensor list, which is
        only defined in the ``__main__`` block — instantiate from there.
        """
        x, y, out = MyDataSet(r'F:\模型数据\代码\datas\datas\burst', 15, s6, [0.2]).get_data_gru()
        # Convert via np.array first: torch.tensor on a list of ndarrays is slow.
        self.x = torch.tensor(np.array(x), dtype=torch.float32)
        self.y = torch.tensor(y, dtype=torch.int64)
        # BUG FIX: the original ``DataLoader(self.x, self.y, batch_size=128)``
        # passed the label tensor as DataLoader's ``batch_size`` positional and
        # the loader then yielded only x, so ``for tx, ty in ...`` could not
        # work. Pair features and labels in one TensorDataset instead.
        self.dataloader = DataLoader(TensorDataset(self.x, self.y), batch_size=128)
        self.model = MyRNN(in_features, 48, 96, out)

    def fit(self, epoch=100):
        """
        Train for ``epoch`` full passes, printing training accuracy each epoch.
        """
        # Renamed from ``optimize`` to avoid shadowing the module-level import.
        optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)
        loss_f = torch.nn.CrossEntropyLoss()
        for e in range(epoch):
            for tx, ty in self.dataloader:
                py = self.model(tx)
                loss = loss_f(py, ty)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            # BUG FIX: the original passed a TensorDataset to the model here;
            # ``self.x`` is now the raw feature tensor the model can consume.
            print(f'{e}-Train accuracy: {accuracy_score(self.y, self.predict(self.x)):.6f}')

    def predict(self, x):
        """
        Return predicted class indices for a batch tensor ``x``.

        Args:
            x: torch.Tensor of shape (batch, time, features) — presumably;
               whatever ``MyRNN`` accepts (TODO confirm against MyRNN).
        Returns:
            np.ndarray of int class indices, one per sample.
        """
        return self.model(x).detach().numpy().argmax(axis=1)



if __name__ == '__main__':
    # Candidate sensor placements (node indices) for 6/8/10/12/20 monitors.
    # NOTE: ``s6`` is read as a module-level global by gru.__init__, so it
    # must be defined before the model is constructed.
    s6 = [456, 349, 405, 130, 70, 220]
    s8 = [349, 220, 130, 70, 422, 405, 402, 452]
    s10 = [349, 220, 130, 70, 422, 405, 402, 452, 30, 272]
    s12 = [349, 220, 130, 70, 422, 405, 402, 452, 30, 272, 150, 156]
    s20 = [3, 120, 460, 156, 462, 291, 406, 338, 146, 144, 378, 379, 68, 272, 214, 233, 292, 423, 347, 75]

    # Earlier sklearn baselines (RandomForest, DecisionTree, SVC, SGD, MLP on
    # flattened windows from MyDataSet.get_data) were removed as dead
    # commented-out code; recover them from version control if needed.

    # Train the GRU classifier on the 6-sensor scheme.
    model = gru(6)
    model.fit()