# coding=utf-8
# @author:      ChengJing
# @name:        run_improve_early_warning.py
# @datetime:    2022/2/4 16:19
# @software:    PyCharm
# @description:

import torch
import numpy as np
from torch.utils.data import DataLoader, random_split
from model.improve_location.IRNN import IRNN
from model.improve_location.ICNN import ICNN
from model.improve_location.IFA import IFA
from datas.dataset import MyAlarmDataQ
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss, ConfusionMatrix, ClassificationReport
from ignite.utils import setup_logger

# Fix RNG seeds so training runs are reproducible (CPU and CUDA RNG streams).
torch.manual_seed(66)
torch.cuda.manual_seed(66)
# Single global device; every model/tensor below is moved to it.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# Top-k accuracy helper
def topk_accuracy(py, ty, k=1):
    """
    Compute top-k accuracy.

    Args:
        py: predicted scores/logits, shape (N, C)
        ty: ground-truth class labels, shape (N,)
        k: number of top predictions to consider

    Returns:
        float: fraction of samples whose true label appears in their top-k predictions.
    """
    _, topk_idx = torch.topk(py, k, dim=-1)  # (N, k) predicted class indices
    # A sample counts as correct if its true label appears anywhere in its top-k set.
    # (The previous `py.squeeze() == ty` comparison only worked for k == 1: for
    # k > 1 it compared shape (N, k) against (N,), which is broken.)
    correct = (topk_idx == ty.unsqueeze(-1)).any(dim=-1)
    return (torch.sum(correct) / ty.shape[0]).item()


# Training loop built on the ignite library
def run(
        model,
        optimizer,
        loss_fn,
        train_loader,
        test_loader,
        max_epochs,
        is_save=False,
        fname='model_state_dict.pk'):
    """
    Train `model` with ignite and evaluate it after every epoch.

    Args:
        model: pytorch model
        optimizer: optimizer instance
        loss_fn: loss function instance
        train_loader: training data loader
        test_loader: test data loader
        max_epochs: int, maximum number of training epochs
        is_save: bool, whether to save the model state dict after training
        fname: string, file name used when saving the model
    Return:
        history: tensor of shape (max_epochs, 4); columns are
            train_accuracy, train_loss, test_accuracy, test_loss
    """
    metrics = {
        "accuracy": Accuracy(device=device),
        "loss": Loss(loss_fn, device=device),
        "cr": ClassificationReport(device=device),
        "cm": ConfusionMatrix(2, device=device),
    }
    trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
    evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)

    # One row per epoch: train accuracy/loss then test accuracy/loss.
    history = torch.zeros((max_epochs, 4))

    @trainer.on(Events.EPOCH_COMPLETED)
    def _log_epoch(engine):
        """After each epoch, evaluate on both loaders, record and print metrics."""
        row = engine.state.epoch - 1
        banners = (
            (train_loader,
             "Training Results - Epoch: {}  Avg accuracy: {:.4f} Avg loss: {:.4f}"),
            (test_loader,
             "Validation Results - Epoch: {}  Avg accuracy: {:.4f} Avg loss: {:.4f}"),
        )
        for col, (loader, fmt) in enumerate(banners):
            evaluator.run(loader)
            m = evaluator.state.metrics
            history[row, 2 * col] = m["accuracy"]
            history[row, 2 * col + 1] = m["loss"]
            print(fmt.format(engine.state.epoch, m["accuracy"], m["loss"]))

    trainer.run(train_loader, max_epochs=max_epochs)
    if is_save:
        torch.save(model.state_dict(), fname)
    return history


# Run prediction with a trained model
def predict(model, fname, data):
    """
    Load saved weights into `model` and run inference on `data`.

    Args:
        model: pytorch model instance (architecture must match the checkpoint)
        fname: file name of the saved state dict
        data: input tensor to predict on
    Return:
        model output for `data`
    """
    # map_location lets a checkpoint trained on GPU load on a CPU-only machine
    # (mirrors the module-level `device` selection).
    map_to = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.load_state_dict(torch.load(fname, map_location=map_to))
    model.eval()
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        return model(data)


def config_rnn(g, in_features, gru_hidden, linear_hidden, laq_hidden, out_features, lr, max_epochs, batch_size,  train_norm_pdata_file, train_burst_pdata_file, test_norm_pdata_file, test_burst_pdata_file, train_norm_qdata_file, train_burst_qdata_file, test_norm_qdata_file, test_burst_qdata_file, serious_len, weight=(1, 1), is_split=True, train_size=0.7, is_save=False, fname='rnn_state_dict.pk', is_save_result=True, fname_result='rnn_reselt.csv'):
    """
    Configure and run the IRNN model.

    Args:
        g: connectivity between flow sensors and pressure sensors, shape N*M
        in_features: number of input features
        gru_hidden: hidden size of the GRU layer
        linear_hidden: hidden size of the linear layer
        laq_hidden: hidden size of the LAQ layer
        out_features: number of output features
        lr: learning rate
        max_epochs: maximum number of training epochs
        batch_size: batch size
        train_norm_pdata_file: file with normal training pressure data
        train_burst_pdata_file: file with burst training pressure data
        test_norm_pdata_file: file with normal test pressure data; unused when is_split is True
        test_burst_pdata_file: file with burst test pressure data; unused when is_split is True
        train_norm_qdata_file: file with normal training flow data
        train_burst_qdata_file: file with burst training flow data
        test_norm_qdata_file: file with normal test flow data; unused when is_split is True
        test_burst_qdata_file: file with burst test flow data; unused when is_split is True
        serious_len: time length of each data series
        weight: class weights for the loss (immutable default; was a mutable list)
        is_split: whether to split one dataset into train/test sets
        train_size: fraction of data used for training
        is_save: bool, whether to save the trained model
        fname: string, file name for the saved model
        is_save_result: bool, default True, whether to save the metric log
        fname_result: file name for the metric log
            (default kept as 'rnn_reselt.csv' — typo preserved for compatibility)
    """
    model = IRNN(g, in_features, gru_hidden, linear_hidden, laq_hidden, out_features).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss(
        weight=torch.tensor(weight, device=device, dtype=torch.float32))
    if is_split:
        data = MyAlarmDataQ(train_norm_pdata_file, train_norm_qdata_file,
                            train_burst_pdata_file, train_burst_qdata_file,
                            need_time_len=serious_len)
        n_train = int(train_size * len(data))  # compute the split sizes once
        train_data, test_data = random_split(data, [n_train, len(data) - n_train])
    else:
        train_data = MyAlarmDataQ(train_norm_pdata_file, train_norm_qdata_file,
                                  train_burst_pdata_file, train_burst_qdata_file,
                                  need_time_len=serious_len)
        test_data = MyAlarmDataQ(test_norm_pdata_file, test_norm_qdata_file,
                                 test_burst_pdata_file, test_burst_qdata_file,
                                 need_time_len=serious_len)
    train_loader = DataLoader(train_data, batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size, shuffle=True)
    log = run(model, optimizer, loss_fn, train_loader, test_loader, max_epochs, is_save, fname)
    if is_save_result:
        np.savetxt(fname_result, log.numpy(), fmt='%.4f', delimiter=',',
                   header='train_accuracy,train_loss,test_accuracy,test_loss', comments='')


def config_cnn(g, sensors, in_channel, hidden_channel1, hidden_channel2, out_channel, hidden_feature, out_features, lr, max_epochs, batch_size, train_norm_pdata_file, train_burst_pdata_file, test_norm_pdata_file, test_burst_pdata_file, train_norm_qdata_file, train_burst_qdata_file, test_norm_qdata_file, test_burst_qdata_file, serious_len, weight=(1, 1), is_split=True, train_size=0.7, is_save=False, fname='cnn_state_dict.pk', is_save_result=True, fname_result='cnn_reselt.csv'):
    """
    Configure and run the ICNN model.

    Args:
        g: connectivity between flow sensors and pressure sensors, shape N*M
        sensors: number of pressure sensors
        in_channel: number of input channels
        hidden_channel1: first hidden channel count
        hidden_channel2: second hidden channel count
        out_channel: convolution output channel count
        hidden_feature: hidden feature size
        out_features: number of output features
        lr: learning rate
        max_epochs: maximum number of training epochs
        batch_size: batch size
        train_norm_pdata_file: file with normal training pressure data
        train_burst_pdata_file: file with burst training pressure data
        test_norm_pdata_file: file with normal test pressure data; unused when is_split is True
        test_burst_pdata_file: file with burst test pressure data; unused when is_split is True
        train_norm_qdata_file: file with normal training flow data
        train_burst_qdata_file: file with burst training flow data
        test_norm_qdata_file: file with normal test flow data; unused when is_split is True
        test_burst_qdata_file: file with burst test flow data; unused when is_split is True
        serious_len: time length of each data series
        weight: class weights for the loss (immutable default; was a mutable list)
        is_split: whether to split one dataset into train/test sets
        train_size: fraction of data used for training
        is_save: bool, whether to save the trained model
        fname: string, file name for the saved model
        is_save_result: bool, default True, whether to save the metric log
        fname_result: file name for the metric log
            (default kept as 'cnn_reselt.csv' — typo preserved for compatibility)
    """
    model = ICNN(g, sensors, in_channel, hidden_channel1, hidden_channel2,
                 out_channel, hidden_feature, out_features).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss(
        weight=torch.tensor(weight, device=device, dtype=torch.float32))
    if is_split:
        data = MyAlarmDataQ(train_norm_pdata_file, train_norm_qdata_file,
                            train_burst_pdata_file, train_burst_qdata_file,
                            need_time_len=serious_len)
        n_train = int(train_size * len(data))  # compute the split sizes once
        train_data, test_data = random_split(data, [n_train, len(data) - n_train])
    else:
        train_data = MyAlarmDataQ(train_norm_pdata_file, train_norm_qdata_file,
                                  train_burst_pdata_file, train_burst_qdata_file,
                                  need_time_len=serious_len)
        test_data = MyAlarmDataQ(test_norm_pdata_file, test_norm_qdata_file,
                                 test_burst_pdata_file, test_burst_qdata_file,
                                 need_time_len=serious_len)
    train_loader = DataLoader(train_data, batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size, shuffle=True)
    log = run(model, optimizer, loss_fn, train_loader, test_loader, max_epochs, is_save, fname)
    if is_save_result:
        np.savetxt(fname_result, log.numpy(), fmt='%.4f', delimiter=',',
                   header='train_accuracy,train_loss,test_accuracy,test_loss', comments='')


def config_fa(g, sensors, in_features, out_features, lr, max_epochs, batch_size, train_norm_pdata_file, train_burst_pdata_file, test_norm_pdata_file, test_burst_pdata_file, train_norm_qdata_file, train_burst_qdata_file, test_norm_qdata_file, test_burst_qdata_file, serious_len, weight=(1, 1), is_split=True, train_size=0.7, is_save=False, fname='fa_state_dict.pk', is_save_result=True, fname_result='fa_reselt.csv'):
    """
    Configure and run the IFA (FADenseNet) model.

    Args:
        g: connectivity between flow sensors and pressure sensors, shape N*M
        sensors: number of pressure sensors
        in_features: number of input features per time step
            (the model is fed in_features * serious_len flattened features)
        out_features: number of output features
        lr: learning rate
        max_epochs: maximum number of training epochs
        batch_size: batch size
        train_norm_pdata_file: file with normal training pressure data
        train_burst_pdata_file: file with burst training pressure data
        test_norm_pdata_file: file with normal test pressure data; unused when is_split is True
        test_burst_pdata_file: file with burst test pressure data; unused when is_split is True
        train_norm_qdata_file: file with normal training flow data
        train_burst_qdata_file: file with burst training flow data
        test_norm_qdata_file: file with normal test flow data; unused when is_split is True
        test_burst_qdata_file: file with burst test flow data; unused when is_split is True
        serious_len: time length of each data series
        weight: class weights for the loss (immutable default; was a mutable list)
        is_split: whether to split one dataset into train/test sets
        train_size: fraction of data used for training
        is_save: bool, whether to save the trained model
        fname: string, file name for the saved model
        is_save_result: bool, default True, whether to save the metric log
        fname_result: file name for the metric log
            (default kept as 'fa_reselt.csv' — typo preserved for compatibility)
    """
    model = IFA(g, sensors, in_features * serious_len, out_features).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss(
        weight=torch.tensor(weight, device=device, dtype=torch.float32))
    if is_split:
        data = MyAlarmDataQ(train_norm_pdata_file, train_norm_qdata_file,
                            train_burst_pdata_file, train_burst_qdata_file,
                            need_time_len=serious_len)
        n_train = int(train_size * len(data))  # compute the split sizes once
        train_data, test_data = random_split(data, [n_train, len(data) - n_train])
    else:
        train_data = MyAlarmDataQ(train_norm_pdata_file, train_norm_qdata_file,
                                  train_burst_pdata_file, train_burst_qdata_file,
                                  need_time_len=serious_len)
        test_data = MyAlarmDataQ(test_norm_pdata_file, test_norm_qdata_file,
                                 test_burst_pdata_file, test_burst_qdata_file,
                                 need_time_len=serious_len)
    train_loader = DataLoader(train_data, batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size, shuffle=True)
    log = run(model, optimizer, loss_fn, train_loader, test_loader, max_epochs, is_save, fname)
    if is_save_result:
        np.savetxt(fname_result, log.numpy(), fmt='%.4f', delimiter=',',
                   header='train_accuracy,train_loss,test_accuracy,test_loss', comments='')


if __name__ == '__main__':
    # wntr (water network modeling) and CreateGraph are only needed when the
    # script is run directly, so they are imported here rather than at the top.
    import wntr
    from datas.dataset import CreateGraph

    # Load the water network model from an EPANET .inp file.
    wn = wntr.network.WaterNetworkModel(r'../datas/inp/tmodel24.inp')
    # IDs of the flow monitoring points in the network.
    flow_sensors = ['11', '48', '53']
    # Indices of the six pressure monitoring points.
    s6 = [456, 349, 405, 130, 70, 220]
    cg = CreateGraph(wn)
    # Convert node indices to node IDs, then build the flow/pressure
    # connectivity matrix g (shape N*M) expected by the models.
    id6 = cg.index2id(s6)
    g = cg.cal_g(flow_sensors, id6)
    g = torch.from_numpy(g).to(device)
    # NOTE(review): the data files below are absolute Windows paths; the
    # test_* files appear unused since is_split=True splits the training data.
    # Train/evaluate the IRNN model for one epoch (smoke-test settings).
    config_rnn(
        g=g,
        in_features=6,
        gru_hidden=36,
        linear_hidden=72,
        laq_hidden=20,
        out_features=2,
        lr=0.001,
        max_epochs=1,
        batch_size=128,
        weight=[16, 1],  # up-weight the normal class (presumably imbalanced data)
        train_norm_pdata_file=r"F:\模型数据\new_data\process\raw_data\6_p_norm.csv",
        train_norm_qdata_file=r"F:\模型数据\new_data\process\raw_data\q_norm.csv",
        train_burst_pdata_file=r"F:\模型数据\new_data\process\data_level\P_s6_0.01.csv",
        train_burst_qdata_file=r"F:\模型数据\new_data\process\data_level\Q_s6_0.01.csv",
        is_split=True,
        train_size=0.7,
        test_norm_pdata_file=r"F:\模型数据\test_20_p.csv",
        test_norm_qdata_file='',
        test_burst_pdata_file=r"F:\模型数据\test_20_pl.csv",
        test_burst_qdata_file='',
        serious_len=60
    )

    # Train/evaluate the ICNN model with the same data configuration.
    config_cnn(
        g=g,
        sensors=6,
        in_channel=1,
        hidden_channel1=8,
        hidden_channel2=16,
        out_channel=32,
        hidden_feature=64,
        out_features=2,
        lr=0.001,
        max_epochs=1,
        batch_size=256,
        weight=[16, 1],
        train_norm_pdata_file=r"F:\模型数据\new_data\process\raw_data\6_p_norm.csv",
        train_norm_qdata_file=r"F:\模型数据\new_data\process\raw_data\q_norm.csv",
        train_burst_pdata_file=r"F:\模型数据\new_data\process\data_level\P_s6_0.01.csv",
        train_burst_qdata_file=r"F:\模型数据\new_data\process\data_level\Q_s6_0.01.csv",
        is_split=True,
        train_size=0.7,
        test_norm_pdata_file=r"F:\模型数据\test_20_p.csv",
        test_norm_qdata_file='',
        test_burst_pdata_file=r"F:\模型数据\test_20_pl.csv",
        test_burst_qdata_file='',
        serious_len=60
    )

    # Train/evaluate the IFA model with the same data configuration.
    config_fa(
        g=g,
        sensors=6,
        in_features=6,
        out_features=2,
        lr=0.001,
        max_epochs=1,
        batch_size=256,
        weight=[16, 1],
        train_norm_pdata_file=r"F:\模型数据\new_data\process\raw_data\6_p_norm.csv",
        train_norm_qdata_file=r"F:\模型数据\new_data\process\raw_data\q_norm.csv",
        train_burst_pdata_file=r"F:\模型数据\new_data\process\data_level\P_s6_0.01.csv",
        train_burst_qdata_file=r"F:\模型数据\new_data\process\data_level\Q_s6_0.01.csv",
        is_split=True,
        train_size=0.7,
        test_norm_pdata_file=r"F:\模型数据\test_20_p.csv",
        test_norm_qdata_file='',
        test_burst_pdata_file=r"F:\模型数据\test_20_pl.csv",
        test_burst_qdata_file='',
        serious_len=60
    )