import os

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm

from models.model import *
from models.utils import *


class ModelTrain:
    """
    Model training class, used by the backend for model re-training tasks.
    """

    def __init__(self, model_name: str, train_data_name: str, valid_data_name="", seed=1024, **kwargs) -> None:
        """
        Initialize a model-training instance.
        :params: model_name name under which the newly trained model is saved
        :params: train_data_name: name of the training dataset
        :params: valid_data_name: name of the validation dataset; if empty, 10% of the
                 training set is split off as validation data
        :params: seed random seed, default 1024
        :params: kwargs other user-supplied hyper-parameters, namely:
                lr(float): learning rate
                l1_penalty(float): L1 regularization coefficient
                l2_penalty(float): L2 regularization coefficient
                epochs(int): number of training epochs
                loss_weight(list): per-class loss-weight list
                step_size(int): learning-rate scheduler step size
                gamma(float): learning-rate decay factor
                momentum(float): SGD momentum
        """
        self.__device = "cuda" if torch.cuda.is_available() else "cpu"  # run on GPU when available
        self.__seed = seed
        set_seed(self.__seed)
        # Network used for training; 107 input features / 6 classes are fixed by the dataset.
        self.__net = Transformer(in_features=107, out_features=6).to(self.__device)
        # Hyper-parameter dictionary (defaults merged with user overrides).
        self.__params_dict = self.__init_params_dict(
            model_name, train_data_name, kwargs, valid_data_name)
        # Optimizer.
        self.__optim = torch.optim.SGD(
            self.__net.parameters(),
            lr=self.__params_dict["lr"],
            momentum=self.__params_dict["momentum"])
        # Learning-rate decay schedule.
        self.__scheduler = torch.optim.lr_scheduler.StepLR(
            self.__optim,
            step_size=self.__params_dict["step_size"],
            gamma=self.__params_dict["gamma"])
        self.__train_loader = None  # training-set DataLoader
        self.__valid_loader = None  # validation-set DataLoader
        # self.__writer = SummaryWriter()  # TensorBoard
        self.__train_loss_curve = []   # per-epoch training loss
        self.__valid_loss_curve = []   # per-epoch validation loss
        # NOTE: original code used a mistyped triple-underscore name here
        # (self.___train_score_curve); normalized to match its siblings.
        self.__train_score_curve = []  # per-epoch training macro-F1
        self.__valid_score_curve = []  # per-epoch validation macro-F1

    def __init_params_dict(self, model_name: str, train_data_name: str, kwargs: dict, valid_data_name="") -> dict:
        """
        Build the hyper-parameter dictionary.
        :params: model_name name under which the newly trained model is saved
        :params: train_data_name: name of the training dataset
        :params: kwargs dictionary of hyper-parameter overrides; valid keys:
                lr(float): learning rate
                l1_penalty(float): L1 regularization coefficient
                l2_penalty(float): L2 regularization coefficient
                epochs(int): number of training epochs
                loss_weight(list): per-class loss-weight list
                step_size(int): learning-rate scheduler step size
                gamma(float): learning-rate decay factor
                momentum(float): SGD momentum
        :params: valid_data_name: name of the validation dataset; if empty, 10% of the
                 training set is split off as validation data
        :returns: the hyper-parameter dictionary
        """
        default_dict = {
            # Tunable hyper-parameters.
            "lr": 0.05,
            "l1_penalty": 6e-3,
            "l2_penalty": 1e-6,
            "epochs": 800,
            # Per-class weights, presumably inverse class frequencies — TODO confirm
            # against the dataset's label distribution.
            "loss_weight": [1 / 5144, 1 / 1613, 1 / (1062 * 3), 1 / 844, 1 / 743, 1 / 554],
            "step_size": 400,
            "gamma": 0.7,
            "momentum": 0.9,
            # Fixed (derived) entries.
            "params_save_path": os.path.join("static/params", model_name + ".pth"),
            "pic_save_path": "static/pics",
            "train_data_path": os.path.join("upload/train_datasets", train_data_name.replace(".csv", "") + ".csv"),
            "valid_data_path": os.path.join("upload/train_datasets",
                                            valid_data_name.replace(".csv",
                                                                    "") + ".csv") if valid_data_name != "" else "",
        }

        # User-supplied values override the defaults.
        default_dict.update(kwargs)
        return default_dict

    def __load_dataset(self) -> None:
        """
        Load the datasets and wrap them in DataLoaders.
        If the user supplied a validation set it is used as-is; otherwise 10% of the
        training set is split off as validation data.
        :raises FileNotFoundError: if the training CSV does not exist.
        """
        set_seed(self.__seed)
        train_data_path = self.__params_dict["train_data_path"]
        valid_data_path = self.__params_dict["valid_data_path"]
        if not os.path.exists(train_data_path):
            raise FileNotFoundError(train_data_path)
        # Read the training set.
        train_dataset = pd.read_csv(train_data_path, index_col=0, header=0)
        # Split features and labels.
        x_train, y_train = train_dataset.drop(
            "label", axis=1).values, train_dataset["label"].values
        if valid_data_path == "" or not os.path.exists(valid_data_path):
            # No validation set supplied: carve 10% off the training set.
            # Determinism relies on set_seed() above seeding the global RNGs —
            # TODO confirm set_seed seeds numpy for train_test_split.
            x_train, x_valid, y_train, y_valid = train_test_split(
                x_train, y_train, test_size=0.1)
        else:  # User supplied a validation set.
            valid_dataset = pd.read_csv(valid_data_path, index_col=0, header=0)
            x_valid, y_valid = valid_dataset.drop(
                "label", axis=1).values, valid_dataset["label"].values
        # Data preprocessing (project helper).
        x_train, x_valid, y_train, y_valid = data_preprocess(
            x_train, x_valid, y_train, y_valid, seed=self.__seed)
        # Wrap in TensorDatasets.
        train_dataset = TensorDataset(
            torch.from_numpy(x_train), torch.from_numpy(y_train))
        valid_dataset = TensorDataset(
            torch.from_numpy(x_valid), torch.from_numpy(y_valid))
        # Wrap in DataLoaders; full-batch training (batch == whole dataset).
        self.__train_loader = DataLoader(
            train_dataset, batch_size=len(train_dataset), shuffle=True)
        # Bug fix: the validation loader was sized with len(train_dataset).
        self.__valid_loader = DataLoader(
            valid_dataset, batch_size=len(valid_dataset))

    def __reset_net(self) -> None:
        """
        Reset the network, optimizer, scheduler, and metric curves so that
        train() always starts from a fresh state.
        """
        set_seed(self.__seed)
        del self.__net
        del self.__optim
        del self.__scheduler
        # Network used for training.
        self.__net = Transformer(in_features=107, out_features=6).to(self.__device)
        # Optimizer.
        self.__optim = torch.optim.SGD(
            self.__net.parameters(),
            lr=self.__params_dict["lr"],
            momentum=self.__params_dict["momentum"])
        # Learning-rate decay schedule.
        self.__scheduler = torch.optim.lr_scheduler.StepLR(
            self.__optim,
            step_size=self.__params_dict["step_size"],
            gamma=self.__params_dict["gamma"])
        self.__train_loss_curve = []   # per-epoch training loss
        self.__valid_loss_curve = []   # per-epoch validation loss
        self.__train_score_curve = []  # per-epoch training macro-F1
        self.__valid_score_curve = []  # per-epoch validation macro-F1

    def __get_loss(self, y_pred, y_true):
        """
        Compute the loss from predictions and ground truth.
        The loss is weighted cross-entropy plus L1 and L2 regularization terms.
        :params: y_pred predicted class scores
        :params: y_true ground-truth labels
        :returns: scalar loss tensor
        """
        weight = torch.tensor(
            self.__params_dict["loss_weight"]).to(self.__device)
        loss_fn, l1_loss, l2_loss = nn.CrossEntropyLoss(weight), 0., 0.
        loss = loss_fn(y_pred, y_true.to(torch.long))
        # Accumulate L1 and L2 norms over all parameters.
        for param in self.__net.parameters():
            l1_loss += torch.norm(param, 1)
            l2_loss += torch.norm(param, 2)
        return loss + self.__params_dict["l1_penalty"] * l1_loss + self.__params_dict["l2_penalty"] * l2_loss

    def __save_net_stats(self) -> None:
        """
        Save a checkpoint containing the network weights and the
        hyper-parameter dictionary.
        """
        path = self.__params_dict["params_save_path"]
        check_point = {
            "net_stat_dict": self.__net.state_dict(),
            "params_dict": self.__params_dict
        }
        torch.save(check_point, path)

    def train(self):
        """
        Retrain the model.
        :returns: tuple ((min valid loss, its epoch), (min train loss, its epoch)),
                  ((max valid F1, its epoch), (max train F1, its epoch))
        """
        set_seed(self.__seed)
        self.__load_dataset()
        self.__reset_net()
        train_loader = self.__train_loader
        valid_loader = self.__valid_loader
        for epoch in tqdm(range(self.__params_dict["epochs"])):

            train_loss_mean = 0.
            train_score_mean = 0.

            self.__net.train()
            for inputs, labels in train_loader:
                inputs = inputs.to(torch.float32).to(self.__device)
                labels = labels.to(torch.float32).to(self.__device)
                # Clear gradients before the new backward pass.
                self.__optim.zero_grad()
                # Forward pass.
                y_pred = self.__net(inputs)
                # Compute loss and macro-F1.
                loss = self.__get_loss(y_pred, labels)
                predicted = probs2preds(y_pred.data)
                f1 = f1_score(labels.cpu(), predicted.cpu(), average='macro')
                # Bug fix: accumulate plain floats so the autograd graph of
                # every batch is not retained across the epoch.
                train_loss_mean += loss.item()
                train_score_mean += float(f1)
                # Backward pass and parameter update.
                loss.backward()
                self.__optim.step()

            # Learning-rate decay.
            self.__scheduler.step()

            self.__train_loss_curve.append(
                train_loss_mean / len(train_loader))
            self.__train_score_curve.append(
                train_score_mean / len(train_loader))

            # Validation.
            self.__net.eval()
            with torch.no_grad():
                valid_loss_mean = 0.
                valid_score_mean = 0.
                confusion_mat = None
                for inputs, labels in valid_loader:
                    inputs = inputs.to(torch.float32).to(self.__device)
                    labels = labels.to(torch.float32).to(self.__device)
                    # Forward pass.
                    y_pred = self.__net(inputs)
                    loss = self.__get_loss(y_pred, labels)
                    predicted = probs2preds(y_pred.data)
                    f1 = f1_score(labels.cpu(),
                                  predicted.cpu(), average='macro')
                    batch_mat = confusion_matrix(
                        labels.cpu(), predicted.cpu(), labels=list(range(6)))
                    # Bug fix: sum confusion matrices over batches (the
                    # original kept only the last batch's matrix).
                    confusion_mat = batch_mat if confusion_mat is None else confusion_mat + batch_mat
                    valid_loss_mean += loss.item()
                    valid_score_mean += float(f1)
                self.__valid_loss_curve.append(
                    valid_loss_mean / len(valid_loader))
                self.__valid_score_curve.append(
                    valid_score_mean / len(valid_loader))

            # Checkpoint whenever validation F1 reaches a new best.
            if self.__valid_score_curve[-1] == max(self.__valid_score_curve):
                self.__save_net_stats()
                self.__plot_confusion_matrix(confusion_mat)

        return self.__plot_loss(), self.__plot_score()

    def __plot_loss(self) -> tuple:
        """
        Plot the loss curves over training epochs and save the figure as SVG.
        :returns: tuple (min valid loss, its epoch), (min train loss, its epoch)
        """
        assert len(self.__train_loss_curve) != 0, "未进行训练。"
        # Plot settings; Agg backend so no display is required.
        matplotlib.use("Agg")
        plt.rcParams["font.sans-serif"] = ['KaiTi']
        plt.rcParams["axes.unicode_minus"] = False
        plt.rcParams["figure.figsize"] = (15.2, 8.8)
        fontdict = {"fontsize": 20}
        title = "训练集和验证集的损失函数值"
        xlabel = "训练轮数"
        ylabel = "loss"
        legend_list = ["训练集", "验证集"]
        # Use the recorded curve length so the x axis always matches the data.
        x = list(range(len(self.__train_loss_curve)))
        min_train_loss = min(self.__train_loss_curve)
        min_train_loss_index = self.__train_loss_curve.index(min_train_loss)
        min_valid_loss = min(self.__valid_loss_curve)
        min_valid_loss_index = self.__valid_loss_curve.index(min_valid_loss)

        # Draw curves and mark the minima.
        plt.title(title, fontdict=fontdict)
        plt.plot(x, self.__train_loss_curve, "g")
        plt.plot(x, self.__valid_loss_curve, "purple")
        plt.scatter(min_train_loss_index, min_train_loss, c="b", marker="*")
        plt.scatter(min_valid_loss_index,
                    min_valid_loss, c="b", marker="*")
        plt.xlabel(xlabel, fontdict=fontdict)
        plt.ylabel(ylabel, fontdict=fontdict)
        plt.legend(legend_list)
        plt.savefig(os.path.join(
            self.__params_dict["pic_save_path"], title + ".svg"))
        plt.clf()
        return (min_valid_loss, min_valid_loss_index), (min_train_loss, min_train_loss_index)

    def __plot_score(self) -> tuple:
        """
        Plot the F1 curves over training epochs and save the figure as SVG.
        :returns: tuple (max valid F1, its epoch), (max train F1, its epoch)
        """
        assert len(self.__train_score_curve) != 0, "未进行训练。"
        # Plot settings; Agg backend so no display is required.
        matplotlib.use("Agg")
        plt.rcParams["font.sans-serif"] = ['KaiTi']
        plt.rcParams["axes.unicode_minus"] = False
        plt.rcParams["figure.figsize"] = (15.2, 8.8)
        fontdict = {"fontsize": 20}
        title = "训练集和验证集的F1值"
        xlabel = "训练轮数"
        ylabel = "F1值"
        legend_list = ["训练集", "验证集"]
        # Use the recorded curve length so the x axis always matches the data.
        x = list(range(len(self.__train_score_curve)))
        max_train_f1 = max(self.__train_score_curve)
        max_train_f1_index = self.__train_score_curve.index(max_train_f1)
        max_valid_f1 = max(self.__valid_score_curve)
        max_valid_f1_index = self.__valid_score_curve.index(max_valid_f1)

        # Draw curves and mark the maxima.
        plt.title(title, fontdict=fontdict)
        plt.plot(x, self.__train_score_curve, "g")
        plt.plot(x, self.__valid_score_curve, "purple")
        plt.scatter(max_train_f1_index, max_train_f1, c="b", marker="*")
        plt.scatter(max_valid_f1_index, max_valid_f1, c="b", marker="*")
        plt.xlabel(xlabel, fontdict=fontdict)
        plt.ylabel(ylabel, fontdict=fontdict)
        plt.legend(legend_list)
        plt.savefig(os.path.join(
            self.__params_dict["pic_save_path"], title + ".svg"))
        plt.clf()
        # Reuse the values computed above instead of re-scanning the curves.
        print(
            f"valid max f1 score:{max_valid_f1} position:{max_valid_f1_index}")
        print(
            f"train max f1 score:{max_train_f1} position:{max_train_f1_index}")
        return (max_valid_f1, max_valid_f1_index), (max_train_f1, max_train_f1_index)

    def __plot_confusion_matrix(self, confusion_mat) -> None:
        """
        Plot the validation confusion matrix and save it as SVG.
        :params: confusion_mat 6x6 confusion matrix (rows: true, cols: predicted)
        """
        assert len(confusion_mat) != 0, "未进行训练。"
        # Plot settings; Agg backend so no display is required.
        matplotlib.use("Agg")
        plt.rcParams["font.sans-serif"] = ['KaiTi']
        plt.rcParams["axes.unicode_minus"] = False
        plt.rcParams["figure.figsize"] = (15.2, 8.8)
        plt.rcParams['axes.formatter.use_mathtext'] = True
        plt.rcParams['axes.formatter.limits'] = (-5, 5)
        plt.rcParams['axes.formatter.useoffset'] = False
        fontdict = {"fontsize": 20}
        title = "验证集最优混淆图"

        # Draw the heatmap.
        plt.title(title, fontdict=fontdict)
        sns.heatmap(pd.DataFrame(confusion_mat), cmap="Greens", annot=True)
        plt.savefig(os.path.join(
            self.__params_dict["pic_save_path"], title + ".svg"))
        plt.clf()


class ModelEval:
    """
    Model evaluation class, used by the backend to predict fault types.
    """

    def __init__(self, model_name: str, test_data_name: str, seed=1024) -> None:
        """
        Initialize a model-evaluation instance.
        :params: model_name name of the model to evaluate with
        :params: test_data_name name of the test dataset
        :params: seed random seed, default 1024
        """
        self.__seed = seed
        set_seed(self.__seed)
        self.__device = "cuda" if torch.cuda.is_available() else "cpu"
        # In/out sizes fixed by the dataset: 107 features, 6 classes.
        self.__net = Transformer(107, 6).to(self.__device)
        # Loads the checkpoint once and restores both the weights and the
        # hyper-parameter dictionary (the original loaded the same file twice).
        self.__load_state_dicts(model_name, test_data_name)
        self.__test_loader = None

    def __load_params_dict(self, model_name: str) -> dict:
        """
        Load only the hyper-parameter dictionary from a saved checkpoint.
        :params: model_name name of the model to evaluate with
        :returns: hyper-parameter dictionary
        :raises FileNotFoundError: if the checkpoint does not exist.
        """
        params_dict_path = os.path.join("static/params", model_name + ".pth")
        if not os.path.exists(params_dict_path):
            raise FileNotFoundError(params_dict_path)
        check_point = torch.load(params_dict_path, map_location=torch.device('cpu'))
        return check_point["params_dict"]

    def __load_state_dicts(self, model_name: str, test_data_name: str) -> None:
        """
        Load the trained model state and hyper-parameters from the checkpoint,
        then record the test-data and result paths in the parameter dictionary.
        :params: model_name name of the model to evaluate with
        :params: test_data_name name of the test dataset
        :raises FileNotFoundError: if the checkpoint does not exist.
        """
        params_dict_path = os.path.join("static/params", model_name + ".pth")
        if not os.path.exists(params_dict_path):
            raise FileNotFoundError(params_dict_path)
        check_point = torch.load(params_dict_path, map_location=torch.device('cpu'))
        self.__net.load_state_dict(check_point["net_stat_dict"])
        self.__params_dict = check_point["params_dict"]
        self.__params_dict["test_data_path"] = os.path.join(
            "upload/test_datasets", test_data_name.replace(".csv", "") + ".csv")
        self.__params_dict["result_save_path"] = os.path.join(
            "static/jsons/test_data.json")

    def __load_dataset(self) -> None:
        """
        Load the test set and wrap it in a DataLoader.
        The training set is also read because data_preprocess is fitted on it.
        :raises FileNotFoundError: if either CSV is missing.
        """
        train_data_path = self.__params_dict["train_data_path"]
        test_data_path = self.__params_dict["test_data_path"]
        if not os.path.exists(test_data_path) or not os.path.exists(train_data_path):
            raise FileNotFoundError(f"{test_data_path} or {train_data_path}")
        # Read the training set; its features initialize the preprocessing.
        train_dataset = pd.read_csv(train_data_path, index_col=0, header=0)
        x_train, _ = train_dataset.drop(
            "label", axis=1).values, train_dataset["label"].values
        # Read the test set.
        test_dataset = pd.read_csv(test_data_path, index_col=0, header=0)
        # NOTE(review): in production the test set has no "label" column, so all
        # columns are features; when evaluating on a labelled validation file,
        # switch to the commented variant below.
        # x_test, y_test = test_dataset.drop(
        #     "label", axis=1).values, test_dataset["label"].values
        x_test = test_dataset.values
        x_train, x_test, _, _ = data_preprocess(
            x_train, x_test, np.array([]), np.array([]), seed=self.__seed)
        test_dataset = TensorDataset(torch.from_numpy(x_test))
        # Bug fix: batch over the test set's own size (the original used the
        # training-set size, which could split the test set into several batches).
        self.__test_loader = DataLoader(
            test_dataset, batch_size=len(test_dataset))

    def predict(self) -> tuple:
        """
        Run prediction, save the result as a JSON file, and return it.
        :returns: (result as a JSON string, path of the saved JSON file)
        """
        set_seed(self.__seed)
        self.__load_dataset()
        self.__net.eval()
        path = self.__params_dict["result_save_path"]
        batch_preds = []
        with torch.no_grad():
            for inputs in self.__test_loader:
                inputs = inputs[0].to(torch.float32).to(self.__device)
                y_pred = self.__net(inputs)
                batch_preds.append(probs2preds(y_pred.data))
        # Bug fix: concatenate all batches (the original kept only the last one).
        predicted = torch.cat(batch_preds)
        predicted_json = ndarray2json(predicted.cpu().numpy())
        with open(path, "w") as f:
            f.write(predicted_json)
        return predicted_json, path


if __name__ == "__main__":
    """
    To retrain a model, the user supplies a name for the new model and the name of a
    training set, and may optionally supply a validation-set name and hyper-parameters.
    Without a validation set, 10% of the training data is split off for validation;
    without hyper-parameters, the defaults are used.
    After training, three SVG figures (loss curves, F1 curves, confusion matrix) are
    written to the pics folder. TensorBoard logs can also be inspected by running
    `tensorboard --logdir=./` in the log directory and opening http://localhost:6006/.
    train() returns ((min valid loss, its epoch), (min train loss, its epoch),
    (max valid F1, its epoch), (max train F1, its epoch)).

    Backend training therefore instantiates ModelTrain(model name, training-set name,
    validation-set name, hyper-parameters) and calls train(); the return value and the
    images in the pics folder can be shown on the frontend. Exposing TensorBoard to
    users is optional.

    For prediction, the user supplies a model name and a test-set name. The prediction
    result is saved as a JSON file in the results folder, and predict() returns
    (result as a JSON string, path of the saved JSON file).

    Backend prediction therefore instantiates ModelEval(model name, test-set name) and
    calls predict(); results come from its return value or from the results folder.

    Note: one line of ModelEval.__load_dataset must be changed for production use
    (here a labelled validation set stands in for the unlabelled test set).
    """
    seed = 1024
    l1_grid = [6e-4, 8e-4, 1e-3, 1.2e-3, 1.4e-3]
    l2_grid = [6e-3, 8e-3, 1e-2, 1.2e-2, 1.4e-2]
    # Grid search over the two regularization coefficients.
    for l1 in l1_grid:
        for l2 in l2_grid:
            set_seed(seed)
            print(f"l1_penalty : {l1}, l2_penalty : {l2}")
            trainer = ModelTrain(
                "default_model", "train_10000", "validate_1000", seed,
                l1_penalty=l1, l2_penalty=l2)
            trainer.train()


    # model_eval = ModelEval("default_model", "validate_1000")
    # json_f = model_eval.predict()[0]

    # test_data_path = os.path.join("train_datasets", "validate_1000.csv")
    # test_dataset = pd.read_csv(test_data_path, index_col=0, header=0)
    # _, y_test = test_dataset.drop(
    #     "label", axis=1).values, test_dataset["label"].values
    # print(f1_score(y_test, list(json.loads(json_f).values()), average='macro'))
