import pandas as pd
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve
# from finetune import FineTune
from utils import MatrixLog

sns.set()


def _set_models(model, params):
    if len(params) == 0:
        return model()
    else:
        return model(**params)


def _set_eval_models(eval_models):
    """Instantiate every comparison model with ``probability=True``.

    *eval_models* maps name -> model class.  Each class is constructed with
    ``probability=True`` (presumably so estimators like SVC expose
    ``predict_proba`` — confirm against the model classes actually passed in).
    """
    forced_params = {"probability": True}
    return {name: _set_models(cls, forced_params)
            for name, cls in eval_models.items()}


def _cal_test_set(clf, test_set):
    """Score *clf* on ``(X, y) = test_set`` and return ``(fpr, tpr, auc)``.

    Uses the positive-class probability (column 1 of ``predict_proba``) to
    build the ROC curve and the AUC score.
    """
    features, labels = test_set
    positive_prob = clf.predict_proba(features)[:, 1]
    fpr, tpr, _thresholds = roc_curve(labels, positive_prob)
    return fpr, tpr, roc_auc_score(labels, positive_prob)


def _fusion_scores(dst: list, fpr, tpr, name):
    names = [name] * len(fpr)
    dst[0].extend(names)
    dst[1].extend(fpr)
    dst[2].extend(tpr)


def _fusion(dst_scores: dict, dst_roc: list, dst_auc: dict, scores):
    auc_names = list(scores[2].keys())
    for i, key in enumerate(scores[0]):
        dst_scores[key] = scores[0][key]
        dst_auc[auc_names[i]] = scores[2][auc_names[i]]

    dst_roc[0].extend(scores[1][0])
    dst_roc[1].extend(scores[1][1])
    dst_roc[2].extend(scores[1][2])


def plot_roc(df, ax):
    """Draw ROC curves onto *ax*, one line per value of the 'model' column."""
    line_kwargs = dict(x='fpr', y='tpr', hue='model')
    sns.lineplot(data=df, ax=ax, **line_kwargs)


class Model:
    """Train a primary classifier, compare it against optional baseline
    models on a held-out split, and log metrics / (optionally) ROC curves.

    Parameters
    ----------
    model : estimator instance with sklearn-style ``fit`` / ``predict``.
    score_fun : dict
        Metric name -> metric function, forwarded to ``MatrixLog``.  The
        special key ``'roc'`` enables ROC/AUC plotting and is stripped out
        before ``MatrixLog`` is built.
    eval_models : dict, optional
        Name -> fitted-API estimator instance used as comparison baselines.
        ``None`` means no comparison models.
    pre_processor : callable, optional
        Zero-argument factory whose instance exposes ``fit``/``transform``;
        fitted on the training split only (avoids test-set leakage).
    test_size : float
        Fraction of the data held out for testing.
    """

    def __init__(self, model, score_fun: dict, eval_models: dict = None,
                 pre_processor=None, test_size=0.3):

        self.model = model
        # Normalize to an empty dict: `forward` iterates eval_models
        # unconditionally, which was a TypeError when the default None
        # was used.
        self.eval_models = eval_models if eval_models is not None else {}

        self.X = None
        self.y = None
        self.features_name: list = []

        self.test_size = test_size

        self.pre_processor = pre_processor

        self.clf = None  # fitted main classifier, set by `fit`

        # Copy the dict: popping 'roc' below must not mutate the caller's
        # score_fun argument.
        self.score_fun: dict = dict(score_fun)
        self.roc = None
        if 'roc' in self.score_fun:
            self.roc = True
            self.score_fun.pop('roc')
        self.metrics = MatrixLog(**self.score_fun)

    def _split_data(self):
        """Split (self.X, self.y) into train/test and apply preprocessing.

        Returns ``((x_train, y_train), (x_test, y_test))``.  The
        preprocessor (if any) is fitted on the training portion only, then
        applied to both splits.
        """
        x_train, x_test, y_train, y_test = train_test_split(self.X, self.y,
                                                            test_size=self.test_size, shuffle=True, random_state=1)

        # Data preprocessing: fit on train only, transform both splits.
        if self.pre_processor is not None:
            premodel = self.pre_processor().fit(x_train)
            x_train = premodel.transform(x_train)
            x_test = premodel.transform(x_test)

        return (x_train, y_train), (x_test, y_test)

    def forward(self, train_set):
        """Fit the main model and every comparison model on *train_set*.

        Returns the dict of fitted comparison estimators (name -> clf).
        """
        self.clf = self.model.fit(train_set[0], train_set[1])

        eval_clf = {m: self.eval_models[m].fit(train_set[0], train_set[1]) for m in self.eval_models}

        return eval_clf

    def _eval_models(self, eval_clf, test_set):
        """Print and collect metric tables for every comparison model."""
        scores = dict()
        for clf in eval_clf:
            pred = eval_clf[clf].predict(test_set[0])
            print("model:{}".format(clf))
            self.metrics(test_set[1], pred, True)
            print("*" * 30)
            scores[clf] = self.metrics.to_df()

        return scores

    def _main_model(self, test_set):
        """Print and return the metric table of the main classifier."""
        pred = self.clf.predict(test_set[0])
        print("model:{}".format(str(self.clf)))
        self.metrics(test_set[1], pred, True)
        print("*" * 30)
        return self.metrics.to_df()

    def print_single_score(self, scores, auc_scores, mode):
        """Print one formatted metric row per model, plus its AUC.

        NOTE(review): pairs *scores* and *auc_scores* by key order and
        indexes each score row positionally by metric — confirm callers
        supply both dicts in matching insertion order.
        """
        names = list(self.score_fun.keys())
        keys = list(auc_scores.keys())

        for i in range(len(scores)):
            print("model[{:2s}]({:2s})\t".format(keys[i], mode), end="")
            for k in range(len(names)):
                print("{:2s} = {:.4f}\t".format(names[k], scores[keys[i]][k]), end="")

            print("{:2s} = {:.4f}".format("auc", auc_scores[keys[i]]))

    def test_models(self, test_set, evals):
        """Compute ROC/AUC for the main and comparison models, plot the
        curves, and return ``(raw_roc_columns, plotted_dataframe)``.

        Raises
        ------
        ValueError
            If no metric functions were configured.
        """
        if len(self.score_fun) == 0:
            # `raise <str>` is a TypeError in Python 3 — raise a real
            # exception type instead.
            raise ValueError("必须指定测试函数")
        scores = [[], [], []]
        vals = dict()
        fpr, tpr, auc = _cal_test_set(self.clf, test_set)
        _fusion_scores(scores, fpr, tpr, str(self.clf))
        vals[str(self.clf)] = auc
        for eval_name in evals:
            fpr, tpr, auc = _cal_test_set(evals[eval_name], test_set)
            _fusion_scores(scores, fpr, tpr, eval_name)
            vals[eval_name] = auc

        roc_scores = {"model": scores[0], "fpr": scores[1], "tpr": scores[2]}

        df = pd.DataFrame(roc_scores)

        def fun(x, val):
            # Append the AUC to the legend label, e.g. "SVC: auc = 0.9876".
            return x + ": auc = {:.4f}".format(val[x])

        # NOTE(review): DataFrame.map with extra kwargs requires
        # pandas >= 2.1 — verify against the pinned pandas version.
        df['model'] = df[['model']].map(fun, val=vals)['model']
        fig, ax = plt.subplots()

        plot_roc(df, ax)
        plt.show()
        return scores, df

    def fit(self, X, y, features=None):
        """Run the full pipeline: split, train, evaluate, optionally plot
        ROC curves.  Returns the fitted main classifier.
        """
        if features is not None:
            self.features_name = features

        self.X = X
        self.y = y
        train_set, test_set = self._split_data()
        evals = self.forward(train_set)

        self._main_model(test_set)
        self._eval_models(evals, test_set)
        if self.roc is not None:
            self.test_models(test_set, evals)
        return self.clf

    def __call__(self, x):
        """Predict with the fitted main classifier."""
        return self.clf.predict(x)
