# -*- coding: utf-8 -*-
# 牛泽林的model包，包括sklearn和xgboost的类使用
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import *
from sklearn import metrics
from sklearn.model_selection import cross_val_score, GridSearchCV
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # 用黑体显示中文
matplotlib.rcParams['axes.unicode_minus'] = False    # 正常显示负号


class SKLearnModel:
    """Training-side helpers: cross-validation, grid search and test-set
    evaluation for sklearn-style estimators, with an optional xgboost path
    that delegates to the ``XGBoost`` wrapper class in this module."""

    def __init__(self, x, y):
        # Feature matrix and target vector used by cv() and grid_search().
        self.x = x
        self.y = y

    def cv(self, estimator, savepath, cv_scorer=None, max_scorer=False, cv_fold=5, is_xgb=False):
        """Cross-validate `estimator` on (self.x, self.y).

        Parameters
        ----------
        estimator : sklearn estimator, or an ``XGBoost`` wrapper when is_xgb
        savepath : directory where xgboost learning-curve plots are saved
        cv_scorer : metric function; mandatory (see parameter_test)
        max_scorer : True when a larger metric value is better
        cv_fold : number of folds
        is_xgb : route through xgb.cv instead of sklearn's cross_val_score

        Returns
        -------
        xgb.cv result DataFrame (is_xgb=True) or the per-fold score array.
        """
        SKLearnModel.parameter_test(cv_scorer)
        if is_xgb:
            xgb_model = estimator
            # Fix: pass cv_fold through instead of the hard-coded nfold=5,
            # so the parameter actually controls the xgboost path too.
            xgb_cv = xgb_model.cv_train(self.x, self.y, feval=cv_scorer, maximize=max_scorer, nfold=cv_fold)
            XGBoost.cv_learning_curve_plot(xgb_cv, savepath)
            return xgb_cv
        cv_scorer = make_scorer(cv_scorer, greater_is_better=max_scorer)
        scores = cross_val_score(
            estimator=estimator,
            X=self.x,
            y=self.y,
            scoring=cv_scorer,
            cv=cv_fold)
        return scores

    def grid_search(self, estimator, savepath, param_grid, scorer, max_scorer=False, cv_fold=5, is_xgb=False, bst_name=""):
        """Grid-search `param_grid`, plot the mean test scores, and return
        ``(fitted GridSearchCV, dict with best estimator/score/params/results)``.

        When is_xgb, additionally plots feature importance for the best
        booster and, if bst_name is non-empty, saves the booster under
        ``train_model\\<bst_name>.model``.
        """
        SKLearnModel.parameter_test(scorer)
        scorer = make_scorer(scorer, greater_is_better=max_scorer)
        gs = GridSearchCV(
            estimator=estimator,
            param_grid=param_grid,
            scoring=scorer,
            cv=cv_fold)
        gs.fit(self.x, self.y)

        plt.figure(figsize=(10, 5))
        plt.plot(gs.cv_results_["mean_test_score"])
        plt.title(gs.best_params_)
        plt.xlabel("iteration")
        plt.grid(True)
        plt.show()

        grid_best = dict(
            estimator=gs.best_estimator_,
            score=gs.best_score_,
            param=gs.best_params_,
            cv_result=gs.cv_results_,
        )
        if is_xgb:
            XGBoost.plot_feature_importance(model=gs.best_estimator_.bst, savepath=savepath)
            if len(bst_name) != 0:
                gs.best_estimator_.bst.save_model(r'train_model\%s.model' % bst_name)
        return gs, grid_best

    @classmethod
    def predict(cls, model, savepath, x_test, y_test, label=None, is_xgb=False):
        """Predict on the test set and report metrics.

        label is None      -> regression metrics (MSE/MAE/MSLE) are printed.
        label == "binary"  -> scores are thresholded at 0.45, binary
                              classification metrics are printed and appended
                              to ``<savepath>\\test_result.txt``.
        Returns the (possibly thresholded) predictions.
        """
        if is_xgb:
            d_test = xgb.DMatrix(x_test, y_test)
            y_pred = model.predict(d_test, ntree_limit=model.best_ntree_limit)
        else:
            y_pred = model.predict(x_test)
        if label:
            # TODO: converting multi-class scores to labels is not implemented.
            if label == "binary":
                y_pred = (y_pred >= 0.45) * 1  # NOTE(review): deliberate 0.45 cut-off — confirm
                print('ACC(准确率): %.4f' % accuracy_score(y_test, y_pred))
                print('Recall(1-拒真率): %.4f' % recall_score(y_test, y_pred))
                print('Precesion(1-纳伪率): %.4f' % precision_score(y_test, y_pred))
                print('F1-score: %.4f' % f1_score(y_test, y_pred))
                print("auroc: %.4f" % roc_auc_score(y_test, y_pred))
                print("confusion matrix\n", confusion_matrix(y_test, y_pred))

                # Fix: use a context manager so the file handle is closed
                # even if a metric call raises mid-way.
                with open(savepath + r'\test_result.txt', 'a') as file_test_result:
                    file_test_result.write('\n本次训练结果\n')
                    file_test_result.write('ACC(准确率): %.4f\n' % accuracy_score(y_test, y_pred))
                    file_test_result.write('Recall(1-拒真率): %.4f\n' % recall_score(y_test, y_pred))
                    file_test_result.write('Precesion(1-纳伪率): %.4f\n' % precision_score(y_test, y_pred))
                    file_test_result.write('F1-score: %.4f\n' % f1_score(y_test, y_pred))
                    file_test_result.write("auroc: %.4f\n" % roc_auc_score(y_test, y_pred))
        else:
            print('MSE: %.4f' % mean_squared_error(y_test, y_pred))
            print('MAE: %.4f' % mean_absolute_error(y_test, y_pred))
            print('MSLE: %.4f' % mean_squared_log_error(y_test, y_pred))
        return y_pred

    @classmethod
    def parameter_test(cls, scorer):
        """Guard: a scorer callable must be supplied.

        NOTE(review): ``assert`` is stripped under ``python -O``; kept as-is
        because callers may rely on AssertionError being raised.
        """
        str1 = "请设置scorer" \
               "sklearn算法可利用sklearn.metrics里的函数" \
               "使用xgboost算法, 需自定义函数, 注意交互验证与网格搜索调参不同;" \
               "请参照 @xgb_cv_precision_score @precision_binary"
        assert scorer is not None, str1


class XGBoost(object):
    """
    Thin wrapper around xgboost's native train/cv API.
    ------------------------------------------------------------------
    Keyword arguments (``**kwargs``)
        - 'objective': mandatory (validated by params_test)
        - 'num_boost_round': number of boosting rounds (default 10,
          matching xgboost's own default)
        - any other booster parameter; unspecified ones fall back to
          the defaults in @origin_params()
    ------------------------------------------------------------------
    Suggested tuning order:
        - max_depth                 maximum tree depth; larger = more complex
        - min_child_weight          regularizer; stop splitting when the
                                    instance weight sum in a partition is small
        - gamma                     reduces overfitting risk
        - subsample
        - colsample_bytree          changes the column sampling strategy
        - eta
    """

    def __init__(self, **kwargs):
        XGBoost.params_test(kwargs)
        # Fix: the original reassigned self.params = kwargs right after
        # loading the defaults, discarding origin_params() entirely; start
        # from the defaults and overlay the caller's settings instead.
        self.params = XGBoost.origin_params()
        self.params.update(kwargs)
        # Fix: num_boost_round used to stay undefined unless supplied,
        # crashing cv_train()/fit() later with AttributeError.
        self.num_boost_round = kwargs.get('num_boost_round', 10)

    def cv_train(self, x_train, y_train, feval, maximize=False, nfold=5):
        """Run xgb.cv with early stopping and return its result DataFrame."""
        dtrain = xgb.DMatrix(x_train, y_train)
        xgb_cv = xgb.cv(
            params=self.params,
            dtrain=dtrain,
            num_boost_round=self.num_boost_round,
            nfold=nfold,
            feval=feval,                # custom evaluation metric
            maximize=maximize,          # True when feval should be maximized
            early_stopping_rounds=20,   # stop if no improvement for 20 rounds
        )
        return xgb_cv

    def fit(self, x_train, y_train):
        """Train a booster on the full training set; stored as self.bst."""
        dtrain = xgb.DMatrix(x_train, y_train)
        self.bst = xgb.train(
            params=self.params,
            dtrain=dtrain,
            num_boost_round=self.num_boost_round,
        )

    def predict(self, x_pred):
        """Predict with the booster trained by fit()."""
        dpred = xgb.DMatrix(x_pred)
        return self.bst.predict(dpred)

    def get_params(self, deep=True):
        # sklearn-compatible accessor (lets GridSearchCV drive this wrapper).
        return self.params

    def set_params(self, **params):
        # sklearn-compatible mutator; returns self for chaining.
        self.params.update(params)
        return self

    @classmethod
    def cv_learning_curve_plot(cls, cv, savepath):
        """Plot each consecutive pair of ``*mean`` columns of an xgb.cv
        result and save the figures under savepath."""
        col_mean = np.array(cv.columns[cv.columns.str.endswith("mean")])
        for i in np.arange(np.ceil(len(col_mean) / 2)):
            col = list(col_mean[[int(i) * 2, int(i) * 2 + 1]])
            cv.loc[:, col].plot(grid=True)
            plt.savefig(savepath + r'\learning_curve%s.png' % i)
            plt.show()

    @classmethod
    def plot_feature_importance(cls, model, savepath):
        """Plot booster feature importance, drop features at or below the
        10th percentile, and dump the remainder to Excel plus a PNG."""
        xgb.plot_importance(booster=model)
        df_fi = model.get_fscore()
        df_fi = pd.DataFrame([df_fi])
        val_threshold = df_fi.quantile(q=0.1, axis=1)
        df_fi = df_fi[df_fi > val_threshold[0]]
        df_fi = df_fi.dropna(axis=1, how='any')
        df_fi.to_excel(savepath + r'\feature_importance.xlsx', index=False)
        plt.savefig(savepath + r'\feature_importance.png')
        plt.show()

    @classmethod
    def params_test(cls, params):
        """Validate booster parameters: 'objective' is mandatory, and
        multi-class objectives additionally require 'num_class'."""
        if 'objective' not in params:
            raise ValueError(
                "必须输入xgboost对应的objective\n"
                "参数设置:https://xgboost.readthedocs.io/en/latest/parameter.html")
        # Fix: real objective strings are 'multi:softmax'/'multi:softprob'
        # (no space); the original compared against 'multi: softmax' and so
        # never fired for actual multi-class configurations.
        if str(params['objective']).startswith('multi'):
            assert 'num_class' in params, "当objective为多分类时, 必须设置'num_class'"

    @staticmethod
    def origin_params():
        """Default booster parameters; __init__ kwargs override these."""
        params = {
            'eta': 0.007,  # analogous to a learning rate
            'max_depth': 8,  # tree depth; larger = easier to overfit
            'lambda': 2,  # L2 regularization; larger = less overfitting
            'subsample': 1,  # row subsampling ratio
            'colsample_bytree': 0.7,  # column subsampling ratio per tree
            'silent': 0,  # 1 silences xgboost's console output
            'seed': 0,  # random seed
        }
        return params


class Scorer(object):
    """Resolves a sklearn.metrics function name to a scorer callable, with
    special handling for the module-level ``xgb_cv_*`` feval helpers."""

    def __init__(self, scorer="", maximize=False, classification="binary", is_xgb=False):
        # scorer: name of a sklearn.metrics function (or "profit")
        self.scorer = scorer
        # maximize: True when a larger metric value is better
        self.maximize = maximize
        self.is_xgb = is_xgb
        # classification: "binary" or a multi-class marker
        self.classification = classification

    def set_scorer(self):
        """Wrap self.scorer (assumed to be a callable here) via make_scorer."""
        scorer = make_scorer(self.scorer, greater_is_better=self.maximize)
        return scorer

    @classmethod
    def profit(cls):
        """Win rate x profit/loss ratio -- not implemented yet."""
        pass

    def get_scorer(self):
        """Resolve the configured metric name to a callable.

        Raises ValueError for unknown names. On the xgboost path only the
        'mean_absolute_error' and 'precision_score' feval helpers exist,
        and precision is binary-only (multi-class prints an apology and
        returns None, as before).
        """
        scorer_list = metrics.__all__ + ["profit"]
        if self.scorer not in scorer_list:
            # Fix: the original built this message as a bare expression and
            # silently returned None; raise it so misconfiguration is loud.
            raise ValueError("输入的scorer有误; 必须为以下字符串\n{}".format(scorer_list))
        if self.is_xgb:
            if self.scorer in ["mean_absolute_error", "precision_score"]:
                if (self.classification != "binary") and (self.scorer == "precision_score"):
                    print("非常抱歉,目前尚不支持多分类,只支持二分类")
                else:
                    # Fix: the module-level helpers are named
                    # "xgb_cv_<metric>", not "xgb_<metric>" (eval would have
                    # raised NameError); look up via globals(), not eval.
                    return globals()["xgb_cv_" + self.scorer]
        else:
            return globals()[self.scorer]


def xgb_cv_mean_absolute_error(yhat, dtrain):
    """feval for xgb.cv: ('mae', MAE of raw predictions vs. fold labels)."""
    labels = dtrain.get_label()
    return 'mae', mean_absolute_error(labels, yhat)


def xgb_cv_precision_score(yhat, dtrain):
    """feval for xgb.cv: ('precision', precision after a 0.5 cut-off)."""
    labels = dtrain.get_label()
    hard_pred = np.where(np.asarray(yhat) >= 0.5, 1, 0)
    return 'precision', precision_score(labels, hard_pred)


def xgb_cv_recall_score(yhat, dtrain):
    """feval for xgb.cv: ('recall', recall after a 0.5 cut-off)."""
    labels = dtrain.get_label()
    hard_pred = np.where(np.asarray(yhat) >= 0.5, 1, 0)
    return 'recall', recall_score(labels, hard_pred)


def precision_binary(y_true, y_pred):
    """Precision for binary targets; scores are binarized at 0.5."""
    hard_pred = np.where(np.asarray(y_pred) >= 0.5, 1, 0)
    return precision_score(y_true, hard_pred)


def recall_binary(y_true, y_pred):
    """Recall for binary targets; scores are binarized at 0.5."""
    hard_pred = np.where(np.asarray(y_pred) >= 0.5, 1, 0)
    return recall_score(y_true, hard_pred)


def accuracy_binary(y_true, y_pred):
    """Accuracy for binary targets; scores are binarized at 0.5."""
    hard_pred = np.where(np.asarray(y_pred) >= 0.5, 1, 0)
    return accuracy_score(y_true, hard_pred)