from typing import Callable

import lightgbm as lgb
import numpy as np
import pandas as pd
import xgboost as xgb
from lightgbm import LGBMClassifier
from lightgbm import LGBMRegressor
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import seaborn as sns

def kfold_objective(X_raw: pd.DataFrame, y: pd.Series, model: str = 'lgb', params: dict = None, n_splits=5, random_state=7,
                    shuffle=True) -> Callable:
    """
    Build a hyperopt-style objective that scores a hyper-parameter dict by
    cross-validated KS.

    The returned callable receives a dict of sampled hyper-parameters
    (including ``feature_num``, the number of leading columns of ``X_raw`` to
    keep), fits the requested model on each KFold split and returns the
    negated conservative KS estimate ``-(mean - 1.96 * sem)``, so minimising
    the objective maximises a lower bound on KS.

    :param X_raw: full feature frame; the first ``feature_num`` columns are used
    :param y: binary target aligned with ``X_raw``
    :param model: one of 'lgb', 'xgb', 'adaboost'
    :param params: base estimator parameters; FIX: may now be None and is no
        longer mutated by the objective
    :param n_splits: number of outer CV folds
    :param random_state: KFold seed
    :param shuffle: whether KFold shuffles before splitting
    :return: objective callable suitable for e.g. hyperopt's ``fmin``
    :raises NotImplementedError: for an unknown ``model`` name
    """
    kf = KFold(n_splits=n_splits, random_state=random_state, shuffle=shuffle)
    raw_features = X_raw.columns.values.tolist()
    # FIX: the original called params.update(...) on a possibly-None default and
    # mutated the caller's dict across folds; copy once here instead.
    base_params = dict(params) if params else {}

    def ks_score(kwargs: dict) -> float:
        # hyperopt samples integer-valued dimensions as floats; coerce back.
        for k in ('feature_num', 'max_depth', 'num_leaves', 'n_estimators', 'min_child_samples'):
            if isinstance(kwargs.get(k), float):
                kwargs[k] = int(kwargs[k])
        ks_list = []
        chosen_feature = raw_features[:kwargs['feature_num']]
        X = X_raw[chosen_feature]
        for train_idx, test_idx in kf.split(X):
            X_train, X_test = X.iloc[train_idx].values, X.iloc[test_idx].values
            y_train, y_test = y.iloc[train_idx].values, y.iloc[test_idx].values
            run_params = dict(base_params)
            run_params.update(kwargs)
            # n_estimators is re-derived from the inner CV below.
            # FIX: pop with a default — the key is absent e.g. on adaboost folds,
            # where the original raised KeyError on the second fold.
            run_params.pop('n_estimators', None)
            # FIX: feature_num is a feature-selection knob, not an estimator param.
            run_params.pop('feature_num', None)

            if model == 'lgb':
                train_data = lgb.Dataset(X_train, y_train, silent=False)
                cv_result = lgb.cv(run_params, train_data, num_boost_round=500, nfold=3, metrics='auc',
                                   early_stopping_rounds=100)
                # Best boosting-round count found by the inner CV.
                run_params['n_estimators'] = len(cv_result['auc-mean'])
                clf = LGBMClassifier(**run_params)
            elif model == 'xgb':
                train_data = xgb.DMatrix(X_train, y_train, silent=False)
                cv_result = xgb.cv(run_params, train_data, num_boost_round=500, nfold=3, metrics='auc',
                                   early_stopping_rounds=100)
                run_params['n_estimators'] = len(cv_result)
                clf = XGBClassifier(**run_params)
            elif model == 'adaboost':
                # FIX: forward only tree-compatible keys — passing e.g. feature_num
                # straight into DecisionTreeClassifier raised TypeError.
                tree_params = {k: v for k, v in kwargs.items() if k not in ('feature_num', 'n_estimators')}
                clf = AdaBoostClassifier(DecisionTreeClassifier(**tree_params, min_samples_leaf=0.05))
            else:
                raise NotImplementedError("Not implemented!")

            clf.fit(X_train, y_train)
            y_pred_test = clf.predict_proba(X_test)[:, 1]
            ks_list.append(calc_ks(y_pred_test, y_test, method='crosstab'))
        ks_arr = np.asarray(ks_list)
        # Lower bound of the mean KS at ~95% confidence; negate for minimisation.
        score = np.mean(ks_arr) - 1.96 * np.std(ks_arr) / np.sqrt(len(ks_arr))
        return -score

    return ks_score

from logging import getLogger
logger = getLogger(__name__)
from typing import Tuple, Union

def feature_imp_score(lgbm_model, origin_features, plot=True):
    """
    Rank features by a fitted model's importance scores.

    :param lgbm_model: fitted estimator exposing ``feature_importances_``
    :param origin_features: feature names aligned with the importance array
    :param plot: when True, draw a bar chart of the sorted importances
    :return: importances as a Series sorted in descending order
    """
    ranked = pd.Series(lgbm_model.feature_importances_, index=origin_features).sort_values(ascending=False)
    n_features = len(ranked)
    # Widen the figure once the bar count would make a 15-inch plot unreadable.
    fig_width = 15 if n_features <= 60 else n_features / 4
    if plot:
        ranked.plot(kind='bar', title='Feature Importances', figsize=(fig_width, 8))
    return ranked

def calc_ks(score: Union[pd.Series, np.ndarray], target: Union[pd.Series, np.ndarray], method='origin_pf') -> float:
    """
    Calculate ks value between predict and target.

    :param score: predict result (can be score or probability)
    :param target: target label
    :param method: method name, e.g. 'origin', 'crosstab', 'roc'
    :return: ks value
    :raises ValueError: when ``method`` is not one of the supported names

    :example:
    >>> data_1 = pd.DataFrame({'y': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], 'X': [1, 2, 4, 2, 2, 6, 5, 3, 0, 5, 4, 18]})
    >>> X, y = data_1['X'], data_1['y']
    >>> calc_ks(X, y)
    0.5
    >>> calc_ks(X, y, method='roc')
    0.5
    >>> calc_ks(X, y, method='origin')
    0.5
    """
    if isinstance(score, pd.Series):
        score = score.values
    if isinstance(target, pd.Series):
        target = target.values
    assert len(np.unique(target[~np.isnan(target)])) == 2, "Binary target is required!"
    if method == 'crosstab':
        # Cumulative per-class distributions over the unique score values;
        # KS is the largest gap between the two CDFs.
        freq = pd.crosstab(score, target)
        dens = freq.cumsum(axis=0) / freq.sum()
        ks = np.max(np.abs(dens[0] - dens[1]))
    elif method == 'roc':
        # roc_curve adds the origin point by default.
        # FIX: no early return here any more — this branch now falls through to
        # the shared logging like every other method.
        fpr, tpr, _ = roc_curve(target, score)
        ks = np.max(np.abs(tpr - fpr))
    elif method == 'origin':
        # The original approach cannot handle missing values, so drop them first.
        notna_pos = ~np.isnan(score)
        score = score[notna_pos]
        target = target[notna_pos]
        sorted_score = np.sort(np.unique(score))
        bad_tot = np.sum(target == 1)
        good_tot = np.sum(target == 0)

        # KS as the max gap between bad and good cumulative rates over all
        # candidate thresholds.
        bad_good_diff = (
            abs(np.sum((score <= s) & (target == 1)) / bad_tot - np.sum(
                (score <= s) & (target == 0)) / good_tot)
            for s in sorted_score
        )
        ks = max(bad_good_diff)
    elif method == 'origin_pf':
        # Quantile-bucket variant: split the sorted scores into 20 sections and
        # take the max signed gap between cumulative bad and good rates.
        section_num = 20
        Y = pd.Series(target)
        sample_num = len(Y)
        bad_percent = np.zeros([section_num, 1])
        good_percent = np.zeros([section_num, 1])
        point = pd.DataFrame(score)
        sorted_point = point.sort_values(by=0)
        total_bad_num = len(np.where(Y == 1)[0])
        total_good_num = len(np.where(Y == 0)[0])
        for i in range(0, section_num):
            # Threshold at the (i+1)/section_num position of the sorted scores.
            split_point = sorted_point.iloc[int(round(sample_num * (i + 1) / section_num)) - 1]
            position_in_this_section = np.where(point <= split_point)[0]
            bad_percent[i] = len(np.where(Y.iloc[position_in_this_section] == 1)[0]) / total_bad_num
            good_percent[i] = len(np.where(Y.iloc[position_in_this_section] == 0)[0]) / total_good_num

        ks_value = bad_percent - good_percent
        ks = max(ks_value)[0]
    else:
        raise ValueError("Unsupported method {} encountered!".format(repr(method)))
    logger.info("Current predict KS is {}...".format(ks))
    return ks


def calc_auc(preds: Union[np.ndarray, pd.Series], actual: Union[np.ndarray, pd.Series]) -> float:
    """
    Compute the ROC AUC of predictions against binary labels.

    :param preds: predicted scores or probabilities
    :param actual: true labels
    :return: AUC value
    """
    preds = preds.values if isinstance(preds, pd.Series) else preds
    actual = actual.values if isinstance(actual, pd.Series) else actual

    # Keep only the positions where a prediction actually exists.
    valid = ~np.isnan(preds)
    fpr, tpr, _ = roc_curve(actual[valid], preds[valid])
    auc_value = auc(fpr, tpr)
    logger.info("Current predict AUC is {}...".format(auc_value))
    return auc_value

def models_auc(hyper_param: str, space: list, auc_trains: list, auc_tests: list) -> None:
    """
    Plot train/test AUC across a hyper-parameter sweep.

    :param hyper_param: hyper parameter name (x-axis label)
    :param space: parameter values tried
    :param auc_trains: train AUC per value
    :param auc_tests: test AUC per value
    :return: None
    """
    plt.figure()
    for values, style, colour, label in (
        (auc_trains, 's-', 'r', "AUC train"),
        (auc_tests, 'o-', 'g', "AUC test"),
    ):
        plt.plot(space, values, style, color=colour, label=label)

    # Explicit ticks only for small sweeps, to avoid a crowded axis.
    if len(space) <= 20:
        plt.xticks(space)

    plt.legend(loc='lower right')
    plt.ylabel('AUC value')
    plt.xlabel(f"{hyper_param}")
    plt.title(f"AUC value vs. Parameter tuning")
    plt.show()
    return None

def models_ks(hyper_param: str, space: list, ks_trains: list, ks_tests: list) -> None:
    """
    Plot train/test KS across a hyper-parameter sweep.

    :param hyper_param: hyper parameter name (x-axis label)
    :param space: parameter values tried
    :param ks_trains: train KS per value
    :param ks_tests: test KS per value
    :return: None
    """
    plt.figure()
    for values, style, colour, label in (
        (ks_trains, 's-', 'r', "KS train"),
        (ks_tests, 'o-', 'g', "KS test"),
    ):
        plt.plot(space, values, style, color=colour, label=label)

    # Explicit ticks only for small sweeps, to avoid a crowded axis.
    if len(space) <= 20:
        plt.xticks(space)

    plt.legend(loc='lower right')
    plt.ylabel('KS value')
    plt.xlabel(f"{hyper_param}")
    plt.title(f"KS value vs. Parameter tuning")
    plt.show()
    return None


class LGBModelTuner(object):
    """
    Manual hyper-parameter tuner for a LightGBM model over a fixed train/test split.

    Wraps either a raw ``lgb.Booster`` or a sklearn-style ``LGBMClassifier`` /
    ``LGBMRegressor`` and reports KS / AUC on both datasets for candidate
    parameter values.
    """

    def __init__(self, lgbm, X, y, X_test, y_test, hit_indices):
        """
        :param lgbm: lightgbm model (``lgb.Booster`` or sklearn wrapper)
        :param X: training features (DataFrame)
        :param y: training labels (Series)
        :param X_test: test features (DataFrame)
        :param y_test: test labels (Series)
        :param hit_indices: row indices that should keep a model score;
            predictions outside this set are blanked to NaN during evaluation
        :raises TypeError: when ``lgbm`` is not a supported lightgbm type
        """
        self.estimator = lgbm
        self.X, self.Y = X, y
        self.X_test, self.Y_test = X_test, y_test
        assert hit_indices is not None, "Hit indices are necessary to get model result!"
        self.hit_indices = hit_indices
        self.dataset_train = lgb.Dataset(X.values, label=y.values)
        self.dataset_test = lgb.Dataset(X_test.values, label=y_test.values)
        # TODO: add history
        self.history = []
        if isinstance(lgbm, lgb.Booster):
            assert lgbm.params.get('seed') is not None, "Better to set `seed` for lightgbm booster!"
            # Force full parallelism regardless of the incoming configuration.
            lgbm.params.update({'num_threads': -1})
            self.params = lgbm.params
        elif isinstance(lgbm, (LGBMClassifier, LGBMRegressor)):
            assert lgbm.get_params().get('random_state') is not None, "Better to set `random_state` for lightgbm booster!"
            lgbm.n_jobs = -1
            self.params = lgbm.get_params()
        else:
            raise TypeError("Input model should be a `lgb.Booster` or `LGBMClassifier`/`LGBMRegressor`!")

    def get_model_result(self, params: dict) -> dict:
        """
        Retrain with ``params`` and evaluate KS / AUC on train and test data.

        :param params: model parameters for the retrain; NOTE(review): on the
            Booster path this dict is mutated in place (``metric`` is overwritten)
        :return: {'ks': (train, test), 'auc': (train, test)}
        :raises TypeError: when the wrapped estimator is not a supported type
        """
        X, y = self.X.values, self.Y.values
        X_test, y_test = self.X_test.values, self.Y_test.values
        if isinstance(self.estimator, lgb.Booster):
            params['metric'] = 'auc'
            estimator = lgb.train(params, self.dataset_train)
            # NOTE(review): Booster.predict is given a lgb.Dataset here, but it
            # normally expects raw feature data — confirm this works with the
            # lightgbm version in use.
            pred_train = pd.Series(estimator.predict(self.dataset_train), index=self.X.index)
            pred_test = pd.Series(estimator.predict(self.dataset_test), index=self.X_test.index)
        elif isinstance(self.estimator, LGBMRegressor):
            estimator = LGBMRegressor(**params)
            estimator.fit(X, y, eval_metric='auc')
            pred_train = pd.Series(estimator.predict(X), index=self.X.index)
            pred_test = pd.Series(estimator.predict(X_test), index=self.X_test.index)
        elif isinstance(self.estimator, LGBMClassifier):
            estimator = LGBMClassifier(**params)
            estimator.fit(X, y, eval_metric='auc')
            pred_train = pd.Series(estimator.predict_proba(X)[:, 1], index=self.X.index)
            pred_test = pd.Series(estimator.predict_proba(X_test)[:, 1], index=self.X_test.index)
        else:
            raise TypeError("Input model should be a `lgb.Booster` or `LGBMClassifier`/`LGBMRegressor`!")
        # Blank out scores for rows that were never hit by the model.
        pred_train.loc[~pred_train.index.isin(self.hit_indices)] = np.nan
        pred_test.loc[~pred_test.index.isin(self.hit_indices)] = np.nan
        # Compute model evaluation metrics. NOTE(review): scores are negated for
        # KS only — presumably to give the default 'origin_pf' method (which uses
        # a signed difference) the expected direction; confirm against the
        # scorecard convention.
        ks_train, ks_test = calc_ks(-pred_train, y), calc_ks(-pred_test, y_test)
        auc_train, auc_test = calc_auc(pred_train, y), calc_auc(pred_test, y_test)
        # return {'train': (ks_train, auc_train), 'test': (ks_test, auc_test)}
        return {'ks': (ks_train, ks_test), 'auc': (auc_train, auc_test)}

    def try_tune(self, param: str, space: list, plot: bool = True) -> None:
        """
        Evaluate every value in ``space`` for ``param`` without committing any.

        :param param: parameter name to sweep
        :param space: candidate values
        :param plot: when True, plot KS and AUC curves over the sweep
        :return: None
        """
        # Work on a copy so the sweep does not alter the committed parameters.
        params = {k: v for k, v in self.params.items()}
        ks_trains, ks_tests = [], []
        auc_trains, auc_tests = [], []
        for value in space:
            params.update({param: value})
            result = self.get_model_result(params)
            ks_train, ks_test = result['ks'][0], result['ks'][1]
            auc_train, auc_test = result['auc'][0], result['auc'][1]
            ks_trains.append(ks_train)
            ks_tests.append(ks_test)
            auc_trains.append(auc_train)
            auc_tests.append(auc_test)
            logger.info(
                "While {}={}, model ks train={}, test={}, auc is train={}, test={}..."
                .format(param, repr(value) if isinstance(value, str) else value, ks_train, ks_test, auc_train, auc_test)
            )
        if plot:
            models_ks(param, space, ks_trains, ks_tests)
            models_auc(param, space, auc_trains, auc_tests)
        return None

    def tune(self, param: str, value: Union[str, int, float]) -> None:
        """
        Commit one parameter value onto the wrapped estimator.

        :param param: parameter name
        :param value: new value
        :return: None
        :raises TypeError: when the wrapped estimator is not a supported type
        """
        self.params.update({param: value})
#        check_lgb_params(self.params)
        lgbm = self.estimator
        if isinstance(lgbm, lgb.Booster):
            lgbm.params = self.params
        elif isinstance(lgbm, (LGBMClassifier, LGBMRegressor)):
            lgbm.set_params(**self.params)
        else:
            raise TypeError("Input model should be a `lgb.Booster` or `LGBMClassifier`/`LGBMRegressor`!")
        logger.info(self.params)
        return None

def tune_feature_num(params, X, Y, X_test, Y_test, feature_df, hit_indices, min_feature_num=5, step=5):
    """
    Sweep the number of top-ranked RFE features and evaluate KS / AUC at each size.

    :param params: LGBMClassifier parameters
    :param X: training features
    :param Y: training labels
    :param X_test: test features
    :param Y_test: test labels
    :param feature_df: frame with a ``rank_lgb_rfe`` column indexed by feature name
    :param hit_indices: row indices passed through to LGBModelTuner
    :param min_feature_num: smallest feature count to try
    :param step: increment between feature counts
    :return: (space, ks_trains, ks_tests, auc_trains, auc_tests)
    """
    space = []
    ks_trains, ks_tests = [], []
    auc_trains, auc_tests = [], []
    for num in range(min_feature_num, len(feature_df) + min_feature_num, step):
        print(num)
        selected = feature_df.loc[feature_df.rank_lgb_rfe <= num].index.tolist()
        print(len(selected))
        tuner = LGBModelTuner(
            LGBMClassifier(**params), X[selected], Y, X_test[selected], Y_test, hit_indices
        )
        metrics = tuner.get_model_result(params)
        ks_trains.append(metrics['ks'][0])
        ks_tests.append(metrics['ks'][1])
        auc_trains.append(metrics['auc'][0])
        auc_tests.append(metrics['auc'][1])
        space.append(num)

    models_ks('feature_num', space, ks_trains, ks_tests)
    models_auc('feature_num', space, auc_trains, auc_tests)

    return space, ks_trains, ks_tests, auc_trains, auc_tests

def tune_n_estimators_learning_rate(lgbm_tuner, learning_rate=(), n_estimators=()):
    """
    Grid-evaluate every (n_estimators, learning_rate) combination on a tuner.

    :param lgbm_tuner: object exposing ``tune(param, value)``, ``params`` and
        ``get_model_result(params)`` (e.g. an LGBModelTuner)
    :param learning_rate: learning-rate values to try
        (FIX: default is now an immutable tuple, not a shared mutable list)
    :param n_estimators: n_estimators values to try
    :return: (df_train, df_test) — train/test AUC pivot tables indexed by
        learning_rate with n_estimators as columns
    """
    # FIX: build records directly in loop order instead of collecting parallel
    # lists and reconstructing the grid with meshgrid + in-place reshapes.
    # Also removes dead code: KS values were collected but never used, and the
    # original had no-op lines (`n_estimators = n_estimators`, `df_pt.head()`).
    records = []
    for n_est in n_estimators:
        for lr in learning_rate:
            lgbm_tuner.tune('n_estimators', n_est)
            lgbm_tuner.tune('learning_rate', lr)
            result = lgbm_tuner.get_model_result(lgbm_tuner.params)
            records.append({
                'n_estimators': n_est,
                'learning_rate': lr,
                'auc_trains': result['auc'][0],
                'auc_tests': result['auc'][1],
            })

    df = pd.DataFrame(records, columns=['n_estimators', 'learning_rate', 'auc_trains', 'auc_tests'])
    df_pt = df.pivot_table(index='learning_rate', columns='n_estimators')

    df_test = df_pt['auc_tests']
    df_train = df_pt['auc_trains']

    return df_train, df_test

def plot_n_estimators_learning_rate(df_train, df_test):
    """
    Draw heatmaps of train AUC, test AUC and their gap over the
    (n_estimators, learning_rate) grid.

    :param df_train: train AUC pivot table (learning_rate x n_estimators)
    :param df_test: test AUC pivot table (learning_rate x n_estimators)
    """
    # (data, palette gamma, factor applied to the max for the colour center, title)
    panels = (
        (df_train, 1, 1.05, 'Auc of Train cross by n_estimators and learning_rate'),
        (df_test, 0.8, 1.05, 'Auc of Test cross by n_estimators and learning_rate'),
        (df_train - df_test, 0.8, 1.5, 'Auc of (Train-test) cross by n_estimators and learning_rate'),
    )
    for data, gamma, center_scale, title in panels:
        f, ax = plt.subplots(figsize=(10, 4))
        cmap = sns.cubehelix_palette(start=1, rot=3, gamma=gamma, as_cmap=True)
        sns.heatmap(data, cmap=cmap, linewidths=0.05, ax=ax, center=center_scale * data.max().max())
        ax.set_title(title)
        ax.set_xlabel('n_estimators')
        ax.set_ylabel('learning_rate')