# coding:utf-8
import logging
import xgboost
import random
import numpy as np
import pandas as pd
from .general import get_train_test


def feature_importance_xgboost(y, df, n_models, model_para_ref=None, use_seed=False, task='classification', kfold=False, kfold_k=0, kfold_shuffle=False, scorer=None, verbose=False):
    '''
    Train several xgboost models with randomly sampled hyper-parameters and
    report an averaged importance score for every feature.

    Parameters:
        y: str, name of the target column (e.g. 'target').
        df: DataFrame. Every column except `y` is used as a training feature.
        n_models: int, number of models to train.
        model_para_ref: dict of candidate values per hyper-parameter, e.g.
                para_ref = {
                    'max_depth': [2, 3, 4],
                    'learning_rate': [0.1, 0.5, 1.0, 1.5],
                    'objective': ['rank:pairwise']
                }
                For each of the n_models trainings, one value is drawn at
                random from each candidate list. Defaults to no parameters
                (xgboost defaults are used).
        use_seed: if True, a random 'seed' parameter (0..10000) is also
                injected into every training run.
        task: 'classification' uses XGBClassifier, anything else XGBRegressor.
        kfold, kfold_k, kfold_shuffle, scorer: only meaningful when kfold=True:
                kfold_k: number of folds.
                kfold_shuffle: whether to shuffle before splitting.
                scorer: callable scorer(model, X, y_gt) -> float, the model's
                        score. Required when kfold=True, otherwise kfold is
                        pointless.
        verbose: if True, log progress and per-model details.

    Returns:
        kfold=False: result — per-feature importance, sorted descending.
        kfold=True:  (result, avg_score) — avg_score is the mean model score.

    Example — to try a single fixed parameter set under different seeds:
        model_para_ref = {
            'depth': [1],
            'learning_rate': [0.1]
        }
        use_seed=True

    Raises:
        ValueError: if kfold is True but no scorer is supplied.
    '''
    # Avoid the mutable-default-argument pitfall: build a fresh dict per call.
    if model_para_ref is None:
        model_para_ref = {}
    # Fail fast with a clear message instead of an opaque TypeError inside
    # the training loop (scorer(...) on None).
    if kfold is True and scorer is None:
        raise ValueError('scorer is required when kfold=True')

    y_df = df[y]
    X_df = df.drop([y], axis=1)
    logger = logging.getLogger('feature_importance_xgboost')
    feature_name = X_df.columns.tolist()
    feature_importance = np.zeros(len(feature_name))
    score = 0
    score_n = 0  # number of trained folds; divisor for importances/scores

    for i in range(n_models):
        # Draw one candidate value at random for every hyper-parameter.
        para = {key: random.choice(values) for key, values in model_para_ref.items()}
        if use_seed:
            para['seed'] = random.randint(0, 10000)

        if verbose:
            logger.warning('model %d / %d' % (i + 1, n_models))
            logger.warning('use para: %s' % str(para))

        # Split into train/test folds (a single trivial split when kfold=False;
        # exact split semantics are delegated to get_train_test).
        for X_train, y_train, X_test, y_test in get_train_test(X_df.values, y_df.values, kfold, kfold_k, kfold_shuffle):
            if task == 'classification':
                model = xgboost.XGBClassifier(**para)
            else:
                model = xgboost.XGBRegressor(**para)
            model.fit(X_train, y_train)
            feature_importance += model.feature_importances_
            if kfold is True:
                score_i = scorer(model, X_test, y_test)
                score += score_i
                score_n += 1
                if verbose:
                    logger.warning('model %d score %f' % (i + 1, float(score_i)))

    # With kfold, importances were accumulated once per fold (score_n folds);
    # without, exactly once per model.
    if kfold is True:
        feature_importance = feature_importance / score_n
    else:
        feature_importance = feature_importance / n_models

    result = pd.DataFrame(feature_importance, index=feature_name, columns=['importance'])
    result = result.sort_values('importance', ascending=False)

    if kfold is True:
        return result, float(score) / score_n
    else:
        return result
