from . import config
import pandas as pd
import sys
sys.path.append(config.pj_root)
import steward as st
import pickle
import logging
import random
import xgboost
import numpy as np


def scorer(model, df, test_index, X_test):
    """Order-level top-1 hit rate.

    For each order, the row with the highest predicted probability is taken
    as the model's pick; the score is the fraction of positive orders whose
    pick is the actually-positive row.

    model: fitted classifier exposing predict_proba.
    df: DataFrame containing at least ['orderid', 'orderlabel'].
    test_index: positional indices of the evaluation rows in df.
    X_test: feature matrix aligned row-for-row with test_index.
    Returns a float in [0, 1]; 0.0 when the fold contains no positive rows.
    """
    y_pred = model.predict_proba(X_test)[:, 1]
    gt = df[['orderid', 'orderlabel']].iloc[test_index]
    n_positive = len(gt[gt.orderlabel == 1])
    if n_positive == 0:
        # Guard: the original divided by zero on a fold without positives.
        return 0.0
    table = gt.join(pd.DataFrame(y_pred, index=gt.index, columns=['pred_proba']))
    # idxmax picks the first row holding the maximal probability per order --
    # same semantics as the previous groupby/apply + iloc[0], but vectorized.
    top_rows = table.loc[table.groupby('orderid')['pred_proba'].idxmax()]
    return float((top_rows['orderlabel'] == 1).sum()) / n_positive


def train_fold(X_df, y_df, use_model=config.model, model_para=config.model_para, kfold_k=config.kfold_k, scorer=scorer, sample_weight=None, save=False, raw=False, verbose=True):
    '''
    Train one model per CV fold and return the mean fold score.

    X_df: feature DataFrame (or a raw array when raw=True)
    y_df: DataFrame with columns ['orderid', 'room_id', 'orderlabel']
    use_model / model_para: model class and its constructor kwargs
    kfold_k: number of folds
    scorer: callable(model, y_df, test_index, X_test) -> float
    sample_weight: optional per-sample weights aligned with the rows of X_df
    save: when True, pickle each fold's model under config.pj_root + 'model/'
    raw: when True, use X_df as-is instead of X_df.values
    Returns the average score over the kfold_k folds.
    '''
    X = X_df if raw else X_df.values
    y = y_df['orderlabel'].values

    all_score = 0.0
    for i, (train_index, test_index) in enumerate(st.get_train_test_index(X, y, True, kfold_k)):
        X_train, X_test = X[train_index], X[test_index]
        y_train = y[train_index]
        if verbose:
            print('start_training...')
        model = use_model(**model_para)
        # Bug fix: slice the weights to the training fold -- previously the
        # full-length weight vector was passed alongside a fold-sized X_train,
        # which misaligns (or crashes) whenever sample_weight is given.
        fold_weight = None if sample_weight is None else np.asarray(sample_weight)[train_index]
        model.fit(X_train, y_train, sample_weight=fold_weight)
        score = scorer(model, y_df, test_index, X_test)
        all_score += score
        if verbose:
            print('Fold %d/%d Score: %f ' % (i + 1, kfold_k, score))
        if save:
            with open(config.pj_root + 'model/%s-fold-%d.mo' % (use_model.__name__, i + 1), 'wb') as f:
                pickle.dump(model, f)
    if verbose:
        print('Avg Score: %f ' % (all_score / kfold_k))
    return all_score / kfold_k


def train_all(X_df, y_df, use_model=config.model, model_para=config.model_para, sample_weight=None, raw=False, tag='', model_name=None):
    '''
    Fit a single model on the full dataset and pickle it to disk.

    X_df: feature DataFrame (or a raw array when raw=True)
    y_df: DataFrame with columns ['orderid', 'room_id', 'orderlabel']
    use_model / model_para: model class and its constructor kwargs
    tag / model_name: control the file name written under config.pj_root + 'model/'
    Returns whatever model.fit(...) returns.
    '''
    X = X_df if raw else X_df.values
    y = y_df['orderlabel'].values

    print('start training...')
    model = use_model(**model_para)
    evals_result = model.fit(X, y, sample_weight=sample_weight)

    stem = use_model.__name__ if model_name is None else model_name
    model_file_name = config.pj_root + 'model/' + stem + '_' + tag + '.mo'
    with open(model_file_name, 'wb') as f:
        pickle.dump(model, f)
        print('saved to: ', model_file_name)
    return evals_result


def train_xgboost_step(X_df, y_df, use_model=config.model, model_para=config.model_para, kfold_k=config.kfold_k, sample_weight=None, use_piece=0, early_stopping_rounds=20, n_estimators=None, verbose=True, raw=False):
    '''
    Train on a single CV fold with early stopping against a custom
    order-level metric, and return the fitted model.

    X_df: feature DataFrame (or a raw array when raw=True)
    y_df: DataFrame with columns ['orderid', 'room_id', 'orderlabel']
    use_piece: which fold (0-based) to train/evaluate on; if it exceeds the
        number of folds the last fold is used, matching the original code.
    early_stopping_rounds / n_estimators: forwarded xgboost settings.
    '''
    X = X_df if raw else X_df.values
    y = y_df['orderlabel'].values

    # Walk the fold generator until the requested piece is reached.
    for i, (train_index, test_index) in enumerate(st.get_train_test_index(X, y, True, kfold_k)):
        if i == use_piece:
            break

    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]

    gt = y_df[['orderid', 'orderlabel']].iloc[test_index]
    N = len(gt[gt.orderlabel == 1])

    def eval_metric(y_pred, y_true):
        # Negated order-level top-1 hit rate: xgboost minimizes the metric,
        # so the score used by scorer() is returned with a minus sign.
        y_true = y_true.get_label()
        table = gt.join(pd.DataFrame(y_pred, index=gt.index, columns=['pred_proba']))
        pred_every_order = table.groupby('orderid').apply(lambda x: x[x.pred_proba == x.pred_proba.max()].iloc[0]['orderlabel'])
        return 'V', - len(pred_every_order[pred_every_order == 1]) / N

    if n_estimators is not None:
        # Bug fix: copy before overriding -- model_para defaults to the shared
        # config.model_para dict, and writing into it leaked the override into
        # every subsequent call that relies on the default.
        model_para = dict(model_para, n_estimators=n_estimators)

    # Bug fix: slice the weights to the training fold; the full-length vector
    # does not align with X_train.
    fold_weight = None if sample_weight is None else np.asarray(sample_weight)[train_index]

    model = use_model(**model_para)
    model.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds, sample_weight=fold_weight, verbose=verbose)
    return model


def feature_importance_xgboost(X_df, y_df, n_models, model_para_ref=None, use_seed=False, task='classification', kfold=False, kfold_k=config.kfold_k, kfold_shuffle=False, scorer=scorer, verbose=False):
    '''
    Train several xgboost models with randomly sampled hyper-parameters and
    average their feature-importance scores.

    Parameters:
        X_df: feature DataFrame; every column is used for training.
        y_df: DataFrame with an 'orderlabel' target column (plus 'orderid').
        n_models: how many models to train.
        model_para_ref: dict mapping parameter name -> list of candidates, e.g.
                {
                    'max_depth': [2, 3, 4],
                    'learning_rate': [0.1, 0.5, 1.0, 1.5],
                    'objective': ['rank:pairwise']
                }
            One candidate per parameter is drawn at random for each model.
        use_seed: when True, additionally inject a random 'seed' per model.
        task: 'classification' -> XGBClassifier, anything else -> XGBRegressor.
        kfold / kfold_k / kfold_shuffle / scorer: only effective when
            kfold=True; each model is then trained and scored on every fold
            and the function additionally returns the average score.
            scorer has the signature scorer(model, y_df, test_index, X_test).
        verbose: log progress through the logging module.

    Returns:
        kfold=False: DataFrame of per-feature importance, descending.
        kfold=True: (importance DataFrame, average score).
    '''
    if model_para_ref is None:
        # Avoid a shared mutable default argument.
        model_para_ref = {}

    X = X_df.values
    y = y_df['orderlabel'].values

    logger = logging.getLogger('feature_importance_xgboost')
    feature_name = X_df.columns.tolist()
    feature_importance = np.zeros(len(feature_name))
    score = 0.0
    score_n = 0

    for model_i in range(n_models):
        # Draw one candidate value for every parameter.
        para = {key: random.choice(candidates) for key, candidates in model_para_ref.items()}
        if use_seed:
            para['seed'] = random.randint(0, 10000)

        if verbose:
            logger.warning('model %d / %d' % (model_i + 1, n_models))
            logger.warning('use para: %s' % str(para))

        if kfold:
            # Distinct loop variable: the original reused `i`, shadowing the
            # model index and making the verbose log report fold numbers as
            # model numbers.
            for fold_i, (train_index, test_index) in enumerate(st.get_train_test_index(X, y, kfold, kfold_k, kfold_shuffle)):
                X_train, X_test = X[train_index], X[test_index]
                y_train = y[train_index]
                if task == 'classification':
                    model = xgboost.XGBClassifier(**para)
                else:
                    model = xgboost.XGBRegressor(**para)
                model.fit(X_train, y_train)
                # Bug fix: importances were never accumulated in the kfold
                # branch, so the returned table used to be all zeros.
                feature_importance += model.feature_importances_
                score_i = scorer(model, y_df, test_index, X_test)
                score += score_i
                score_n += 1
                if verbose:
                    logger.warning('fold %d score %f' % (fold_i + 1, float(score_i)))
        else:
            if task == 'classification':
                model = xgboost.XGBClassifier(**para)
            else:
                model = xgboost.XGBRegressor(**para)
            model.fit(X, y)
            feature_importance += model.feature_importances_

    # Average over however many models were actually fitted.
    denominator = score_n if kfold else n_models
    if denominator:
        feature_importance = feature_importance / denominator

    result = pd.DataFrame(feature_importance, index=feature_name, columns=['importance'])
    result = result.sort_values('importance', ascending=False)

    if kfold:
        return result, float(score) / score_n
    else:
        return result
