import pandas as pd
import sys
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
import multiprocessing
import datetime as dt
import logging
import os
from sklearn.metrics import mean_absolute_error
from tqdm import tqdm

join = os.path.join
dirname = os.path.dirname
CURRENT_PATH = dirname(os.path.realpath(__file__))
sys.path.append(CURRENT_PATH)
from utils_tsfresh import time_log
from utils_xgb import pseudo_huber_loss_obj, mae_huber_loss , more_uptostand_score

LOG_SAVE_PATH = ''
DATA_PATH = ''

'''
使用xgboost训练最终模型。

xgboost超参众多，这里编写一个半自动的调参框架自动训练：
1. 逐步、顺序进行训练并确定每一步的参数最优值
2. 只使用 gridsearchCV 即可，CV功能在此实现，不使用xgboost内部的CV功能
3. 记录每一步的目标统计量
4. 产出默认参数的方法（产出最佳模型参数的结果）

考虑到业务目标，这里在gridsearchcv中筛选模型时，使用两个目标函数：
1. MAE
2. 业务目标函数: 更多的经销商的误差小于15%之内

input : CSV 入模的数据集
output : pkl 模型对象
output : logger log文件
'''


class ModelTune_auto(object):
    """Semi-automatic, stepwise hyper-parameter tuner for an sklearn-style regressor.

    Each entry of ``param_grids_list`` is a single-parameter grid. Grids are
    searched one at a time with GridSearchCV; the best value found at every
    step is frozen into ``self.tuned_params`` before the next grid is searched.
    Model selection uses MAE (the ``refit`` target) alongside the business
    metric ``more_uptostand_score`` ("more dealers within 15% error").
    """

    def __init__(self, model, param_grids_list):
        """
        :param model: unfitted sklearn-compatible regressor (e.g. XGBRegressor)
        :param param_grids_list: ordered list of one-parameter grid dicts,
            e.g. ``[{'max_depth': [7, 9]}, {'gamma': [0.05, 0.1]}, ...]``;
            tuned sequentially, each winner is frozen before the next search
        """
        self.param_grids_list = param_grids_list
        # BUG FIX: ``cpu_count() // 1.5`` yields a float (e.g. 8 // 1.5 == 5.0);
        # GridSearchCV requires an int n_jobs — coerce and keep >= 1 worker.
        n_jobs = max(1, int(multiprocessing.cpu_count() // 1.5))
        self.gridseach_params = {'verbose': 0, 'cv': 3, 'n_jobs': n_jobs}
        self.regressor = model
        self.tuned_params = {}  # best values frozen so far: param name -> scalar

        self.info_logger = self.train_logger()

    @time_log('GridSearchCV参数寻优')
    def gridsearch_tune(self, Xtrain, Ytrian, Xtest, Ytest):
        '''
        Tune the parameters grid-by-grid in the order of
        ``self.param_grids_list``. Scoring combines MAE with the business
        metric; ``refit='MAE'`` decides the best estimator of each step.

        After every step the frozen params, the CV train MAE, the test MAE
        and the business metric on the test set are written to the log file.
        :return: None (results accumulate in ``self.tuned_params`` and the log)
        '''
        # 'score_' fixes the original 'socre_' typo; only a cv_results_ column
        # name — model selection is driven by refit='MAE' either way.
        scoring = {'MAE': 'neg_mean_absolute_error',
                   'score_': more_uptostand_score}

        for cv_params in tqdm(self.param_grids_list):
            # BUG FIX: already-tuned values must be wrapped in one-element
            # lists — GridSearchCV rejects bare scalars as grid values (the
            # original passed them unwrapped from the second step onwards).
            grid = {param: [best] for param, best in self.tuned_params.items()}
            grid.update(cv_params)

            gs = GridSearchCV(self.regressor, grid,
                              refit='MAE',
                              verbose=self.gridseach_params['verbose'],
                              cv=self.gridseach_params['cv'],
                              n_jobs=self.gridseach_params['n_jobs'],
                              scoring=scoring)
            gs.fit(Xtrain, Ytrian)
            # Freeze this step's winner (plus re-confirmed earlier winners).
            self.tuned_params.update(gs.best_params_)

            train_best_score = str(abs(gs.best_score_))
            ypred = gs.best_estimator_.predict(Xtest)
            mae_test = str(mean_absolute_error(Ytest, ypred))
            # BUG FIX: original called the undefined name ``more_uptostand_func``
            # (NameError). Use the imported scorer instead.
            # NOTE(review): assumes the sklearn scorer signature
            # (estimator, X, y) — confirm against utils_xgb.
            uptostand_test = str(more_uptostand_score(gs.best_estimator_, Xtest, Ytest))
            tuned_params = str(self.tuned_params)
            logger_info = f'{tuned_params} - train_MAE:{train_best_score} - MAE:{mae_test} - UpToStand:{uptostand_test}'
            self.info_logger.info(logger_info)

    @staticmethod
    def train_logger():
        """Return a module logger writing to ``GridSearch_<date>.log``.

        BUG FIX: the FileHandler is attached only once per process — the
        original added a new handler on every instantiation, duplicating
        every log line.
        """
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)

        if not logger.handlers:
            today = str(dt.datetime.now())[:10]
            logger_name = '_'.join(['GridSearch', today]) + '.log'

            fh = logging.FileHandler(join(LOG_SAVE_PATH, logger_name))
            fh.setLevel(logging.DEBUG)
            # Log output format: timestamp - message
            fh.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
            logger.addHandler(fh)

        return logger

    @staticmethod
    def get_dict_key(dict_):
        """Return the first key of ``dict_`` (kept for backward compatibility;
        no longer needed by :meth:`gridsearch_tune`)."""
        return list(dict_.keys())[0]
    
    
    
if __name__ == '__main__':
    # TODO(review): fill in the real file names under DATA_PATH before running.
    X_data_train = pd.read_excel(join(DATA_PATH, ''))
    y_data_train = pd.read_excel(join(DATA_PATH, ''))
    X_data_test = pd.read_excel(join(DATA_PATH, ''))
    y_data_test = pd.read_excel(join(DATA_PATH, ''))

    # One single-parameter grid per tuning step, searched in this order.
    param_grids_list = [
        {'n_estimators': range(400, 1000, 50)},
        {'max_depth': [7, 9]},
        {'min_child_weight': [1, 3, 5, 7]},
        {'gamma': [0.05, 0.1, 0.3]},
        {'subsample': [0.7, 0.8, 0.9]},
        {'colsample_bytree': [0.7, 0.8, 0.9]},
        {'reg_alpha': [0, 0.1, 0.5, 1.0, 10, 100, 200, 1000]},
        {'reg_lambda': [0.01, 0.1, 1.0, 10, 100, 200, 1000]},
        {'learning_rate': [0.01, 0.025, 0.05, 0.1]}
    ]

    XGBregressor_base_params = {'booster': 'gbtree',
                                'eval_metric': 'mae',
                                'tree_method': 'hist'}  # histogram algo: faster training
    # BUG FIX: the sklearn wrapper takes a custom objective through the
    # ``objective`` argument; the original passed ``obj=...`` (an unknown kwarg
    # silently forwarded to the booster) while simultaneously setting
    # objective='reg:linear' (a deprecated alias). The custom pseudo-Huber
    # objective wins here.
    # NOTE(review): ``early_stopping_rounds`` was dropped — GridSearchCV never
    # supplies an eval_set, so early stopping could not take effect (and newer
    # xgboost raises without one).
    XGBregressor = xgb.XGBRegressor(objective=pseudo_huber_loss_obj,
                                    **XGBregressor_base_params)

    XGBreg_tune = ModelTune_auto(XGBregressor, param_grids_list)
    XGBreg_tune.gridsearch_tune(X_data_train, y_data_train, X_data_test, y_data_test)



