# -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
import pandas as pd
import joblib
from sklearn.model_selection import train_test_split, GridSearchCV
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score, roc_auc_score, classification_report
from warnings import filterwarnings
# 引用外部自定义模块
from utils.log import Logger
from utils.common import preprocessing, LOG_TIME_FORMAT
filterwarnings('ignore', module='sklearn')  # suppress sklearn warning spam during grid search

# Logging setup: one timestamped log file per run, written under the parent directory.
logfile_path = f'train_{datetime.now().strftime(LOG_TIME_FORMAT)}'
logfile = Logger('../', logfile_path, level='INFO').log()
# Model configuration: the base estimator plus the hyper-parameter grid
# searched by GridSearchCV in train_models() below.
model_config = {
    'name': 'XGBoost',
    'estimator': XGBClassifier(
        random_state=0,          # fixed seed for reproducible training
        eval_metric='logloss',
        reg_alpha=0.1,           # L1 regularisation
        reg_lambda=1.0,          # L2 regularisation
        colsample_bytree=0.8,    # fraction of columns sampled per tree
        subsample=0.2            # fraction of rows sampled per tree — NOTE(review): 0.2 is unusually low, confirm intended
    ),
    'params': {
        'n_estimators': [100, 200, 300],
        'max_depth': [3, 5, 7],
        'learning_rate': [0.03, 0.05, 0.07]
    }
}
# Load the training data, keeping only the target ('Attrition') and the
# selected feature columns listed here.
column_names = [
    'Attrition', 'Age', 'MonthlyIncome', 'OverTime',
    'JobSatisfaction', 'YearsAtCompany', 'WorkLifeBalance'
]
talent_loss = preprocessing('../data/train.csv', column_names=column_names)


def train_models(x_train, x_test, y_train, y_test):
    """Grid-search, fit and evaluate the configured model.

    Runs an exhaustive 5-fold cross-validated grid search (selecting on
    ROC-AUC) over ``model_config['params']``, then evaluates the refitted
    best estimator on the held-out test split.

    :param x_train: training feature matrix
    :param x_test: test feature matrix
    :param y_train: training target vector
    :param y_test: test target vector
    :return: dict with keys 'name', 'accuracy', 'auc_score',
             'best_params', 'best_estimator' and 'report'
    """
    # perf_counter is monotonic, so the reported duration cannot be
    # skewed by system clock adjustments (time.time() can be).
    start = time.perf_counter()
    logfile.info(f'开始训练{model_config["name"]}模型...')
    # Exhaustive grid search, 5-fold CV, model selection on ROC-AUC.
    grid = GridSearchCV(
        estimator=model_config['estimator'],
        param_grid=model_config['params'],
        scoring='roc_auc', cv=5
    )
    grid.fit(x_train, y_train)
    # Evaluate the best estimator (refit on the full training split).
    y_predict = grid.best_estimator_.predict(x_test)
    # Positive-class probability column, as required by roc_auc_score.
    y_predict_proba = grid.best_estimator_.predict_proba(x_test)[:, 1]
    accuracy = accuracy_score(y_test, y_predict)
    auc_score = roc_auc_score(y_test, y_predict_proba)
    report = classification_report(y_test, y_predict)
    elapsed = time.perf_counter() - start
    logfile.info(f'{model_config["name"]}模型训练完成, 耗时{elapsed:.2f}秒')
    logfile.info(f'准确率: {accuracy}, AUC值: {auc_score}, 最佳参数: {grid.best_params_}')
    return {
        'name': model_config['name'],
        'accuracy': accuracy,
        'auc_score': auc_score,
        'best_params': grid.best_params_,
        'best_estimator': grid.best_estimator_,
        'report': report
    }


def train(data: pd.DataFrame):
    """Train, evaluate and persist the attrition model.

    One-hot encodes the categorical column, splits the data into
    stratified train/test sets, delegates grid-search training to
    ``train_models`` and saves the best estimator to disk.

    :param data: feature-engineering-processed DataFrame containing the
                 'Attrition' target and the selected feature columns
    """
    # One-hot encode the categorical variable.
    data = pd.get_dummies(data, columns=['OverTime'])
    # Split features and target.
    X = data.drop(['Attrition'], axis=1)
    Y = data['Attrition']
    # Stratified split keeps the class ratio identical in both splits.
    logfile.info('开始数据集划分...')
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
    results = train_models(x_train, x_test, y_train, y_test)
    print(f'\033[1;32m{results["name"]}模型准确率:\033[0m {results["accuracy"]}')
    print(f'\033[1;32m{results["name"]}模型AUC值:\033[0m {results["auc_score"]}')
    print(f'\033[1;32m{results["name"]}最佳参数:\033[0m {results["best_params"]}')
    print(f'\033[1;32m{results["name"]}模型报告:\033[0m\n{results["report"]}')
    # Persist the best estimator; create the target directory first so
    # joblib.dump cannot fail with FileNotFoundError on a fresh checkout.
    model_path = '../model/xgb_talent_loss.pkl'
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    joblib.dump(results['best_estimator'], model_path)
    logfile.info(f'`{os.path.basename(model_path)}` 模型保存完成')


if __name__ == '__main__':
    try:
        train(talent_loss)
    except Exception as e:
        # Log the failure, then re-raise so the process exits non-zero
        # and the full traceback is not silently swallowed.
        logfile.error(e)
        raise
