import datetime
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import roc_auc_score
from sklearn.utils.class_weight import compute_sample_weight
import joblib

def model_CV_train(X_train, Y_train, logger, param_dict):
    """Grid-search XGBoost hyper-parameters with 5-fold cross-validation.

    Parameters
    ----------
    X_train, Y_train : full training feature matrix and labels.
    logger : logger used for all progress and result messages.
    param_dict : mapping of hyper-parameter name -> list of candidate
        values, passed straight through as GridSearchCV's ``param_grid``.

    Returns
    -------
    dict
        The best hyper-parameter combination found (``best_params_``),
        so callers can feed it directly into ``model_train`` instead of
        copying it by hand from the console.
    """
    logger.info("=========开始交叉网格处理===================")
    # 1. Hold out 30% of the data; stratify so the class ratio matches
    #    the split used in model_train (same random_state for parity).
    x_train, x_test, y_train, y_test = train_test_split(
        X_train, Y_train, test_size=0.3, random_state=6, stratify=Y_train)
    # 2. Grid search with 5-fold CV, scored by ROC AUC.
    logger.info("开始网格化搜索")
    logger.info(datetime.datetime.now())
    grid_cv = GridSearchCV(estimator=XGBClassifier(),
                           param_grid=param_dict,
                           cv=5,
                           scoring="roc_auc",
                           verbose=1)
    grid_cv.fit(x_train, y_train)
    # 3. Report the winning combination via the logger (previously these
    #    went to print() and the result was discarded).
    logger.info(grid_cv.best_params_)
    logger.info(f"最佳评分: {grid_cv.best_score_}")
    logger.info("结束网格化搜索")
    logger.info(datetime.datetime.now())
    return grid_cv.best_params_

def model_train(X_train, Y_train, logger, n_estimators, max_depth, learning_rate,
                model_path='../model/xgb.pkl'):
    """Train an XGBoost classifier, evaluate it, and persist it to disk.

    Parameters
    ----------
    X_train, Y_train : full training feature matrix and labels.
    logger : logger used for progress and metric messages.
    n_estimators, max_depth, learning_rate : XGBoost hyper-parameters
        (e.g. the combination found by ``model_CV_train``).
    model_path : where to dump the fitted model (default keeps the
        original hard-coded location, so existing callers are unchanged).
    """
    logger.info("=========开始训练模型===================")
    # 1. Stratified 70/30 split keeps the class ratio in both partitions.
    x_train, x_test, y_train, y_test = train_test_split(
        X_train, Y_train, test_size=0.3, random_state=6, stratify=Y_train)
    # Per-sample 'balanced' weights counteract class imbalance when fitting.
    cls_weight = compute_sample_weight('balanced', y_train)

    # 2. Model training
    xgb = XGBClassifier(n_estimators=n_estimators,
                        max_depth=max_depth,
                        learning_rate=learning_rate)
    xgb.fit(x_train, y_train, sample_weight=cls_weight)

    # 3. Evaluation
    # Hard-label predictions for the error metrics below.
    y_pred_train = xgb.predict(x_train)
    y_pred_test = xgb.predict(x_test)

    # 3.1 MSE / MAE on the 0/1 labels (on hard labels these behave like
    # error-rate diagnostics; kept for continuity with earlier runs).
    mse_train = mean_squared_error(y_true=y_train, y_pred=y_pred_train)
    mae_train = mean_absolute_error(y_true=y_train, y_pred=y_pred_train)
    print(f"模型在训练集上的均方误差：{mse_train}")
    print(f"模型在训练集上的平均绝对误差：{mae_train}")
    mse_test = mean_squared_error(y_true=y_test, y_pred=y_pred_test)
    mae_test = mean_absolute_error(y_true=y_test, y_pred=y_pred_test)
    # BUG FIX: ROC AUC must be computed from the positive-class probability,
    # not from predict()'s hard 0/1 labels — hard labels collapse the ROC
    # curve to a single threshold and understate the model's ranking quality.
    auc_test = roc_auc_score(y_test, xgb.predict_proba(x_test)[:, 1])
    print(f"模型在测试集上的均方误差：{mse_test}")
    print(f"模型在测试集上的平均绝对误差：{mae_test}")
    print(f"模型在测试集上的auc:{auc_test}")
    logger.info("=========================模型训练完成=============================")
    logger.info(f"模型在训练集上的均方误差：{mse_train}")
    logger.info(f"模型在训练集上的平均绝对误差：{mae_train}")
    logger.info(f"模型在测试集上的均方误差：{mse_test}")
    logger.info(f"模型在测试集上的平均绝对误差：{mae_test}")
    # AUC was previously printed but never logged — record it too.
    logger.info(f"模型在测试集上的auc:{auc_test}")
    # 4. Persist the fitted model.
    joblib.dump(xgb, model_path)