import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, f1_score, accuracy_score, roc_auc_score,root_mean_squared_error
import joblib
import datetime
from utils.common import get_path
from utils.feature_engineering import feature_processing,feature_extra
from utils.log import Logger
from sklearn.metrics import make_scorer, roc_auc_score
import numpy as np
from imblearn.over_sampling import SMOTENC
import lightgbm as lgb
from catboost import CatBoostClassifier
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']  # font that can render Chinese glyphs on macOS
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correctly with non-ASCII fonts
plt.rcParams['font.size'] = 15

class ChurnModel:
    """Employee-churn prediction model.

    Loads the raw training data, trains an XGBoost binary classifier on
    engineered features, and persists the fitted model to disk.
    """

    def __init__(self, path):
        """Create the model wrapper and load the training data.

        Args:
            path: CSV path (resolved via ``get_path``) containing the raw
                training data, including the ``Attrition`` target column.
        """
        self.model_path = None  # set by model_save()
        self.model = None       # fitted estimator, set by model_train()
        logfile_name = 'train_' + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.logfile = Logger("../", logfile_name).get_logger()
        self.logfile.info("开始创建人才项目的训练模型对象")
        # Fitted preprocessing artifacts (set during training, reused at inference).
        # NOTE(review): "scalar" looks like a typo for "scaler"; kept as-is because
        # external code may read this attribute.
        self.scalar = None
        self.encoder_dict = None
        self.data_source = pd.read_csv(get_path(path))

    def model_train(self):
        """Train an XGBoost churn classifier and store it on ``self.model``.

        Splits the data 70/30 (stratified on the target), fits the
        preprocessing pipeline on the training fold only (avoids leakage),
        then trains XGBoost and logs the hold-out AUC and accuracy.

        Earlier experiments were removed from the body: LightGBM reached
        AUC~0.79, an XGB/LGB/CatBoost stacking ensemble AUC~0.82, and two
        XGB grid searches; the plain tuned XGB below reached AUC~0.86 and
        is the configuration kept.
        """
        df = self.data_source.copy()
        x = feature_extra(df)
        # Use a 1-D Series target: a one-column DataFrame target triggers
        # sklearn's DataConversionWarning and can confuse estimators.
        y = df['Attrition']

        # Stratify so the churn ratio is preserved in both folds — churn data
        # is typically imbalanced and an unstratified split skews evaluation.
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, test_size=0.3, random_state=42, stratify=y
        )
        # Fit scaler/encoders on the training fold only; reuse them on test.
        x_train, self.scalar, self.encoder_dict = feature_processing(x_train, is_train=True)
        x_test, _, _ = feature_processing(
            x_test, is_train=False, scaler=self.scalar, encoder_dict=self.encoder_dict
        )

        # Tuned XGBoost — hold-out AUC ~0.86 on this data.
        xgb = XGBClassifier(
            objective='binary:logistic',
            eval_metric='auc',
            random_state=25,
            n_jobs=-1,
            learning_rate=0.03,
            max_depth=7,
            n_estimators=900,
            subsample=1.0,
        )
        xgb.fit(x_train, y_train)
        y_pred_prob = xgb.predict_proba(x_test)[:, 1]
        auc = roc_auc_score(y_test, y_pred_prob)
        self.logfile.info(f"验证集 AUC: {auc}")
        self.model = xgb
        print(f"验证集 AUC: {auc}")
        print(f"精确度：{xgb.score(x_test, y_test)}")

    def model_save(self):
        """Serialize the trained model to ``../models/`` with a timestamped name.

        Stores the resulting path on ``self.model_path``.

        Raises:
            ValueError: if called before ``model_train`` (nothing to save).
        """
        # Guard against silently pickling ``None`` when training was skipped.
        if self.model is None:
            raise ValueError("model_train() must be called before model_save()")
        model_path = f'../models/xgb_model_{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}.pkl'
        joblib.dump(self.model, model_path)
        self.logfile.info(f"模型保存成功，保存路径{model_path}")
        self.model_path = model_path










if __name__ == '__main__':
    # Train the churn model on the bundled training set, then persist it.
    churn_model = ChurnModel('data/train.csv')
    churn_model.model_train()
    churn_model.model_save()




