
from sklearn.model_selection import GridSearchCV
import  os
from sklearn.feature_selection import SelectKBest, chi2
import joblib
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor, XGBClassifier

def data_train():
    """Train an XGBoost employee-attrition classifier and save it to disk.

    Pipeline: load ../../data/raw/train.csv, integer-encode the categorical
    (object-dtype) columns, grid-search an XGBClassifier over a small
    hyper-parameter space with 5-fold CV, report hold-out ROC AUC, and
    dump the fitted search object to "model2" via joblib.

    Side effects: reads the CSV, prints the best parameters and AUC,
    writes the model file "model2" in the current working directory.
    """
    # Step 1: load the raw training data.
    df = pd.read_csv('../../data/raw/train.csv')

    # Step 2: encode categorical columns.
    # LabelEncoder maps each object column to integer codes in a single
    # column (unlike one-hot, which would expand each into several columns).
    # NOTE(review): sklearn documents LabelEncoder for *targets*;
    # OrdinalEncoder is the intended tool for features, though the codes
    # produced per column are the same here.
    le = LabelEncoder()
    for col in df.select_dtypes(include='object').columns:
        df[col] = le.fit_transform(df[col])

    # Step 3.1: pick the feature columns (selected via an earlier
    # chi-squared screening) and split off a 20% hold-out set.
    feature_names = [
        'OverTime', 'StockOptionLevel', 'JobLevel', 'JobRole',
        'MaritalStatus', 'TotalWorkingYears', 'Age', 'JobInvolvement',
        'YearsWithCurrManager', 'YearsInCurrentRole', 'JobSatisfaction',
        'YearsAtCompany', 'EnvironmentSatisfaction', 'NumCompaniesWorked',
        'WorkLifeBalance', 'MonthlyIncome',
    ]
    x = df[feature_names]
    y = df['Attrition']
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=85,
    )

    # Step 3.2: grid search + 5-fold cross-validation over the XGBoost
    # hyper-parameters. max_depth is supplied only through the grid —
    # any value passed to the constructor would be overridden anyway.
    param_dict = {
        'n_estimators': [50, 100, 150, 200],
        'max_depth': [3, 5, 6],
        'learning_rate': [0.01],
    }
    es = XGBClassifier()
    gs_es = GridSearchCV(estimator=es, param_grid=param_dict, cv=5)
    gs_es.fit(x_train, y_train)
    print(f"最优参数组合:{gs_es.best_params_}")

    # Step 4: evaluate on the hold-out set with ROC AUC, using the
    # predicted probability of the positive class (column 1).
    y_pre = gs_es.predict_proba(x_test)[:, 1]
    print(f'预测的AUC为{roc_auc_score(y_test,y_pre)}')

    # Step 5: persist the fitted search object; loading it back and
    # calling predict/predict_proba delegates to the best estimator.
    joblib.dump(gs_es, "model2")

# Entry point: run training only when executed as a script, not on import.
if __name__ == '__main__':
    data_train()