# Employee-attrition model training script
# 1. Imports
import pandas as pd
import matplotlib.pyplot as plt
import os
from sklearn.preprocessing import LabelEncoder
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error, root_mean_squared_error, \
    roc_auc_score, classification_report, confusion_matrix, RocCurveDisplay
# Sample-weight helper used to counter class imbalance
from sklearn.utils.class_weight import compute_sample_weight
import joblib
# Stratified cross-validation splitter (currently unused below)
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV

# 2. Use the SimHei font so Chinese labels render correctly, and bump the
# default font size for readability.
plt.rcParams.update({'font.family': 'SimHei', 'font.size': 15})

# 3. Load the data and run a quick exploratory analysis
def ana_data(path1, path2):
    """Load the train/test CSVs and plot the attrition class balance.

    Args:
        path1: path to the training CSV; must contain an 'Attrition' column
               with 0/1 labels.
        path2: path to the test CSV (loaded as a sanity check only — it is
               not used further inside this function).
    """
    # 3.1 Load the data
    data = pd.read_csv(path1)
    data_test = pd.read_csv(path2)
    # 3.2 Inspect the data.
    # BUG FIX: the original printed `data.info` without calling it, which only
    # showed the bound-method repr. DataFrame.info() prints directly and
    # returns None, so call it on its own line rather than inside an f-string.
    print('数据信息:')
    data.info()
    print(f'数据前五行展示:\n{data.head()}')
    print(f'数据的统计描述:\n{data.describe()}')
    # 3.3 Count 0/1 labels in 'Attrition'
    attrition_counts = data['Attrition'].value_counts()
    # 3.4 Pie chart of the attrition ratio.
    # NOTE(review): the label order assumes value_counts() lists the
    # "stayed" (0) count first — true only while stayers are the majority
    # class; confirm against the data.
    labels = ['未离职', '已离职']
    colors = ['#FFE4E1', '#AFEEEE']
    plt.figure(figsize=(8, 8))
    plt.pie(attrition_counts, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
    plt.title('员工离职情况占比', fontsize=18)
    plt.axis('equal')  # keep the pie circular rather than elliptical
    plt.tight_layout()
    # BUG FIX: savefig must run BEFORE show() — show() hands the figure to the
    # GUI event loop and with most backends leaves it blank afterwards, so the
    # original order saved an empty image. Also make sure the target
    # directory exists so savefig does not raise.
    os.makedirs('../data/fig', exist_ok=True)
    plt.savefig('../data/fig/离职情况饼图.png')
    plt.show()

# 4. Feature engineering
def feature_engineering(data, data_test):
    """Encode categorical columns and split features from labels.

    Args:
        data: training DataFrame (label assumed to be its FIRST column).
        data_test: test DataFrame (label assumed to be its LAST column).

    Returns:
        Tuple (x_train, y_train, x_test, y_test).
    """
    result = data.copy(deep=True)
    result_test = data_test.copy(deep=True)

    # Encode each string column to integer codes with LabelEncoder.
    # The encoder is re-fitted per column on the TRAIN data and the same
    # fitted mapping is immediately applied to the test data, so train/test
    # codes stay consistent (transform raises if the test set contains a
    # category unseen in training).
    label_encoder = LabelEncoder()
    for col in ['BusinessTravel', 'MaritalStatus', 'OverTime', 'Gender', 'JobRole']:
        result[col] = label_encoder.fit_transform(result[col])
        result_test[col] = label_encoder.transform(result_test[col])

    # FIX: single source of truth for the feature list — it was previously
    # duplicated verbatim for train and test, which risked the two copies
    # silently drifting apart.
    feature_cols = ['Age', 'DistanceFromHome', 'Education', 'Gender', 'MaritalStatus',
                    'BusinessTravel', 'JobInvolvement', 'JobLevel', 'JobRole',
                    'MonthlyIncome', 'PercentSalaryHike', 'StockOptionLevel',
                    'OverTime', 'WorkLifeBalance', 'YearsInCurrentRole',
                    'YearsSinceLastPromotion', 'YearsWithCurrManager',
                    'NumCompaniesWorked', 'YearsAtCompany', 'TotalWorkingYears',
                    'EnvironmentSatisfaction', 'RelationshipSatisfaction', 'JobSatisfaction']
    x_train = result.loc[:, feature_cols]
    # NOTE(review): the label is taken from the FIRST column of the train
    # frame but the LAST column of the test frame — presumably train.csv and
    # test2.csv store 'Attrition' in different positions; verify against the
    # actual files.
    y_train = result.iloc[:, 0]
    x_test = result_test.loc[:, feature_cols]
    y_test = result_test.iloc[:, -1]
    return x_train, y_train, x_test, y_test

# 5. Cross-validation + grid search for model hyper-parameters
def model_CV_train(x_train, y_train, param_dict):
    """Grid-search XGBClassifier hyper-parameters with 4-fold cross-validation.

    Args:
        x_train: training feature matrix.
        y_train: training labels (0/1).
        param_dict: hyper-parameter grid for GridSearchCV.

    Returns:
        The fitted GridSearchCV object, so callers can use best_params_ /
        best_estimator_ programmatically (the original returned None and the
        results could only be read off the console).
    """
    # Per-sample weights that compensate for the class imbalance; passed
    # through to XGBClassifier.fit via GridSearchCV.fit.
    sample_weight = compute_sample_weight('balanced', y_train)
    print("开始网格化搜索")
    # Model selection by ROC-AUC over a 4-fold CV split.
    grid_cv = GridSearchCV(estimator=XGBClassifier(),
                           param_grid=param_dict,
                           cv=4,
                           scoring="roc_auc",
                           verbose=1)
    grid_cv.fit(x_train, y_train, sample_weight=sample_weight)
    # Report the best hyper-parameter combination and its CV score.
    print(grid_cv.best_params_)
    print("最佳评分:", grid_cv.best_score_)
    print("结束网格化搜索")
    # IMPROVEMENT: return the fitted search object (backward compatible —
    # existing callers ignored the previous None return).
    return grid_cv

# 6. Model training
def model_train(x_train, y_train, x_test, y_test, n_estimators, max_depth, learning_rate):
    """Train an XGBClassifier and report test-set metrics.

    Prints a classification report, confusion matrix, MSE/MAE on the
    predicted probabilities, and ROC-AUC; shows the ROC curve.

    Args:
        x_train, y_train: training features and 0/1 labels.
        x_test, y_test: test features and 0/1 labels.
        n_estimators, max_depth, learning_rate: XGBoost hyper-parameters.

    Returns:
        The fitted XGBClassifier (backward compatible — previously None).
    """
    # Per-sample weights to compensate for the class imbalance.
    sample_weight = compute_sample_weight('balanced', y_train)
    # 6.1 Build the model.
    # FIX: the local variable used to be named `xgb`, shadowing the
    # module-level `import xgboost as xgb` alias — renamed to `model`.
    model = XGBClassifier(n_estimators=n_estimators,
                          max_depth=max_depth,
                          learning_rate=learning_rate,
                          reg_alpha=1.0,   # L1 regularisation strength
                          # FIX: the old comment claimed L2 was disabled, but
                          # reg_lambda=2.0 ENABLES L2 regularisation.
                          reg_lambda=2.0,  # L2 regularisation strength
                          )
    # 6.2 Fit with the balanced sample weights.
    model.fit(x_train, y_train, sample_weight=sample_weight)
    # 6.3 Positive-class probabilities on the test set.
    y_pred_test = model.predict_proba(x_test)[:, 1]

    # Threshold at 0.5 to obtain hard labels for the report and the
    # confusion matrix.
    y_pred_test_class = (y_pred_test >= 0.5).astype(int)
    print(classification_report(y_test, y_pred_test_class))
    confu_m = confusion_matrix(y_test, y_pred_test_class)
    print("混淆矩阵：\n", confu_m)

    # 6.4 Error metrics computed on the probabilities (MSE here is the
    # Brier score, since y_test is 0/1).
    mse_test = mean_squared_error(y_true=y_test, y_pred=y_pred_test)
    mae_test = mean_absolute_error(y_true=y_test, y_pred=y_pred_test)
    print(f"模型在测试集上的均方误差：{mse_test}")
    print(f"模型在测试集上的平均绝对误差：{mae_test}")
    print(f"模型在测试集上的auc:{roc_auc_score(y_test, y_pred_test)}")
    RocCurveDisplay.from_estimator(model, x_test, y_test)
    plt.show()
    return model

if __name__ == '__main__':
    # 1. Input files
    input_file1 = os.path.join('../data/train.csv')
    input_file2 = os.path.join('../data/test2.csv')
    # 2. Exploratory analysis (prints summaries, plots the attrition pie chart).
    # FIX: the result used to be bound to a variable named `model`, but
    # ana_data only plots/prints and returns None — the misleading binding
    # is dropped.
    ana_data(input_file1, input_file2)
    # 3. Load the raw data for modelling
    data = pd.read_csv(input_file1)
    data_test = pd.read_csv(input_file2)

    # 4. Feature engineering
    x_train, y_train, x_test, y_test = feature_engineering(data, data_test)

    # 5. Hyper-parameter search space
    param_dict = {
        'n_estimators': list(range(290, 300)),
        'max_depth': [1],
        'learning_rate': [0.1, 0.2, 0.28, 0.01]
    }
    # Run the grid search to find the best hyper-parameters (slow; enable as needed):
    # model_CV_train(x_train, y_train, param_dict)
    # Past search results:
    #   298, 1, 0.28 / 505, 1, 0.1
    #   lr=0.28, depth=1, n=293 -> AUC 0.8691
    #   lr=0.28, depth=1, n=301 -> AUC 0.8688

    # 6. Train the final model with the best hyper-parameters found above
    n_estimators = 293
    max_depth = 1
    learning_rate = 0.28
    model_train(x_train, y_train, x_test, y_test, n_estimators, max_depth, learning_rate)



