# matplotlib.use('TkAgg')  # 设置 matplotlib 后端为 TkAgg
import sys
from pathlib import Path
import seaborn as sns
ROOT = Path(__file__).resolve().parent.parent
sys.path.append(str(ROOT))
import os
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import numpy as np
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from utils.log import Logger
from utils.common import data_preprocessing
import xgboost as xgb
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score, GridSearchCV
from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, f1_score,classification_report
import joblib
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15
from lightgbm import LGBMClassifier
from sklearn.tree import DecisionTreeClassifier

class PowerLoadModel(object):
    """Loads and preprocesses the training data and sets up a run-specific logger.

    NOTE(review): the class name suggests power-load forecasting, but the data
    and downstream code are employee-attrition analytics — name kept for
    compatibility with existing callers.
    """

    def __init__(self, path):
        # Timestamped log-file name, e.g. "train_20250607120000".
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', f'train_{stamp}').get_logger()
        # Cleaned data set produced by the shared preprocessing helper.
        self.data_source = data_preprocessing(path)


def ana_data(data):
    """Exploratory analysis of the attrition data set.

    Draws eight distribution/rate subplots plus a numeric-feature correlation
    heatmap, saves both figures under ../data/fig/ (creating the directory if
    it does not exist), and finally shows the plots (blocking).

    :param data: preprocessed DataFrame; must contain the columns referenced
                 below (Age, MonthlyIncome, Department, Attrition, ...).
                 Attrition is assumed to be numeric 0/1 so that groupby-mean
                 yields a rate — TODO confirm against data_preprocessing.
    """
    df = data.copy()  # work on a copy; don't mutate the caller's frame

    # savefig() raises FileNotFoundError if the target directory is absent.
    os.makedirs('../data/fig', exist_ok=True)

    fig = plt.figure(figsize=(40, 80))

    # Age distribution
    ax1 = fig.add_subplot(421)
    ax1.hist(df['Age'], bins=20)
    ax1.set_title('年龄整体分布情况')
    ax1.set_xlabel('年龄')

    # Monthly income distribution
    ax2 = fig.add_subplot(422)
    ax2.hist(df['MonthlyIncome'], bins=20)
    ax2.set_title('月收入整体分布情况')
    ax2.set_xlabel('月收入')

    # Average monthly income per department
    department_income_mean = df.groupby('Department', as_index=False)['MonthlyIncome'].mean()
    ax3 = fig.add_subplot(423)
    ax3.bar(department_income_mean['Department'], department_income_mean['MonthlyIncome'])
    ax3.set_title('各部门平均月收入')
    ax3.set_xlabel('部门')

    # Attrition rate by business-travel frequency
    business_travel_attrition = df.groupby('BusinessTravel')['Attrition'].mean()
    ax4 = fig.add_subplot(424)
    ax4.bar(business_travel_attrition.index, business_travel_attrition.values)
    ax4.set_title('不同商务旅行情况的离职率')
    ax4.set_xlabel('商务旅行情况')

    # Average total working years per job level
    job_level_years = df.groupby('JobLevel', as_index=False)['TotalWorkingYears'].mean()
    ax5 = fig.add_subplot(425)
    ax5.plot(job_level_years['JobLevel'], job_level_years['TotalWorkingYears'], color='blue')
    ax5.set_title('不同工作级别平均工作年限')
    ax5.set_xlabel('工作级别')

    # Average performance rating per education field
    education_field_performance = df.groupby('EducationField', as_index=False)['PerformanceRating'].mean()
    ax6 = fig.add_subplot(426)
    ax6.bar(education_field_performance['EducationField'], education_field_performance['PerformanceRating'])
    ax6.set_title('不同教育领域的平均绩效评级')
    ax6.set_xlabel('教育领域')

    # Average job satisfaction, overtime vs. no overtime
    overtime_satisfaction = df.groupby('OverTime')['JobSatisfaction'].mean()
    ax7 = fig.add_subplot(427)
    ax7.bar(overtime_satisfaction.index, overtime_satisfaction.values)
    ax7.set_title('加班与非加班的平均工作满意度')
    ax7.set_xlabel('是否加班')

    # Attrition rate by marital status
    marital_status_attrition = df.groupby('MaritalStatus')['Attrition'].mean()
    ax8 = fig.add_subplot(428)
    ax8.bar(marital_status_attrition.index, marital_status_attrition.values)
    ax8.set_title('婚姻状况与离职率')
    ax8.set_xlabel('婚姻状况')
    plt.savefig('../data/fig/人才流失特征分析图.png')

    fig1 = plt.figure(figsize=(40, 20))
    # Correlation-matrix heatmap over numeric features only
    ax9 = fig1.add_subplot()
    numeric_features = df.select_dtypes(include=[np.number])
    corr_matrix = numeric_features.corr()
    sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', fmt='.2f', ax=ax9)
    ax9.grid(alpha=0.5)
    ax9.set_title('特征相关性矩阵热力图')
    ax9.set_xticklabels(ax9.get_xticklabels(), rotation=45)
    plt.savefig('../data/fig/人才流失特征分析热力图.png')
    plt.show()


def feature_engineering(data):
    """Build the model feature matrix from the preprocessed frame.

    Core features: BusinessTravel, OverTime, MaritalStatus.
    Supplementary: Department, JobLevel, EducationField, JobRole.
    Interaction terms (computed for experimentation but currently EXCLUDED
    from the returned column list):
      - OvertimeIncome = OverTime x MonthlyIncome (overtime/income balance)
      - CareerMatch = JobLevel x TotalWorkingYears (career-development match)

    :param data: preprocessed DataFrame; Attrition assumed already numeric
                 0/1 — TODO confirm against data_preprocessing.
    :return: (feature frame restricted to the selected columns, column list).
             feature_columns[0] is the label 'Attrition'; the rest are inputs.
    """
    feature_data = data.copy()

    # Binary encodings.
    feature_data['OverTime'] = feature_data['OverTime'].map({'Yes': 1, 'No': 0})
    feature_data['Gender'] = feature_data['Gender'].map({'Male': 1, 'Female': 0})

    # One-hot encodings — prefixes kept exactly as before so downstream
    # column names stay stable; all dummies concatenated in one pass.
    travel_dummies = pd.get_dummies(feature_data['BusinessTravel'], prefix='Travel')
    marital_dummies = pd.get_dummies(feature_data['MaritalStatus'], prefix='Marital')
    department_dummies = pd.get_dummies(feature_data['Department'], prefix='Dept')
    job_level_dummies = pd.get_dummies(feature_data['JobLevel'], prefix='JobLevel')
    education_field_dummies = pd.get_dummies(feature_data['EducationField'], prefix='education')
    jobrole_dummies = pd.get_dummies(feature_data['JobRole'], prefix='jobrole')
    feature_data = pd.concat(
        [feature_data, travel_dummies, marital_dummies, department_dummies,
         job_level_dummies, education_field_dummies, jobrole_dummies],
        axis=1)

    # Interaction terms (see docstring; not in feature_columns yet).
    # OverTime is already 0/1 here, so the product zeroes income for non-overtime rows.
    feature_data['OvertimeIncome'] = feature_data['OverTime'] * feature_data['MonthlyIncome']
    feature_data['CareerMatch'] = feature_data['JobLevel'] * feature_data['TotalWorkingYears']

    # Label first, then the selected raw/encoded inputs. Columns such as
    # DistanceFromHome, TotalWorkingYears, YearsAtCompany etc. were tried and
    # dropped during feature selection.
    feature_columns = (['Attrition',
                        'Age',
                        'OverTime',
                        'EnvironmentSatisfaction',
                        'Gender',
                        'JobInvolvement',
                        'JobSatisfaction',
                        'NumCompaniesWorked',
                        'PerformanceRating',
                        'RelationshipSatisfaction',
                        'StockOptionLevel',
                        'WorkLifeBalance',
                        'YearsInCurrentRole',
                        'Education',
                        'PercentSalaryHike']
                       + list(travel_dummies.columns)           # BusinessTravel
                       + list(marital_dummies.columns)          # MaritalStatus
                       + list(department_dummies.columns)       # Department
                       + list(job_level_dummies.columns)        # JobLevel
                       + list(education_field_dummies.columns)  # EducationField
                       + list(jobrole_dummies.columns))         # JobRole

    return feature_data[feature_columns], feature_columns


def model_train(data, features):
    """Train the final XGBoost attrition classifier, report AUC, persist it.

    Test-set ROC-AUC from earlier experiments on the same split, kept for
    reference (the commented-out code they came from has been removed):
      LogisticRegression 0.856 | DecisionTree 0.687 | RandomForest 0.838
      grid-searched RF 0.843  | AdaBoost 0.895     | GBDT 0.839
      LightGBM 0.827          | XGBoost (tuned below) ~0.872

    :param data: feature frame produced by feature_engineering().
    :param features: column list; features[0] is the label, the rest inputs.
    :return: (fitted estimator, test-set ROC-AUC).
    """
    x = data[features[1:]]
    y = data[features[0]]
    # Stratified split: the attrition label is imbalanced.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, random_state=25, test_size=0.2, stratify=y)

    # Tuned hyperparameters (inline comments show earlier candidate values).
    estimator = xgb.XGBClassifier(
        n_estimators=500,        # 413
        max_depth=3,             # 3
        learning_rate=0.03,      # 0.06
        subsample=0.69,          # 0.69
        colsample_bytree=0.60,   # 0.63
        min_child_weight=2,
        eval_metric='logloss',
    )
    estimator.fit(x_train, y_train)

    # Evaluate on the held-out split with ROC-AUC (threshold-free metric,
    # appropriate for the imbalanced label).
    y_pred_proba = estimator.predict_proba(x_test)[:, 1]
    auc = roc_auc_score(y_test, y_pred_proba)
    print(f'train_AUC_score{auc}')

    # Persist the trained model; create the target directory if needed so
    # joblib.dump does not fail with FileNotFoundError.
    os.makedirs('../model', exist_ok=True)
    joblib.dump(estimator, '../model/rc_20250607.pkl')
    return estimator, auc



if __name__ == '__main__':
    # End-to-end pipeline: load + preprocess the data, run the exploratory
    # plots, build the feature matrix, then train and persist the model.
    pm = PowerLoadModel('../data/train.csv')
    ana_data(pm.data_source)  # saves figures and calls plt.show() (blocking)
    feature_data, feature_columns = feature_engineering(pm.data_source)
    model_train(feature_data, feature_columns)