import os
# 1. SMOTE过采样和RandomUnderSampler欠采样（来自imblearn库）
from imblearn.under_sampling import RandomUnderSampler
# 2. 用于串联过采样和欠采样步骤的Pipeline（来自imblearn库，注意不是sklearn的Pipeline）
from imblearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split, GridSearchCV
from lightgbm import LGBMClassifier
# pip install lightgbm -i https://pypi.tuna.tsinghua.edu.cn/simple
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from docutils.nodes import header
from matplotlib.lines import lineStyles
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, root_mean_squared_error , roc_auc_score,classification_report
from sklearn.preprocessing import StandardScaler
import sys
from pathlib import Path
import seaborn as sns
sys.path.append(str(Path(__file__).parent.parent))
from utils.log import Logger
from utils.common import data_preprocessing
import joblib
# Global matplotlib config: SimHei font so Chinese chart titles/labels render,
# with a slightly larger default text size for readability.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15

class TalentLossPredictModel:
    """Entry object for the talent-attrition prediction workflow.

    On construction it wires up a per-run, timestamped log file and loads
    the preprocessed training data from *path*.
    """

    def __init__(self, path):
        # Log file named train_<YYYYmmddHHMMSS> so every run gets its own file.
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        # Project logger (utils.log.Logger) writing under the parent directory.
        self.logfile = Logger('../', 'train_' + stamp).get_logger()
        # Emit a first line immediately to confirm the logger works.
        self.logfile.info('开始对人才流失项目进行预测...')
        # Load + preprocess the raw data via the shared project helper.
        self.data_source = data_preprocessing(path)


def ana_data(data):
    """Exploratory analysis: save a set of attrition charts under ../data/fig/.

    Parameters
    ----------
    data : pandas.DataFrame
        Preprocessed data. 'Attrition' is aggregated with mean/sum here, so it
        is assumed to be numeric 0/1 — TODO confirm against data_preprocessing.

    Side effects
    ------------
    Reads ../data/train.csv (the raw file, so BusinessTravel keeps readable
    string categories) and writes several PNG figures; returns nothing.
    """
    data = data.copy()
    # Raw CSV used for the first chart to keep the original category labels.
    d1 = pd.read_csv('../data/train.csv')
    plt.figure(figsize=(8, 6))

    # --- Business-travel frequency vs attrition rate ---
    # Attrition rate rises monotonically with travel frequency
    # (Non-Travel 0.083, Travel_Rarely 0.156, Travel_Frequently 0.224),
    # so ordinal encoding is a reasonable choice for this column.
    Business_Attrition = d1.groupby('BusinessTravel')['Attrition'].mean().reset_index()
    Business_Attrition.columns = ['BusinessTravel', 'Attrition']
    # Re-order the bars from least to most frequent travel.
    Business_Attrition = (Business_Attrition.set_index('BusinessTravel')
                          .reindex(['Non-Travel', 'Travel_Rarely', 'Travel_Frequently'])
                          .reset_index())
    plt.bar(Business_Attrition['BusinessTravel'], Business_Attrition.Attrition)
    plt.xlabel('Business Travel')
    plt.ylabel('Attrition')
    plt.title('出差频率与离职情况')
    plt.savefig('../data/fig/出差频率与离职情况.png')
    plt.close()

    # --- Stacked bar chart: attrition counts per age ---
    age_attrition_count = data.groupby(['Age', 'Attrition'], observed=False).size().unstack(fill_value=0)
    ax = age_attrition_count.plot(kind='bar', stacked=True, color=['orange', 'red'], figsize=(10, 6))
    ax.set_title('(a) 不同年龄离职人数', fontsize=16)
    ax.set_xlabel('Age', fontsize=14)
    ax.set_ylabel('Count', fontsize=14)
    ax.legend(['未离职', '离职'], fontsize=12)
    plt.savefig('../data/fig/不同年龄离职人数.png')
    plt.close()

    # --- Monthly income vs attrition ---
    # 1. Build 2000-yuan-wide, left-closed bins over the income range.
    bins = range(0, data['MonthlyIncome'].max() + 1000, 2000)
    # BUGFIX: labels previously said "{i}-{i+1000}" although each bin spans
    # 2000 yuan; labels now match the actual bin width.
    labels = [f'{i}-{i + 2000}' for i in bins[:-1]]
    # 2. Bucket the monthly income.
    data['IncomeRange'] = pd.cut(data['MonthlyIncome'], bins=bins, labels=labels, right=False)
    # 3. Attrition count and headcount per income bucket.
    income_groups = data.groupby('IncomeRange')['Attrition'].agg(['sum', 'count']).reset_index()
    income_groups.columns = ['IncomeRange', '离职人数', '总人数']
    # 4. Bar chart of attrition counts per bucket.
    sns.barplot(x='IncomeRange', y='离职人数', data=income_groups)
    plt.title('月薪区间与离职率关系')
    plt.xlabel('月薪区间')
    plt.ylabel('离职人数')
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('../data/fig/月薪区间与离职率关系.png')
    plt.close()

    # --- Overtime vs attrition rate ---
    overtime_rates = data.groupby('OverTime')['Attrition'].mean().reset_index()
    overtime_rates.columns = ['是否加班', '离职率']
    ax = sns.barplot(x='是否加班', y='离职率', data=overtime_rates, color='green')
    plt.title('加班状态与离职率对比')
    plt.xlabel('是否加班')
    plt.ylabel('离职率')
    plt.ylim(0, 0.5)
    # Percentage label above each bar.
    for p in ax.patches:
        height = p.get_height()
        ax.annotate(f'{height:.2%}',
                    (p.get_x() + p.get_width() / 2., height + 0.01),
                    ha='center', va='bottom', fontsize=10)
    # Relabel the x ticks in Chinese; assumes category order No, Yes — TODO confirm.
    ax.set_xticklabels(['否', '是'], fontsize=12, rotation=0)
    plt.tight_layout()
    plt.savefig('../data/fig/加班状态与离职率对比.png')
    plt.close()

    # --- Stacked bar chart: attrition counts by gender ---
    gender_attrition_count = data.groupby(['Gender', 'Attrition'], observed=False).size().unstack(fill_value=0)
    ax = gender_attrition_count.plot(kind='bar', stacked=True, color=['purple', 'orange'], figsize=(10, 6))
    ax.set_title('(a) 人才流失性别分布图', fontsize=16)
    ax.set_xlabel('Gender', fontsize=14)
    ax.set_ylabel('Count', fontsize=14)
    ax.legend(['未离职', '离职'], fontsize=12)
    ax.set_xticklabels(['女', '男'], fontsize=12, rotation=0)
    plt.savefig('../data/fig/人才流失性别分布图.png')
    plt.close()
    # BUGFIX: removed a stray plt.show() that followed plt.close() here — it
    # displayed an empty window since the figure was already closed.

    # --- Correlation heatmap over the numeric/ordinal columns ---
    columns = [
        'Age',  'DistanceFromHome', 'Education', 'EnvironmentSatisfaction','JobInvolvement', 'JobLevel',
        'JobSatisfaction','MonthlyIncome', 'NumCompaniesWorked','PercentSalaryHike', 'PerformanceRating',
        'RelationshipSatisfaction', 'StockOptionLevel', 'TotalWorkingYears','TrainingTimesLastYear',
        'WorkLifeBalance', 'YearsAtCompany','YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager',
        'Attrition']
    df = data[columns].copy()
    # Label-encode any remaining object columns so corr() can run.
    categorical_cols = df.select_dtypes(include=['object']).columns
    for col in categorical_cols:
        df[col] = pd.Categorical(df[col]).codes
    # Spearman correlation: rank-based, robust to non-normal distributions.
    correlation = df.corr(method='spearman')
    # Make the minus sign render correctly with the CJK font.
    plt.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(18, 15))
    cmap = sns.diverging_palette(230, 20, as_cmap=True)  # custom diverging palette
    sns.heatmap(
        correlation,
        annot=True,
        fmt=".2f",
        cmap=cmap,
    )
    plt.title('员工流失相关因素热力图', fontsize=16)
    plt.tight_layout()
    plt.savefig('../data/fig/相关性热力图.png')
    plt.show()

    # Show all DataFrame columns when printing during debugging sessions.
    pd.set_option('display.max_columns', None)






# Stand-alone feature-engineering function (not a method of the model class).
def feature_engineering(data, logger):
    """Feature engineering: derive bucketed, one-hot and ratio features.

    Parameters
    ----------
    data : pandas.DataFrame
        Preprocessed source data; must contain 'MonthlyIncome', 'Age',
        'DistanceFromHome', 'Attrition', the tenure columns used below, etc.
    logger : logging.Logger
        Project logger for progress messages.

    Returns
    -------
    tuple[pandas.DataFrame, pandas.Series]
        The engineered feature matrix and the 'Attrition' label column.
    """
    logger.info('开始进行特征工程...')
    # Work on a copy so the caller's frame is untouched.
    feature_data = data.copy()

    # Monthly-income level: quartile buckets '1'-'4', then one-hot encoded.
    logger.info('特征工程：拆分月收入等级并进行独热编码')
    quantiles_income = data['MonthlyIncome'].quantile([0, 0.25, 0.5, 0.75, 1]).tolist()
    # NOTE(review): the trailing '5' branch is unreachable — quantiles_income[4]
    # is the column maximum, so '4' already covers every remaining value.
    feature_data['月收入等级'] = feature_data['MonthlyIncome'].apply(
        lambda income: '1' if income <= quantiles_income[1] else
        '2' if income <= quantiles_income[2] else
        '3' if income <= quantiles_income[3] else
        '4' if income <= quantiles_income[4] else '5'
    )
    feature_data_MonthlyIncome = pd.get_dummies(feature_data['月收入等级'], dtype=int, prefix='月收入等级')

    # Age: quartile buckets '1'-'4' (same unreachable-'5' pattern), one-hot encoded.
    logger.info('特征工程：拆分年龄区间并进行独热编码')
    quantiles_age = data['Age'].quantile([0, 0.25, 0.5, 0.75, 1]).tolist()
    feature_data['年龄区间'] = feature_data['Age'].apply(
        lambda age: '1' if age <= quantiles_age[1] else
        '2' if age <= quantiles_age[2] else
        '3' if age <= quantiles_age[3] else
        '4' if age <= quantiles_age[4] else '5'
    )
    feature_data_Age = pd.get_dummies(feature_data['年龄区间'], dtype=int, prefix='年龄区间')

    # Home-to-office distance: fixed buckets 0-5 / 6-10 / 11-15 / 16+, one-hot encoded.
    logger.info('特征工程：拆分家到公司距离并进行独热编码')
    feature_data['家到公司距离组'] = feature_data['DistanceFromHome'].apply(
        lambda distance: '1' if 0 <= distance <= 5 else
        '2' if 6 <= distance <= 10 else
        '3' if 11 <= distance <= 15 else '4'
    )
    feature_data_DistanceFromHome = pd.get_dummies(feature_data['家到公司距离组'], dtype=int, prefix='距离组')

    # Append the three one-hot frames column-wise (same row index).
    logger.info('特征工程：合并特征')
    feature_data = pd.concat([
        feature_data,
        feature_data_MonthlyIncome,
        feature_data_Age,
        feature_data_DistanceFromHome,
    ], axis=1)

    # Drop constant/identifier columns with no predictive value.
    logger.warning('删除无用特征:EmployeeNumber,Over18,StandardHours')
    feature_data.drop(columns=['EmployeeNumber', 'Over18', 'StandardHours'], inplace=True)
    # Drop the raw column and the intermediate bucket labels now that the
    # one-hot versions exist.
    logger.warning('隐藏重复特征:MonthlyIncome,月收入等级,年龄区间,家到公司距离组, 恢复在in Line73')
    feature_data.drop(columns=['MonthlyIncome','月收入等级','年龄区间','家到公司距离组'], inplace=True)

    logger.info('返回feature_data , 1')
    # return feature_data

    # Second pass: tenure-related features built from a fresh copy of the input.
    data = data.copy()
    y = data['Attrition']
    data = data[['StockOptionLevel','TotalWorkingYears','TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany','YearsInCurrentRole','YearsSinceLastPromotion','YearsWithCurrManager']]
    # (Disabled) binary has-stock-options indicator.
    # data['StockOptionLevel_None'] = data['StockOptionLevel'].apply(lambda x: 0 if x > 0 else 1)
    # data['StockOptionLevel_NotNone'] = data['StockOptionLevel'].apply(lambda x: 1 if x > 0 else 0)

    # Total working years bucketed into <6 / 6-10 / 10-15 / >=15 indicator columns.
    logger.info('特征工程：划分工作年限')
    data['TotalWorkingYears_Less6'] = data['TotalWorkingYears'].apply(lambda x: 1 if x < 6 else 0)
    data['TotalWorkingYears_6-10'] = data['TotalWorkingYears'].apply(lambda x: 1 if x >= 6 and x < 10 else 0)
    data['TotalWorkingYears_10-15'] = data['TotalWorkingYears'].apply(lambda x: 1 if x >= 10 and x<15  else 0)
    data['TotalWorkingYears_more15'] = data['TotalWorkingYears'].apply(lambda x: 1 if x >= 15 else 0)

    # Years in current role bucketed into <2 / 2-4 / 4-7 / >=7 indicator columns.
    logger.info('特征工程：当前岗位工作年限')
    data['YearsInCurrentRole_Less2'] = data['YearsInCurrentRole'].apply(lambda x: 1 if x < 2 else 0)
    data['YearsInCurrentRole_2-4'] = data['YearsInCurrentRole'].apply(lambda x: 1 if x >= 2 and x < 4 else 0)
    data['YearsInCurrentRole_4-7'] = data['YearsInCurrentRole'].apply(lambda x: 1 if x >= 4 and x<7  else 0)
    data['YearsInCurrentRole_more7'] = data['YearsInCurrentRole'].apply(lambda x: 1 if x >= 7 else 0)

    logger.info('特征工程：计算相关比值并新增列')
    # Share of the career spent at this company.
    data['WorkingYearsRatio'] =data['YearsAtCompany']/data['TotalWorkingYears']
    # Share of the company tenure spent under the current manager.
    data['YearsWithCurrManagerRatio'] = data['YearsWithCurrManager']/data['YearsAtCompany']
    # Interaction term: company tenure x total working years.
    data['Promotion_Total_Interaction'] = data['YearsAtCompany'] * data['TotalWorkingYears']

    # Fill NaNs from 0/0 divisions with the column mean (~30 rows per the log message).
    # NOTE(review): x/0 with x>0 yields inf, not NaN, and fillna will not touch
    # it — confirm the denominators are never 0 while the numerator is positive.
    logger.warning('特征工程：填充比值部分存在的空值(30)')
    data['WorkingYearsRatio'] = data['WorkingYearsRatio'].fillna(data['WorkingYearsRatio'].mean())
    data['YearsWithCurrManagerRatio'] = data['YearsWithCurrManagerRatio'].fillna(
    data['YearsWithCurrManagerRatio'].mean())

    # Drop the raw tenure columns now superseded by the buckets/ratios above.
    logger.debug('删除无用特征:YearsInCurrentRole,YearsWithCurrManager,YearsAtCompany,TotalWorkingYears,YearsSinceLastPromotion')
    data.drop(columns=['YearsInCurrentRole'], inplace=True)
    data.drop(columns=['YearsWithCurrManager'], inplace=True)
    data.drop(columns=['YearsAtCompany'], inplace=True)
    data.drop(columns=['TotalWorkingYears'], inplace=True)
    data.drop(columns=['YearsSinceLastPromotion'], inplace=True)

    # (Disabled) standardization of the tenure features.
    # columns = data.columns
    # transfer = StandardScaler()
    # data = transfer.fit_transform(data)
    # data = pd.DataFrame(data, columns=columns)
    merged_df = pd.concat([feature_data, data], axis=1)
    # O(n^2) pairwise scan for value-identical columns; the LATER duplicate
    # (cols[j]) is recorded so the first occurrence is the one kept.
    duplicate_columns = []
    cols = merged_df.columns
    for i in range(len(cols)):
        for j in range(i + 1, len(cols)):
            # Series.equals treats aligned NaNs as equal.
            if merged_df[cols[i]].equals(merged_df[cols[j]]):
                duplicate_columns.append(cols[j])  # record the duplicate's name

    # Drop the duplicates, then the raw columns replaced by derived features.
    # NOTE(review): this drop raises KeyError if the dedup pass already removed
    # any of these five columns — it relies on them surviving as non-duplicates.
    merged_df = merged_df.drop(columns=duplicate_columns)
    merged_df.drop(columns=['Age','Attrition','DistanceFromHome','TotalWorkingYears','YearsInCurrentRole'], inplace=True)
    return  merged_df,y

def model_train(X, Y, logger):
    """Train and evaluate a LightGBM attrition classifier.

    Pipeline: stratified train/test split -> SMOTE over-sampling of the
    minority class followed by random under-sampling of the majority class
    (training fold only) -> LGBMClassifier fit -> AUC / classification-report
    evaluation on the untouched test fold.

    Parameters
    ----------
    X : pandas.DataFrame
        Engineered feature matrix from feature_engineering().
    Y : pandas.Series
        Binary attrition label.
    logger : logging.Logger
        Project logger; metrics are now recorded here as well as printed.

    Returns
    -------
    LGBMClassifier
        The fitted model, so callers can persist it with joblib.dump.
        (Previous versions returned None; ignoring the return stays valid.)
    """
    logger.info("开始训练模型...")
    # 1. Stratified split keeps the class ratio identical in both folds.
    x_train, x_test, y_train, y_test = train_test_split(
        X, Y, test_size=0.2, random_state=99, stratify=Y)

    # 2. Rebalance ONLY the training fold: SMOTE raises the minority class to
    #    20% of the majority, then random under-sampling trims the majority
    #    down to a 2:1 ratio. (imblearn Pipeline, not sklearn's.)
    resampler = Pipeline(steps=[
        ('over', SMOTE(sampling_strategy=0.2, random_state=110)),
        ('under', RandomUnderSampler(sampling_strategy=0.5, random_state=20)),
    ])
    x_res, y_res = resampler.fit_resample(x_train, y_train)

    # 3. Hyper-parameters fixed from an earlier GridSearchCV run over
    #    max_depth/num_leaves/min_child_samples/bagging_fraction/feature_fraction.
    lgbm = LGBMClassifier(
        max_depth=11,
        num_leaves=30,
        min_child_samples=18,
        bagging_fraction=0.6,
        feature_fraction=0.4,
        random_state=26,
        verbose=-1
    )
    lgbm.fit(x_res, y_res,
             eval_set=[(x_train, y_train), (x_test, y_test)],
             eval_metric='auc')

    # 4. Evaluate on the held-out test fold.
    y_pred_lgbm = lgbm.predict_proba(x_test)[:, 1]
    y_pred = lgbm.predict(x_test)
    auc = roc_auc_score(y_test, y_pred_lgbm)
    report = classification_report(y_test, y_pred)

    print(f'初始模型lgbm的AUC值: {auc}')
    print(f'初始模型评估报告:\n {report}')
    # Also persist the metrics in the run log (previously print-only, leaving
    # the logger parameter almost unused).
    logger.info(f'初始模型lgbm的AUC值: {auc}')
    logger.info(f'初始模型评估报告:\n {report}')

    # 5. Return the fitted model so the caller can save it, e.g.:
    #    joblib.dump(model, '../model/lgbm_20250720.pkl')
    return lgbm

# 主类定义



if __name__ == '__main__':
    # Build the workflow object: sets up logging and loads the raw data.
    tlp = TalentLossPredictModel(r'..\data\train.csv')
    # Engineer features and split off the Attrition label.
    feature_data_columns, label = feature_engineering(tlp.data_source, tlp.logfile)
    # Fit and evaluate the classifier on the engineered features.
    model_train(feature_data_columns, label, tlp.logfile)

