"""
主要的训练模块代码
    步骤:
        1.创建训练的类
          初始化日志对象,加载预处理的数据
        2.数据分析
          查看整体离职的分布
          查看离职随小时的变化情况
          查看离职随月份变化情况
          对比查看工作日和周末离职情况
        3.模型训练
"""
import datetime

import joblib
import numpy as np
from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import VarianceThreshold
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import class_weight
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score, accuracy_score, mean_absolute_error, precision_score, recall_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from utils.common import data_preprocessing
from utils.log import Logger
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import warnings

# Silence library warnings (pandas SettingWithCopyWarning, sklearn deprecation
# notices, ...) so the training logs stay readable.
warnings.filterwarnings('ignore')

# SimHei font so the Chinese axis labels/titles render in matplotlib figures.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 25


class AttritionModel:
    """Employee-attrition training pipeline.

    Loads the preprocessed data set, performs exploratory analysis (overall
    attrition distribution + per-feature univariate AUC), engineers features,
    then trains, evaluates and persists five classifiers.
    """

    # Features shortlisted by the AUC analysis in ana_data(); shared by both
    # feature-engineering strategies so the two lists can never drift apart.
    SELECTED_FEATURES = ['Attrition', 'StockOptionLevel', 'JobLevel', 'EnvironmentSatisfaction',
                         'YearsWithCurrManager', 'OverTime', 'Age', 'WorkLifeBalance',
                         'JobInvolvement', 'NumCompaniesWorked', 'MonthlyIncome',
                         'DistanceFromHome', 'Department', 'MaritalStatus']

    def __init__(self, path):
        """Set up logging and load the preprocessed data.

        :param path: path of the raw training CSV, handed to
                     utils.common.data_preprocessing.
        """
        # Timestamped log file name, e.g. train_20240101123000.
        train_log_name = 'train_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logger = Logger(root_path='../', log_name=train_log_name).get_logger()

        # Preprocessed DataFrame; expected to contain an 'Attrition' column.
        self.data_source = data_preprocessing(path)

    @staticmethod
    def _encode_label(y):
        """Return *y* as a numeric 0/1 series; maps 'Yes'/'No' if still raw."""
        if y.dtype == 'object':
            return y.apply(lambda v: 1 if str(v).lower() == 'yes' else 0)
        return y

    def ana_data(self):
        """Exploratory analysis.

        Plots the overall attrition distribution, then ranks every feature by
        its univariate AUC against the label (feature values used directly as
        scores). The printed ranking is what SELECTED_FEATURES was built from.
        """
        self.logger.info('============开始数据分析============')
        # Overall attrition distribution.
        fig = plt.figure(figsize=(10, 40))
        ax1 = fig.add_subplot(411)
        sns.countplot(x='Attrition', data=self.data_source, ax=ax1)
        ax1.set_title('整体离职分布')
        ax1.set_xlabel('是否离职')
        plt.show()

        # Work on a copy so the label-encoding below never mutates the source.
        data_copy = self.data_source.copy()

        # Label-encode every categorical column except the target.
        for column in data_copy.select_dtypes(include=['object']).columns:
            if column != 'Attrition':
                le = LabelEncoder()
                data_copy[column] = le.fit_transform(data_copy[column].astype(str))

        # Separate features and target; roc_auc_score needs a numeric target.
        X = data_copy.drop('Attrition', axis=1)
        y = self._encode_label(data_copy['Attrition'])

        # Univariate AUC per feature.
        auc_scores = []
        for column in X.columns:
            try:
                auc = roc_auc_score(y, X[column])
                # AUC < 0.5 means a negative association with the label;
                # mirror it so the score always measures association strength.
                if auc < 0.5:
                    auc = 1 - auc
                auc_scores.append({'feature': column, 'auc': auc})
            except Exception as e:
                print(f"计算特征 {column} 的AUC时出错: {e}")

        auc_df = pd.DataFrame(auc_scores).sort_values('auc', ascending=False)
        print(auc_df)
        # Features with AUC > 0.6 feed the SELECTED_FEATURES class constant.

    def _encoded_feature_frame(self, columns):
        """Return a copy of the selected columns with categoricals encoded.

        Binary/ordinal columns (currently only 'OverTime') get a
        LabelEncoder; all other object columns are one-hot encoded to avoid
        introducing a fake ordering.
        """
        # .copy() prevents SettingWithCopyWarning and accidental write-through
        # to self.data_source when we assign encoded columns below.
        data = self.data_source[columns].copy()
        for col in data.select_dtypes(include=['object']).columns:
            if col == 'Attrition':
                continue
            if col in ['OverTime']:  # binary flag: plain label encoding keeps it 1-D
                le = LabelEncoder()
                data[col] = le.fit_transform(data[col])
            else:
                # NOTE: with many categories this can blow up dimensionality.
                dummies = pd.get_dummies(data[col], prefix=col)
                data = pd.concat([data.drop(col, axis=1), dummies], axis=1)
        return data

    def feature_engineering_with_AUC(self):
        """
        Feature engineering based on the AUC shortlist.

        1. keep SELECTED_FEATURES
        2. encode categorical columns
        3. drop rows with missing values
        :return: (feature DataFrame, 0/1 label Series, feature column Index)
        """
        self.logger.info('============开始特征工程============')
        data = self._encoded_feature_frame(self.SELECTED_FEATURES)
        print('特征数据: ', data.head())

        print('编码后的特征数据: ', data.head())
        # Drop incomplete rows after encoding so features and label stay aligned.
        data.dropna(inplace=True)
        # Encode the target the same way feature_engineering_with_variance does
        # (fixes string labels reaching f1_score/SMOTE downstream).
        data['Attrition'] = self._encode_label(data['Attrition'])
        print(data.columns)
        self.logger.info('============特征工程结束============')
        return data.drop('Attrition', axis=1), data['Attrition'], data.columns.drop('Attrition')

    def feature_engineering_with_variance(self):
        """
        Feature engineering combining the AUC shortlist with a variance filter.

        :return: (feature DataFrame, 0/1 label Series, selected feature Index)
        """
        self.logger.info('============开始特征工程============')
        data = self._encoded_feature_frame(self.SELECTED_FEATURES)

        # Separate features and label; normalise the label to 0/1.
        X = data.drop('Attrition', axis=1)
        y = self._encode_label(data['Attrition'])

        # Drop near-constant features. Threshold is deliberately low; raise it
        # (or switch to model-based selection) for a stricter filter.
        selector = VarianceThreshold(threshold=0.01)
        X_selected = selector.fit_transform(X)

        selected_features = X.columns[selector.get_support()]
        print(f"原始特征数: {X.shape[1]}")
        print(f"选择后特征数: {X_selected.shape[1]}")
        print(f"选中的特征: {list(selected_features)}")

        # Bug fix: pd.DataFrame(...) re-indexes from 0 while y kept the
        # original index; align both on a fresh RangeIndex before filtering,
        # otherwise y[X_selected_df.index] can select the wrong rows.
        X_selected_df = pd.DataFrame(X_selected, columns=selected_features)
        y = y.reset_index(drop=True)
        X_selected_df.dropna(inplace=True)
        y = y[X_selected_df.index]

        print('处理空值后的特征数据: ', X_selected_df.head())
        print('处理空值后的标签数据: ', y.head())
        print('处理空值后的特征列名: ', X_selected_df.columns.tolist())

        self.logger.info('============特征工程结束============')
        return X_selected_df, y, selected_features

    def model_train(self):
        """
        Train and compare five classifiers.

        1. split 60/20/20 into train/val/test (positional slices)
        2. oversample the minority class on the train split with SMOTE
        3. standardise (scaler fitted on the resampled train split only)
        4. fit LR / RF / XGBoost / decision tree / GBDT on train+val
        5. report accuracy/precision/recall/F1/MAE on the held-out test set
        6. persist every model under ../model/
        """
        self.logger.info('============开始模型训练============')
        # Features from the AUC analysis; swap in
        # self.feature_engineering_with_variance() to use the variance filter.
        x, y, feature_columns = self.feature_engineering_with_AUC()

        # 60/20/20 positional split. NOTE(review): rows are not shuffled or
        # stratified — assumes the source file has no ordering bias; confirm.
        train_index = int(len(x) * 0.6)
        val_index = int(len(x) * 0.8)
        x_train, x_val, x_test = x.iloc[:train_index, :], x.iloc[train_index:val_index, :], x.iloc[val_index:, :]
        y_train, y_val, y_test = y.iloc[:train_index], y.iloc[train_index:val_index], y.iloc[val_index:]

        # Oversample only the training split so val/test stay untouched.
        smote = SMOTE(random_state=42, k_neighbors=3)
        x_train_resampled, y_train_resampled = smote.fit_resample(x_train, y_train)
        print(f"SMOTE处理前训练集大小: {x_train.shape}, 正负样本分布: {y_train.value_counts().to_dict()}")
        print(
            f"SMOTE处理后训练集大小: {x_train_resampled.shape}, 正负样本分布: {y_train_resampled.value_counts().to_dict()}")

        # Fit the scaler on the (resampled) training data only, then apply the
        # same transform to val/test — no statistics leak from held-out data.
        scaler = StandardScaler()
        x_train_scaled = scaler.fit_transform(x_train_resampled)
        x_val_scaled = scaler.transform(x_val)
        x_test_scaled = scaler.transform(x_test)

        # Merge train+val for the final fit: hyperparameters below were fixed
        # by an earlier GridSearchCV pass, so the validation split is free.
        x_total_train_scaled = np.concatenate([x_train_scaled, x_val_scaled])
        y_total_train = pd.concat([y_train_resampled, y_val])

        # ---- models (hyperparameters from the earlier grid search) ----
        lr = LogisticRegression(class_weight='balanced', C=10, random_state=42, max_iter=1000)

        rf = RandomForestClassifier(
            n_estimators=200,
            max_depth=10,
            class_weight='balanced',
            random_state=42
        )

        # Bug fix: scale_pos_weight is conventionally (#negative / #positive),
        # not (#total / #positive) as before.
        n_pos = int((y_total_train == 1).sum())
        pos_weight = (len(y_total_train) - n_pos) / n_pos if n_pos else 1.0
        xgb = XGBClassifier(
            n_estimators=200,
            max_depth=6,
            learning_rate=0.1,
            random_state=42,
            subsample=1.0,
            scale_pos_weight=pos_weight  # residual class imbalance after SMOTE+val merge
        )

        dt = DecisionTreeClassifier(
            max_depth=10,
            class_weight='balanced',
            min_samples_split=5,
            random_state=42
        )

        gb_best = GradientBoostingClassifier(
            n_estimators=200,
            learning_rate=0.1,
            max_depth=6,
            random_state=42,
            subsample=1.0  # subsampling guards against overfitting
        )

        # (display title, short tag for MAE lines, model, dump path)
        model_specs = [
            ('逻辑回归模型', 'lr', lr, '../model/lr_model.pth'),
            ('随机森林模型', 'rf', rf, '../model/rf_model.pth'),
            ('XGBoost模型', 'xgb', xgb, '../model/xgboost_model.pth'),
            ('决策树模型', 'dt', dt, '../model/dt_model.pth'),
            ('GBDT模型', 'GBDT', gb_best, '../model/gb_model.pth'),
        ]

        # Train every model and collect its test-set predictions.
        predictions = {}
        for _title, short, model, _path in model_specs:
            model.fit(x_total_train_scaled, y_total_train)
            predictions[short] = model.predict(x_test_scaled)

        # ---- evaluation ----
        print("=== 模型评估结果 ===")
        for title, short, _model, _path in model_specs:
            y_pred = predictions[short]
            print(f'{title}:')
            print(f'  准确率: {accuracy_score(y_test, y_pred):.4f}')
            print(f'  精确率: {precision_score(y_test, y_pred):.4f}')
            print(f'  召回率: {recall_score(y_test, y_pred):.4f}')
            print(f'  F1_score: {f1_score(y_test, y_pred):.4f}')

        # MAE on 0/1 labels equals the test error rate; kept (with the original
        # reporting order) for continuity with earlier runs.
        for short in ('xgb', 'lr', 'dt', 'rf', 'GBDT'):
            mae = mean_absolute_error(y_test, predictions[short])
            print(f'{short}模型评估指标MAE: {mae}')

        # Persist all fitted models.
        for _title, _short, model, path in model_specs:
            joblib.dump(model, path)
        self.logger.info('============模型训练结束============')


if __name__ == '__main__':
    # Entry point: run the exploratory analysis, then the full training pass.
    trainer = AttritionModel('../data/train.csv')
    trainer.ana_data()
    # trainer.feature_engineering_with_AUC()  # uncomment to inspect features only
    trainer.model_train()
