"""
员工离职预测模型系统
功能：通过机器学习模型预测员工是否会离职，支持多模型训练、网格搜索调优、堆叠集成及模型评估
版本：1.0.0
"""
import os
import pickle
import sys
from pathlib import Path

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_validate, GridSearchCV
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score,
                             roc_auc_score, confusion_matrix, classification_report)

from src.utils.logger import Logger
logging = Logger('test_log').get_logger()

# ==================== 配置常量 ====================
class Config:
    """Central project configuration: paths, file names and model parameters."""

    # --- Directory layout ---
    DATA_DIR = Path(r"D:\WorkArea\WorkSpace\Python\talents_loss\data\raw")  # raw input data
    MODEL_DIR = Path("models")  # where trained model pickles are written
    RESULT_DIR = Path("results")  # where metrics and figures are written

    # --- File names ---
    TRAIN_FILE = "train.csv"
    TEST_FILE = "test.csv"
    EVAL_CSV = "模型评估指标汇总.csv"

    # --- Modeling parameters ---
    RANDOM_STATE = 42  # seed so runs are reproducible
    CV_FOLDS = 5  # number of cross-validation folds
    TOP_N_MODELS = 3  # how many base models feed the stacking ensemble
    FIG_DPI = 300  # resolution for saved figures

    @classmethod
    def init_directories(cls):
        """Create the model/result output directories if they are missing."""
        for target in (cls.MODEL_DIR, cls.RESULT_DIR):
            if target.exists():
                continue
            target.mkdir(parents=True, exist_ok=True)
            logging.info(f"创建目录: {target}")


# ==================== 日志配置 ====================
# NOTE(review): the previous logging.basicConfig-based setup_logging() was
# superseded by src.utils.logger.Logger (configured at the top of this file);
# the dead commented-out implementation was removed.


# ==================== 数据处理模块 ====================
def load_and_preprocess_data(train_path, test_path):
    """
    Load the train/test CSV files and split each into features and label.

    Parameters:
        train_path (Path): path to the training CSV
        test_path (Path): path to the test CSV

    Returns:
        X_train (pd.DataFrame): training features
        y_train (pd.Series): training labels
        X_test (pd.DataFrame): test features
        y_test (pd.Series): test labels

    Raises:
        FileNotFoundError: if either data file does not exist
        KeyError: if the label column "Attrition" is missing
    """
    try:
        logging.info(f"加载训练集: {train_path}")
        train_df = pd.read_csv(train_path)
        logging.info(f"加载测试集: {test_path}")
        test_df = pd.read_csv(test_path)

        # Report columns with missing values; compute the per-column null
        # counts once per frame instead of twice (original scanned each
        # DataFrame two times)
        train_null_counts = train_df.isnull().sum()
        test_null_counts = test_df.isnull().sum()
        logging.info(f"训练集缺失值情况:\n{train_null_counts[train_null_counts > 0]}")
        logging.info(f"测试集缺失值情况:\n{test_null_counts[test_null_counts > 0]}")

        # Split features / label (label column is assumed to be "Attrition")
        X_train = train_df.drop("Attrition", axis=1)
        y_train = train_df["Attrition"]
        X_test = test_df.drop("Attrition", axis=1)
        y_test = test_df["Attrition"]

        logging.info(f"数据加载完成 - 训练集样本数: {len(X_train)}, 测试集样本数: {len(X_test)}")
        return X_train, y_train, X_test, y_test

    except FileNotFoundError as e:
        logging.error(f"数据文件不存在: {e.filename}")
        raise  # re-raise so the caller can abort the pipeline
    except Exception as e:
        logging.error(f"数据加载失败: {str(e)}", exc_info=True)
        raise


def build_preprocessor(X_train):
    """
    Build the preprocessing pipeline for numeric and categorical features.

    Strategy:
        - numeric features: median imputation + standardization
        - categorical features: most-frequent imputation + one-hot encoding
          (first level dropped to avoid multicollinearity)

    Parameters:
        X_train (pd.DataFrame): training features, used to infer feature types

    Returns:
        preprocessor (ColumnTransformer): combined preprocessing pipeline
        numeric_features (list): numeric feature names
        categorical_features (list): categorical feature names
    """
    # Use np.number so every numeric dtype (int32, float32, ...) is covered,
    # not only int64/float64; also treat pandas "category" columns as
    # categorical — both generalizations are backward compatible
    numeric_features = X_train.select_dtypes(include=[np.number]).columns.tolist()
    categorical_features = X_train.select_dtypes(include=['object', 'category']).columns.tolist()

    logging.info(f"数值特征({len(numeric_features)}个): {numeric_features}")
    logging.info(f"分类特征({len(categorical_features)}个): {categorical_features}")

    # Numeric pipeline: impute then scale to zero mean / unit variance
    numeric_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())
    ])

    # Categorical pipeline: impute then one-hot encode
    categorical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('encoder', OneHotEncoder(
            drop='first',            # drop first level to reduce redundancy
            sparse_output=False,     # dense output for downstream estimators
            handle_unknown='ignore'  # ignore categories unseen during fit
        ))
    ])

    # Route each feature group through its own pipeline
    preprocessor = ColumnTransformer(
        transformers=[
            ('num', numeric_transformer, numeric_features),
            ('cat', categorical_transformer, categorical_features)
        ])

    return preprocessor, numeric_features, categorical_features


# ==================== 模型训练模块 ====================
def train_with_grid_search(X_train, y_train, preprocessor, top_n=Config.TOP_N_MODELS):
    """
    Tune several candidate models with grid search + cross-validation and
    keep the top N by cross-validated AUC.

    Parameters:
        X_train (pd.DataFrame): training features
        y_train (pd.Series): training labels
        preprocessor (ColumnTransformer): preprocessing pipeline
        top_n (int): number of best models to keep

    Returns:
        top_models (pd.DataFrame): one row per kept model (name, CV metrics,
            best parameters, fitted pipeline)

    Raises:
        RuntimeError: if every candidate model fails to train
    """
    # Candidate models and their hyper-parameter grids
    models_config = {
        "逻辑回归": {
            "model": LogisticRegression(
                class_weight='balanced',  # compensate for class imbalance
                random_state=Config.RANDOM_STATE,
                max_iter=1000  # raise iteration cap to ensure convergence
            ),
            "param_grid": {
                'classifier__C': [0.01, 0.1, 1, 10],  # smaller C = stronger regularization
                'classifier__solver': ['liblinear', 'saga']
            }
        },
        "随机森林": {
            "model": RandomForestClassifier(random_state=Config.RANDOM_STATE),
            "param_grid": {
                'classifier__n_estimators': [100, 200, 300],
                'classifier__max_depth': [5, 8, 10, None],  # None = unbounded depth
                'classifier__min_samples_split': [2, 5]
            }
        },
        "XGBoost": {
            "model": XGBClassifier(random_state=Config.RANDOM_STATE),
            "param_grid": {
                'classifier__learning_rate': [0.01, 0.1, 0.2],
                'classifier__n_estimators': [50, 100, 200],
                'classifier__max_depth': [3, 5, 7]
            }
        },
        "LightGBM": {
            "model": LGBMClassifier(random_state=Config.RANDOM_STATE),
            "param_grid": {
                'classifier__learning_rate': [0.01, 0.1, 0.2],
                'classifier__n_estimators': [50, 100, 200],
                'classifier__num_leaves': [20, 31, 50]  # controls model complexity
            }
        },
        "SVM": {
            "model": SVC(
                probability=True,  # probability output needed for stacking later
                class_weight='balanced',
                random_state=Config.RANDOM_STATE
            ),
            "param_grid": {
                'classifier__C': [0.1, 1, 10],
                'classifier__kernel': ['linear', 'rbf'],
                'classifier__gamma': ['scale', 'auto']
            }
        }
    }

    results = []
    for model_name, config in models_config.items():
        logging.info(f"\n====== 开始{model_name}的网格搜索 ======")

        # Full pipeline: preprocessing followed by the classifier
        pipeline = Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', config["model"])
        ])

        # AUC as the tuning target — more informative than accuracy on
        # imbalanced attrition data
        grid_search = GridSearchCV(
            estimator=pipeline,
            param_grid=config["param_grid"],
            cv=Config.CV_FOLDS,
            scoring='roc_auc',
            n_jobs=-1,  # use all available CPU cores
            verbose=1
        )

        try:
            grid_search.fit(X_train, y_train)
        except Exception as e:
            logging.error(f"{model_name}训练失败: {str(e)}", exc_info=True)
            continue  # skip this model, keep trying the rest

        best_pipeline = grid_search.best_estimator_
        best_params = grid_search.best_params_
        best_auc = grid_search.best_score_  # best cross-validated AUC
        cv_acc = cross_val_score(best_pipeline, X_train, y_train, cv=Config.CV_FOLDS, scoring='accuracy').mean()

        results.append({
            "模型名称": model_name,
            "交叉验证AUC": round(best_auc, 4),
            "交叉验证准确率": round(cv_acc, 4),
            "最佳参数": best_params,
            "模型管道": best_pipeline
        })

        logging.info(f"{model_name}最佳参数: {best_params}")
        logging.info(f"{model_name}交叉验证性能 - AUC: {best_auc:.4f}, 准确率: {cv_acc:.4f}")

    # Guard: if every fit failed, sort_values on an empty frame would raise a
    # confusing KeyError — fail fast with a clear message instead
    if not results:
        raise RuntimeError("所有模型训练均失败，无法选择Top N模型")

    # Rank by CV AUC and keep the top N
    results_df = pd.DataFrame(results).sort_values(by="交叉验证AUC", ascending=False)
    top_models = results_df.head(top_n)
    logging.info(f"\nTop {top_n}模型排序:\n{top_models[['模型名称', '交叉验证AUC', '交叉验证准确率']].to_string()}")

    # Persist the winning pipelines for later evaluation
    for _, row in top_models.iterrows():
        model_path = Config.MODEL_DIR / f"{row['模型名称']}_最优模型.pkl"
        with open(model_path, "wb") as f:
            pickle.dump(row["模型管道"], f)
        logging.info(f"已保存模型: {model_path}")

    return top_models


def train_stacking_model(X_train, y_train, base_models):
    """
    Train a stacking ensemble over the top base models, with logistic
    regression as the meta-learner.

    Parameters:
        X_train (pd.DataFrame): training features
        y_train (pd.Series): training labels
        base_models (pd.DataFrame): base-model rows produced by
            train_with_grid_search

    Returns:
        all_models (pd.DataFrame): the stacking-model row followed by the
            base-model rows
    """
    logging.info("\n====== 开始训练堆叠模型 ======")

    # Base estimators in (name, fitted pipeline) form
    estimators = [
        (row["模型名称"], row["模型管道"])
        for _, row in base_models.iterrows()
    ]

    # Meta-features are the base models' predicted probabilities; logistic
    # regression combines them
    stacking_clf = StackingClassifier(
        estimators=estimators,
        final_estimator=LogisticRegression(
            class_weight='balanced',
            random_state=Config.RANDOM_STATE
        ),
        cv=Config.CV_FOLDS,  # out-of-fold predictions generate the meta-features
        stack_method='predict_proba'  # probabilities carry more signal than hard labels
    )

    # Base pipelines already embed preprocessing, so no extra steps are needed
    stacking_pipeline = Pipeline([('stacking', stacking_clf)])

    try:
        stacking_pipeline.fit(X_train, y_train)
    except Exception as e:
        logging.error(f"堆叠模型训练失败: {str(e)}", exc_info=True)
        raise

    # Score AUC and accuracy in ONE cross-validation pass instead of two
    # separate cross_val_score runs — halves the expensive per-fold refits
    # of the full stacking pipeline while yielding identical fold splits
    cv_metrics = cross_validate(
        stacking_pipeline, X_train, y_train,
        cv=Config.CV_FOLDS, scoring=('roc_auc', 'accuracy')
    )
    cv_auc = cv_metrics['test_roc_auc'].mean()
    cv_acc = cv_metrics['test_accuracy'].mean()

    logging.info(f"堆叠模型交叉验证性能 - AUC: {cv_auc:.4f}, 准确率: {cv_acc:.4f}")

    # Persist the stacking model alongside the base models
    stacking_path = Config.MODEL_DIR / "堆叠模型_最优模型.pkl"
    with open(stacking_path, "wb") as f:
        pickle.dump(stacking_pipeline, f)
    logging.info(f"已保存堆叠模型: {stacking_path}")

    # Put the stacking row first so downstream reports list it on top
    stacking_result = pd.DataFrame([{
        "模型名称": "堆叠模型",
        "交叉验证AUC": round(cv_auc, 4),
        "交叉验证准确率": round(cv_acc, 4),
        "模型管道": stacking_pipeline
    }])
    all_models = pd.concat([stacking_result, base_models], ignore_index=True)

    return all_models


# ==================== 模型评估模块 ====================
def _plot_confusion_matrix(model_name, y_true, y_pred):
    """Render one model's confusion-matrix heatmap and save it under RESULT_DIR."""
    cm = confusion_matrix(y_true, y_pred)

    plt.figure(figsize=(8, 6))
    sns.heatmap(
        cm,
        annot=True,  # write the counts into the cells
        fmt="d",  # integer formatting
        cmap="Blues",
        xticklabels=["未离职(0)", "离职(1)"],
        yticklabels=["未离职(0)", "离职(1)"]
    )
    plt.title(f"{model_name}混淆矩阵", fontsize=12)
    plt.xlabel("预测标签", fontsize=10)
    plt.ylabel("实际标签", fontsize=10)
    plt.tight_layout()

    cm_path = Config.RESULT_DIR / f"{model_name}_混淆矩阵.png"
    plt.savefig(cm_path, dpi=Config.FIG_DPI)
    plt.close()  # release the figure to avoid memory growth across models
    logging.info(f"已保存混淆矩阵: {cm_path}")


def evaluate_on_test_set(X_test, y_test, models):
    """
    Evaluate every persisted model on the test set: compute metrics, save a
    summary CSV and one confusion-matrix image per model.

    Parameters:
        X_test (pd.DataFrame): test features
        y_test (pd.Series): test labels (assumed binary 0/1, 1 = attrition)
        models (pd.DataFrame): rows naming the models to evaluate; each model
            is reloaded from its pickle under Config.MODEL_DIR
    """
    logging.info("\n====== 开始测试集评估 ======")

    # Reload each model from disk and predict on the test set
    y_preds = {}  # model name -> predicted class labels
    y_probs = {}  # model name -> predicted probability of the positive class
    for model_name in models["模型名称"]:
        model_path = Config.MODEL_DIR / f"{model_name}_最优模型.pkl"
        try:
            with open(model_path, "rb") as f:
                pipeline = pickle.load(f)
        except FileNotFoundError:
            logging.error(f"模型文件不存在: {model_path}")
            continue
        except Exception as e:
            logging.error(f"加载模型{model_name}失败: {str(e)}", exc_info=True)
            continue

        y_preds[model_name] = pipeline.predict(X_test)
        y_probs[model_name] = pipeline.predict_proba(X_test)[:, 1]  # P(attrition)
        logging.info(f"完成{model_name}的测试集预测")

    # Compute the headline metrics for every model that predicted successfully
    eval_results = []
    for model_name, y_pred in y_preds.items():
        y_prob = y_probs[model_name]
        eval_results.append({
            "模型名称": model_name,
            "准确率": round(accuracy_score(y_test, y_pred), 4),
            "精确率": round(precision_score(y_test, y_pred), 4),
            "召回率": round(recall_score(y_test, y_pred), 4),
            "F1分数": round(f1_score(y_test, y_pred), 4),
            "AUC": round(roc_auc_score(y_test, y_prob), 4)
        })

    # Persist the metric summary
    eval_df = pd.DataFrame(eval_results)
    eval_path = Config.RESULT_DIR / Config.EVAL_CSV
    eval_df.to_csv(eval_path, index=False)
    logging.info(f"测试集评估结果:\n{eval_df.to_string()}")
    logging.info(f"评估指标已保存至: {eval_path}")

    # Matplotlib setup for CJK axis labels
    plt.rcParams["font.family"] = ["SimHei"]  # font able to render Chinese text
    plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering correctly

    for model_name, y_pred in y_preds.items():
        _plot_confusion_matrix(model_name, y_test, y_pred)


# ==================== 主函数 ====================
def main():
    """Run the full pipeline: load data, preprocess, train, stack, evaluate."""
    try:
        # Prepare output directories (logging is configured at import time
        # via src.utils.logger)
        Config.init_directories()

        logging.info("====== 员工离职预测模型训练流程启动 ======")

        # 1. Load the data and split features/labels
        logging.info("\n====== 步骤1: 数据加载与预处理 ======")
        train_path = Config.DATA_DIR / Config.TRAIN_FILE
        test_path = Config.DATA_DIR / Config.TEST_FILE
        X_train, y_train, X_test, y_test = load_and_preprocess_data(train_path, test_path)

        # 2. Build the preprocessing pipeline
        logging.info("\n====== 步骤2: 构建预处理管道 ======")
        preprocessor, _, _ = build_preprocessor(X_train)

        # 3. Train candidate models, keep the top N
        logging.info("\n====== 步骤3: 模型训练与Top N选择 ======")
        top_models = train_with_grid_search(X_train, y_train, preprocessor)

        # 4. Train the stacking ensemble on the top models
        logging.info("\n====== 步骤4: 训练堆叠模型 ======")
        all_models = train_stacking_model(X_train, y_train, top_models)

        # 5. Evaluate everything on the held-out test set
        logging.info("\n====== 步骤5: 测试集评估与结果保存 ======")
        evaluate_on_test_set(X_test, y_test, all_models)

        logging.info("\n====== 所有流程完成！ ======")

    except Exception as e:
        logging.critical(f"程序执行失败: {str(e)}", exc_info=True)
        # sys.exit instead of the site-injected exit(): the exit builtin is a
        # site-module convenience and may be absent (frozen apps, python -S)
        sys.exit(1)


# Script entry point: run the full training/evaluation pipeline
if __name__ == "__main__":
    main()