import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score,
                             roc_auc_score, confusion_matrix, classification_report)

# Matplotlib configuration: use a CJK-capable font so Chinese labels render,
# and keep the minus sign displayable while a non-ASCII font is active.
plt.rcParams.update({
    "font.family": ["SimHei"],
    "axes.unicode_minus": False,
})


def load_and_preprocess_data(train_path, test_path, target="Attrition"):
    """Load the train/test CSVs and split each into features and label.

    Parameters
    ----------
    train_path, test_path : str, path-like or file-like
        CSV sources accepted by ``pandas.read_csv``; each must contain the
        feature columns plus the ``target`` column.
    target : str, default "Attrition"
        Name of the label column to separate from the features.

    Returns
    -------
    tuple
        ``(X_train, y_train, X_test, y_test)``.
    """
    train_df = pd.read_csv(train_path)
    test_df = pd.read_csv(test_path)

    # Report columns that contain missing values; the actual imputation is
    # handled later by the preprocessing pipeline. Compute the null counts
    # once per frame instead of twice.
    train_nulls = train_df.isnull().sum()
    test_nulls = test_df.isnull().sum()
    print(f"训练集缺失值情况:\n{train_nulls[train_nulls > 0]}")
    print(f"测试集缺失值情况:\n{test_nulls[test_nulls > 0]}")

    # Separate features from the target variable.
    X_train = train_df.drop(target, axis=1)
    y_train = train_df[target]
    X_test = test_df.drop(target, axis=1)
    y_test = test_df[target]

    return X_train, y_train, X_test, y_test


def build_preprocessor(X_train):
    """Build the preprocessing pipeline (imputation, scaling, encoding).

    Parameters
    ----------
    X_train : pandas.DataFrame
        Training features used to infer which columns are numeric vs. categorical.

    Returns
    -------
    tuple
        ``(preprocessor, numeric_features, categorical_features)`` where
        ``preprocessor`` is a fitted-ready ColumnTransformer.
    """
    # Split columns by kind. np.number covers every numeric dtype
    # (int32/float32/..., not just int64/float64), and 'category' columns are
    # treated like 'object' so they are one-hot encoded instead of silently
    # dropped by the ColumnTransformer's default remainder='drop'.
    numeric_features = X_train.select_dtypes(include=np.number).columns.tolist()
    categorical_features = X_train.select_dtypes(include=['object', 'category']).columns.tolist()
    print(f"数值特征: {numeric_features}\n分类特征: {categorical_features}")

    # Numeric columns: median imputation, then standardization.
    numeric_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())
    ])

    # Categorical columns: mode imputation, then one-hot encoding.
    # drop='first' avoids the dummy-variable trap; handle_unknown='ignore'
    # keeps prediction from failing on categories unseen during training.
    categorical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('encoder', OneHotEncoder(drop='first', sparse_output=False, handle_unknown='ignore'))
    ])

    # Route each column group through its transformer.
    preprocessor = ColumnTransformer(
        transformers=[
            ('num', numeric_transformer, numeric_features),
            ('cat', categorical_transformer, categorical_features)
        ])

    return preprocessor, numeric_features, categorical_features


def train_with_grid_search(X_train, y_train, preprocessor, top_n=3):
    """Grid-search each candidate model with 5-fold CV and keep the top N by AUC.

    Parameters
    ----------
    X_train, y_train : training features and labels.
    preprocessor : ColumnTransformer
        Preprocessing step prepended to every model pipeline.
    top_n : int, default 3
        Number of best models (ranked by cross-validated AUC) to persist
        to disk and return.

    Returns
    -------
    pandas.DataFrame
        One row per selected model, including the fitted pipeline under
        the "模型管道" column.
    """
    # Candidate models and their hyper-parameter grids.
    models = {
        "逻辑回归": {
            "model": LogisticRegression(class_weight='balanced', random_state=42, max_iter=1000),
            "param_grid": {
                'classifier__C': [0.01, 0.1, 1, 10],
                'classifier__solver': ['liblinear', 'saga']
            }
        },
        "随机森林": {
            "model": RandomForestClassifier(random_state=42),
            "param_grid": {
                'classifier__n_estimators': [100, 200, 300],
                'classifier__max_depth': [5, 8, 10, None],
                'classifier__min_samples_split': [2, 5]
            }
        },
        "XGBoost": {
            "model": XGBClassifier(random_state=42),
            "param_grid": {
                'classifier__learning_rate': [0.01, 0.1, 0.2],
                'classifier__n_estimators': [50, 100, 200],
                'classifier__max_depth': [3, 5, 7]
            }
        },
        "LightGBM": {
            "model": LGBMClassifier(random_state=42),
            "param_grid": {
                'classifier__learning_rate': [0.01, 0.1, 0.2],
                'classifier__n_estimators': [50, 100, 200],
                'classifier__num_leaves': [20, 31, 50]
            }
        },
        "SVM": {
            "model": SVC(probability=True, class_weight='balanced', random_state=42),
            "param_grid": {
                'classifier__C': [0.1, 1, 10],
                'classifier__kernel': ['linear', 'rbf'],
                'classifier__gamma': ['scale', 'auto']
            }
        }
    }

    # Evaluate every candidate via grid search.
    results = []
    for name, config in models.items():
        print(f"\n开始{name}的网格搜索...")
        # Bundle preprocessing with the estimator so each CV fold fits the
        # preprocessor on its own training split (no data leakage).
        pipeline = Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', config["model"])
        ])

        grid_search = GridSearchCV(
            estimator=pipeline,
            param_grid=config["param_grid"],
            cv=5,
            scoring='roc_auc',
            n_jobs=-1,
            verbose=1
        )
        grid_search.fit(X_train, y_train)

        # refit=True (GridSearchCV's default) has already refit
        # best_estimator_ on the full training set, so the pipeline below is
        # ready to use — no extra fit is required before saving.
        best_pipeline = grid_search.best_estimator_
        best_params = grid_search.best_params_
        best_auc = grid_search.best_score_

        # Secondary metric: 5-fold CV accuracy of the tuned pipeline.
        cv_acc = cross_val_score(best_pipeline, X_train, y_train, cv=5, scoring='accuracy').mean()

        results.append({
            "模型名称": name,
            "交叉验证AUC": round(best_auc, 4),
            "交叉验证准确率": round(cv_acc, 4),
            "最佳参数": best_params,
            "模型管道": best_pipeline
        })
        print(f"{name}最佳参数: {best_params}")
        print(f"{name}最佳交叉验证AUC: {best_auc:.4f}, 准确率: {cv_acc:.4f}")

    # Rank by cross-validated AUC and keep the best top_n models.
    results_df = pd.DataFrame(results).sort_values(by="交叉验证AUC", ascending=False)
    top_models = results_df.head(top_n)
    # The header reflects top_n instead of a hard-coded "Top3".
    print(f"\nTop{top_n}模型排序:\n", top_models[["模型名称", "交叉验证AUC", "交叉验证准确率"]])

    # Persist the selected pipelines. They were already refit on the full
    # training set by GridSearchCV, so re-fitting here would only repeat work.
    for _, row in top_models.iterrows():
        model_name = row["模型名称"]
        with open(f"{model_name}_最优模型.pkl", "wb") as f:
            pickle.dump(row["模型管道"], f)
        print(f"已保存模型: {model_name}_最优模型.pkl")

    return top_models


def train_stacking_model(X_train, y_train, preprocessor, base_models):
    """Fit and persist a stacking ensemble built from the selected base pipelines.

    Parameters
    ----------
    X_train, y_train : training features and labels.
    preprocessor : ColumnTransformer
        Unused here — every base pipeline already embeds its own preprocessing;
        kept to mirror the other training functions' signatures.
    base_models : pandas.DataFrame
        Rows with "模型名称" (name) and "模型管道" (fitted pipeline) columns.

    Returns
    -------
    pandas.DataFrame
        The stacking model's row concatenated ahead of ``base_models``.
    """
    print("\n====== 训练堆叠模型 ======")

    # Collect (name, pipeline) pairs for the ensemble. Each pipeline already
    # contains the preprocessor, so no extra preprocessing step is needed.
    estimators = []
    for _, entry in base_models.iterrows():
        estimators.append((entry["模型名称"], entry["模型管道"]))

    # Logistic regression serves as the meta-learner; base predictions are
    # generated out-of-fold (cv=5) as class probabilities.
    stacking_clf = StackingClassifier(
        estimators=estimators,
        final_estimator=LogisticRegression(class_weight='balanced', random_state=42),
        cv=5,
        stack_method='predict_proba'
    )

    # Single-step pipeline wrapper keeps the saved artifact's shape consistent
    # with the base-model pickles.
    stacking_pipeline = Pipeline([('stacking', stacking_clf)])

    stacking_pipeline.fit(X_train, y_train)

    # Cross-validated performance of the full ensemble.
    cv_auc = cross_val_score(stacking_pipeline, X_train, y_train, cv=5, scoring='roc_auc').mean()
    cv_acc = cross_val_score(stacking_pipeline, X_train, y_train, cv=5, scoring='accuracy').mean()
    print(f"堆叠模型交叉验证AUC: {cv_auc:.4f}, 准确率: {cv_acc:.4f}")

    # Persist the fitted ensemble alongside the base models.
    with open("堆叠模型_最优模型.pkl", "wb") as f:
        pickle.dump(stacking_pipeline, f)
    print("已保存模型: 堆叠模型_最优模型.pkl")

    # Prepend the stacking row to the base-model table.
    stacking_row = pd.DataFrame([{
        "模型名称": "堆叠模型",
        "交叉验证AUC": round(cv_auc, 4),
        "交叉验证准确率": round(cv_acc, 4),
        "模型管道": stacking_pipeline
    }])

    return pd.concat([stacking_row, base_models], ignore_index=True)


def evaluate_on_test_set(X_test, y_test, models):
    """Reload each saved model pipeline and score it on the held-out test set.

    Prints a metric table, saves one confusion-matrix heatmap PNG per model,
    and writes all metrics to a summary CSV.

    Parameters
    ----------
    X_test, y_test : test features and labels.
    models : pandas.DataFrame
        Must contain a "模型名称" column; each name maps to a pickle file
        named ``{name}_最优模型.pkl`` on disk.
    """
    # One pass per model: load the pickle, predict, and compute the metrics.
    # Hard predictions are kept for the confusion matrices drawn below.
    y_preds = {}
    eval_results = []
    for name in models["模型名称"]:
        with open(f"{name}_最优模型.pkl", "rb") as f:
            pipeline = pickle.load(f)
        predictions = pipeline.predict(X_test)
        positive_probs = pipeline.predict_proba(X_test)[:, 1]
        y_preds[name] = predictions
        eval_results.append({
            "模型名称": name,
            "准确率": round(accuracy_score(y_test, predictions), 4),
            "精确率": round(precision_score(y_test, predictions), 4),
            "召回率": round(recall_score(y_test, predictions), 4),
            "F1分数": round(f1_score(y_test, predictions), 4),
            "AUC": round(roc_auc_score(y_test, positive_probs), 4)
        })

    eval_df = pd.DataFrame(eval_results)
    print("\n测试集评估结果:\n", eval_df)

    # Render and save a confusion-matrix heatmap for each model.
    for name in models["模型名称"]:
        matrix = confusion_matrix(y_test, y_preds[name])
        plt.figure(figsize=(8, 6))
        sns.heatmap(matrix, annot=True, fmt="d", cmap="Blues",
                    xticklabels=["未离职(0)", "离职(1)"],
                    yticklabels=["未离职(0)", "离职(1)"])
        plt.title(f"{name}混淆矩阵", fontsize=12)
        plt.xlabel("预测标签", fontsize=10)
        plt.ylabel("实际标签", fontsize=10)
        plt.tight_layout()
        plt.savefig(f"{name}_混淆矩阵.png", dpi=300)
        plt.close()
        print(f"已保存混淆矩阵: {name}_混淆矩阵.png")

    # Persist the metric table.
    eval_df.to_csv("模型评估指标汇总.csv", index=False)
    print("评估指标已保存至: 模型评估指标汇总.csv")


if __name__ == "__main__":
    from pathlib import Path

    # Both CSVs live in the same raw-data directory. Building the two paths
    # from a single root fixes the original inconsistency where train.csv used
    # an absolute Windows path but test.csv pointed at a bare Unix path
    # ("/data/raw/test.csv") that could never resolve to the sibling file.
    data_dir = Path(r"D:\WorkArea\WorkSpace\Python\talents_loss\data\raw")

    print("====== 1. 数据加载与预处理 ======")
    X_train, y_train, X_test, y_test = load_and_preprocess_data(
        data_dir / "train.csv",
        data_dir / "test.csv")

    print("\n====== 2. 构建预处理管道 ======")
    preprocessor, _, _ = build_preprocessor(X_train)

    print("\n====== 3. 模型训练与Top3选择（网格搜索+交叉验证）======")
    top3_models = train_with_grid_search(X_train, y_train, preprocessor, top_n=3)

    print("\n====== 4. 训练堆叠模型 ======")
    all_models = train_stacking_model(X_train, y_train, preprocessor, top3_models)

    print("\n====== 5. 测试集评估与可视化 ======")
    evaluate_on_test_set(X_test, y_test, all_models)

    print("\n所有流程完成！")