import pandas as pd
import numpy as np
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (RandomForestClassifier, GradientBoostingClassifier,
                              AdaBoostClassifier, StackingClassifier)
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier


def train_and_evaluate(model, X_train, y_train, X_test, y_test, model_name, strategy):
    """Fit *model* on the training data and report test-set AUC and positive-class recall.

    Parameters:
        model: any fitted-API estimator (fit/predict, ideally predict_proba).
        X_train, y_train: training features / labels (possibly resampled).
        X_test, y_test: held-out evaluation features / labels.
        model_name: display name used in printed output and the result dict.
        strategy: sampling/weighting strategy label, recorded alongside metrics.

    Returns:
        dict with keys 'model_name', 'strategy', 'auc', 'recall'.
    """
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    # Continuous score for AUC: prefer predict_proba, then decision_function
    # (previously only hard predictions were used for models without
    # predict_proba, which artificially lowers AUC), finally hard labels.
    if hasattr(model, 'predict_proba'):
        y_score = model.predict_proba(X_test)[:, 1]
    elif hasattr(model, 'decision_function'):
        y_score = model.decision_function(X_test)
    else:
        y_score = y_pred
    auc = roc_auc_score(y_test, y_score)

    # Recall for the positive class. classification_report stringifies labels,
    # and predict_proba[:, 1] / roc_auc_score's "positive" class is the
    # largest label, i.e. model.classes_[-1]. Using it directly also works for
    # string labels (e.g. 'Yes'/'No'), where the old "'1' or 1" guess raised
    # a KeyError.
    report_dict = classification_report(y_test, y_pred, output_dict=True)
    classes = getattr(model, 'classes_', None)
    if classes is not None:
        positive_class = str(classes[-1])
    else:
        positive_class = '1' if '1' in report_dict else 1
    recall = report_dict[positive_class]['recall']

    # Print intermediate results for this (model, strategy) combination.
    print(f"\n{model_name}（{strategy}）评估：")
    print(f"AUC: {auc:.4f} | 正类召回率: {recall:.4f}")

    return {
        'model_name': model_name,
        'strategy': strategy,
        'auc': auc,
        'recall': recall
    }


def get_resampled_data(X_train, y_train, strategy):
    """Resample the training set according to *strategy*.

    "原始数据" returns the inputs untouched; the other strategies delegate to
    the matching imblearn sampler (seeded for reproducibility). An unknown
    strategy raises ValueError.
    """
    if strategy == "原始数据":
        return X_train, y_train

    # Lazily-constructed samplers keyed by strategy name.
    sampler_factories = {
        "随机过采样": lambda: RandomOverSampler(random_state=42),
        "SMOTE过采样": lambda: SMOTE(random_state=42),
        "随机欠采样": lambda: RandomUnderSampler(random_state=42),
    }
    factory = sampler_factories.get(strategy)
    if factory is None:
        raise ValueError(f"不支持的采样策略: {strategy}")
    return factory().fit_resample(X_train, y_train)


# 1. Data loading and preprocessing
df = pd.read_csv(r'D:\WorkArea\WorkSpace\Python\talents_loss\data\raw\train.csv')

# Basic dataset summary.
print('数据基本信息：')
df.info()
n_rows, n_cols = df.shape
print(f"数据规模：{n_rows}行, {n_cols}列")

# Preview the first rows as tab-separated text (avoids huge printouts).
print('\n数据样本：')
print(df.head().to_csv(sep='\t', na_rep='nan'))

# Split target from features.
y = df['Attrition']
X = df.drop(columns='Attrition')

# Standardize numeric columns, one-hot encode categorical (object) columns.
cat_features = X.select_dtypes(include=['object']).columns
num_features = X.select_dtypes(exclude=['object']).columns

preprocessor = ColumnTransformer(transformers=[
    ('num', StandardScaler(), num_features),
    ('cat', OneHotEncoder(handle_unknown='ignore'), cat_features),
])
X = preprocessor.fit_transform(X)

# Stratified 80/20 train/test split (preserves class ratios).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42)


# 2. 定义待测试的模型（包括基础模型和堆叠模型）
def build_models():
    """Assemble the candidate models.

    Returns a dict mapping display name -> estimator instance: nine base
    learners, a stacking ensemble on top of three of them, and two
    class-weight-balanced variants (evaluated separately from the sampling
    strategies).
    """
    models = {
        "逻辑回归": LogisticRegression(random_state=42, max_iter=1000),
        "决策树": DecisionTreeClassifier(random_state=42),
        "随机森林": RandomForestClassifier(random_state=42, n_estimators=100),
        "梯度提升树": GradientBoostingClassifier(random_state=42),
        "AdaBoost": AdaBoostClassifier(random_state=42),
        "SVM": SVC(random_state=42, probability=True),
        "KNN": KNeighborsClassifier(),
        "XGBoost": XGBClassifier(random_state=42, eval_metric='logloss'),
        "LightGBM": LGBMClassifier(random_state=42),
    }

    # Stacking ensemble over three strong base learners; StackingClassifier
    # clones its estimators at fit time, so sharing instances is safe.
    models["堆叠模型"] = StackingClassifier(
        estimators=[
            ('rf', models["随机森林"]),
            ('xgb', models["XGBoost"]),
            ('svm', models["SVM"]),
        ],
        final_estimator=LogisticRegression(random_state=42, max_iter=1000),
    )

    # Class-weighted variants, handled by the "类别权重调整" strategy.
    models["逻辑回归(平衡权重)"] = LogisticRegression(
        random_state=42, max_iter=1000, class_weight='balanced')
    models["随机森林(平衡权重)"] = RandomForestClassifier(
        random_state=42, n_estimators=100, class_weight='balanced')

    return models


# 3. Experiment configuration
sampling_strategies = ["原始数据", "随机过采样", "SMOTE过采样", "随机欠采样"]
models = build_models()
results = []  # accumulated evaluation records

# 4. Batch training/evaluation: every sampling strategy x every base model.
for strategy in sampling_strategies:
    print(f"\n===== 采样策略：{strategy} =====")
    X_res, y_res = get_resampled_data(X_train, y_train, strategy)

    for name, clf in models.items():
        # Class-weighted variants are evaluated separately below.
        if "平衡权重" in name:
            continue
        results.append(train_and_evaluate(
            clf, X_res, y_res, X_test, y_test, name, strategy))

# Evaluate the class-weighted models on the untouched training set.
print("\n===== 处理策略：类别权重调整 =====")
for name, clf in models.items():
    if "平衡权重" in name:
        results.append(train_and_evaluate(
            clf, X_train, y_train, X_test, y_test, name, "类别权重调整"))

# 5. Rank all runs and show the top three (AUC first, recall as tiebreaker).
print("\n\n===== 模型综合排名（TOP3） =====")
# Descending on (auc, recall); equivalent to sorting ascending on the
# negated keys, including tie stability.
sorted_results = sorted(
    results, key=lambda r: (r['auc'], r['recall']), reverse=True)

for rank, entry in enumerate(sorted_results[:3], start=1):
    print(f"\n第{rank}名：")
    print(f"模型名称：{entry['model_name']}")
    print(f"处理策略：{entry['strategy']}")
    print(f"AUC：{entry['auc']:.4f}")
    print(f"正类召回率：{entry['recall']:.4f}")