"""
人才流失预测 - XGBoost模型
按照四个步骤进行：
1. 数据读取
2. 数据清洗
3. 特征工程
4. 模型训练和测试
"""

import os
import warnings
from typing import Tuple

import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBClassifier
warnings.filterwarnings('ignore')

# ==================== Configuration ====================
RANDOM_STATE = 108  # seed shared by the train/valid split and the XGBoost model
TRAIN_PATH = "../data/train.csv"  # default training-data location
TEST_PATH = "../data/test2.csv"  # default test-data location
TARGET_COL = "Attrition"  # name of the binary target column


# ==================== 第一步：数据读取 ====================
def step1_load_data(train_path: str, test_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Step 1: read the training and test CSV files and print an overview.

    Args:
        train_path: path to the training CSV file.
        test_path: path to the test CSV file.

    Returns:
        Tuple[pd.DataFrame, pd.DataFrame]: the raw training and test frames.
    """
    rule = "=" * 60
    print(rule)
    print("第一步：数据读取")
    print(rule)

    train_df, test_df = (pd.read_csv(path, encoding="utf-8") for path in (train_path, test_path))

    print(f"训练集形状: {train_df.shape}")
    print(f"测试集形状: {test_df.shape}")
    print(f"\n训练集前5行:\n{train_df.head()}")
    # NOTE: df.info() prints directly to stdout and returns None, so this
    # line also emits a trailing "None" — kept to match the original output.
    print(f"\n数据列信息:\n{train_df.info()}")
    print(f"\n目标变量分布:\n{train_df[TARGET_COL].value_counts()}")

    return train_df, test_df


# ==================== 第二步：数据清洗 ====================
def step2_data_cleaning(train_df: pd.DataFrame, test_df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Step 2: clean the data.

    Performs four passes: missing-value reporting, removal of
    zero-variance columns, de-duplication of training rows, and a
    dtype summary. The input frames are copied, never mutated.

    Args:
        train_df: raw training frame.
        test_df: raw test frame.

    Returns:
        Tuple[pd.DataFrame, pd.DataFrame]: cleaned train and test frames.
    """
    rule = "=" * 60
    print("\n" + rule)
    print("第二步：数据清洗")
    print(rule)

    train_cleaned, test_cleaned = train_df.copy(), test_df.copy()

    # 2.1 Missing-value report for both frames.
    print("\n2.1 缺失值检查")
    train_missing = train_cleaned.isnull().sum()
    print(f"训练集缺失值:\n{train_missing[train_missing > 0]}")
    if not train_missing.sum():
        print("训练集无缺失值")

    test_missing = test_cleaned.isnull().sum()
    print(f"\n测试集缺失值:\n{test_missing[test_missing > 0]}")
    if not test_missing.sum():
        print("测试集无缺失值")

    # 2.2 Drop columns with a single unique value (e.g. Over18 == "Y",
    # StandardHours == 80): they carry no discriminative signal.
    print("\n2.2 删除无用列")
    useless_cols = [c for c in train_cleaned.columns if train_cleaned[c].nunique() == 1]
    if useless_cols:
        print(f"删除无区分度的列: {useless_cols}")
        train_cleaned = train_cleaned.drop(columns=useless_cols)
        # Mirror the drop on the test set, skipping any column it lacks.
        shared = [c for c in useless_cols if c in test_cleaned.columns]
        test_cleaned = test_cleaned.drop(columns=shared)
    else:
        print("没有需要删除的无用列")

    # 2.3 Remove duplicated training rows.
    print("\n2.3 检查重复数据")
    n_dups = train_cleaned.duplicated().sum()
    print(f"训练集重复行数: {n_dups}")
    if n_dups:
        train_cleaned = train_cleaned.drop_duplicates()
        print(f"已删除重复数据，剩余 {len(train_cleaned)} 行")

    # 2.4 Report categorical vs. numeric columns.
    print("\n2.4 数据类型检查")
    print(f"分类特征: {train_cleaned.select_dtypes(include=['object']).columns.tolist()}")
    print(f"数值特征: {train_cleaned.select_dtypes(include=[np.number]).columns.tolist()}")

    print(f"\n清洗后训练集形状: {train_cleaned.shape}")
    print(f"清洗后测试集形状: {test_cleaned.shape}")

    return train_cleaned, test_cleaned


# ==================== 第三步：特征工程 ====================
def step3_feature_engineering(train_df: pd.DataFrame, test_df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Step 3: feature engineering.

    Adds derived ratio features and two interaction features, then
    label-encodes every categorical column (the target excluded) with
    encoders fitted on the union of train and test values.

    Args:
        train_df: cleaned training frame.
        test_df: cleaned test frame.

    Returns:
        Tuple[pd.DataFrame, pd.DataFrame]: train/test frames with new features.
    """
    rule = "=" * 60
    print("\n" + rule)
    print("第三步：特征工程")
    print(rule)

    train_fe = train_df.copy()
    test_fe = test_df.copy()

    def _add_feature(name, formula, message):
        # Apply the same formula to both frames so their columns stay aligned.
        train_fe[name] = formula(train_fe)
        test_fe[name] = formula(test_fe)
        print(message)

    available = set(train_fe.columns)

    # 3.1 Derived features.
    print("\n3.1 创建派生特征")

    if {"YearsAtCompany", "NumCompaniesWorked"} <= available:
        # +1 guards against division by zero for employees with no prior companies.
        _add_feature(
            "YearsAtCompanyPerRole",
            lambda df: df["YearsAtCompany"] / (df["NumCompaniesWorked"] + 1),
            "创建特征: YearsAtCompanyPerRole (每个公司的平均工作年限)",
        )

    if "MonthlyIncome" in available:
        _add_feature(
            "IncomePerYear",
            lambda df: df["MonthlyIncome"] * 12,
            "创建特征: IncomePerYear (年收入)",
        )

    if {"YearsAtCompany", "TotalWorkingYears"} <= available:
        _add_feature(
            "WorkLifeBalance_Ratio",
            lambda df: df["YearsAtCompany"] / (df["TotalWorkingYears"] + 1),
            "创建特征: WorkLifeBalance_Ratio (当前公司工作年限占比)",
        )

    # 3.2 Interaction features.
    print("\n3.2 创建交互特征")

    if {"Age", "JobLevel"} <= available:
        _add_feature(
            "Age_x_JobLevel",
            lambda df: df["Age"] * df["JobLevel"],
            "创建特征: Age_x_JobLevel (年龄与职位等级交互)",
        )

    if {"DistanceFromHome", "OverTime"} <= available:
        _add_feature(
            "Dist_x_OverTime",
            lambda df: df["DistanceFromHome"] * df["OverTime"].map({"Yes": 1, "No": 0}),
            "创建特征: Dist_x_OverTime (距离与加班交互)",
        )

    # 3.3 Label-encode the categorical columns (target column excluded).
    print("\n3.3 对分类特征进行编码（LabelEncoder）")

    categorical_cols = [
        c for c in train_fe.select_dtypes(include=["object"]).columns
        if c != TARGET_COL
    ]
    print(f"需要编码的分类特征: {categorical_cols}")

    label_encoders = {}
    for col in categorical_cols:
        encoder = LabelEncoder()
        # Fit on the pooled train+test values so codes agree across frames
        # and categories unseen in train cannot crash transform().
        pooled = pd.concat([train_fe[col], test_fe[col]], axis=0).astype(str)
        encoder.fit(pooled)
        train_fe[col] = encoder.transform(train_fe[col].astype(str))
        test_fe[col] = encoder.transform(test_fe[col].astype(str))
        label_encoders[col] = encoder
        print(f"  编码 {col}: {len(encoder.classes_)} 个类别")

    print(f"\n特征工程后训练集形状: {train_fe.shape}")
    print(f"特征工程后测试集形状: {test_fe.shape}")

    return train_fe, test_fe


# ==================== 第四步：模型训练和测试 ====================
def step4_model_training_testing(train_df: pd.DataFrame, test_df: pd.DataFrame):
    """
    第四步：模型训练和测试
    - 划分训练集和验证集
    - 构建XGBoost模型
    - 网格搜索调优
    - 模型评估
    
    Args:
        train_df: 特征工程后的训练集
        test_df: 特征工程后的测试集
    """
    print("\n" + "=" * 60)
    print("第四步：模型训练和测试")
    print("=" * 60)
    
    # 4.1 划分特征和标签
    print("\n4.1 划分特征和标签")
    y_train = train_df[TARGET_COL]
    X_train = train_df.drop(columns=[TARGET_COL])
    
    print(f"特征数量: {X_train.shape[1]}")
    print(f"样本数量: {X_train.shape[0]}")
    print(f"正负样本分布:\n{y_train.value_counts()}")
    
    # 4.2 划分训练集和验证集
    print("\n4.2 划分训练集和验证集 (80% 训练, 20% 验证)")
    X_train_split, X_valid, y_train_split, y_valid = train_test_split(
        X_train, y_train, 
        test_size=0.2, 
        random_state=RANDOM_STATE, 
        stratify=y_train
    )
    
    print(f"训练集大小: {X_train_split.shape[0]}")
    print(f"验证集大小: {X_valid.shape[0]}")
    
    # 4.3 构建XGBoost模型
    print("\n4.3 构建XGBoost模型")
    
    # 直接使用优化的参数（避免耗时的网格搜索）
    xgb_model = XGBClassifier(
        random_state=RANDOM_STATE,
        n_estimators=100,  # 减少树的数量，加快训练
        max_depth=5,
        learning_rate=0.1,
        subsample=0.8,
        colsample_bytree=0.8,
        min_child_weight=1,
        reg_lambda=1.0,
        gamma=0.0,
        objective="binary:logistic",
        eval_metric="auc",
        tree_method="hist",
        n_jobs=-1,
        use_label_encoder=False,
    )
    
    print("训练XGBoost模型...")
    xgb_model.fit(
        X_train_split, 
        y_train_split,
        verbose=True  # 显示训练进度
    )
    
    print(f"模型训练完成！")
    
    # 4.4 在验证集上评估
    print("\n4.4 在验证集上评估模型")
    best_model = xgb_model
    
    # 预测概率
    y_valid_proba = best_model.predict_proba(X_valid)[:, 1]
    y_valid_pred = best_model.predict(X_valid)
    
    # 计算AUC
    auc_valid = roc_auc_score(y_valid, y_valid_proba)
    print(f"验证集 AUC: {auc_valid:.6f}")
    
    # 混淆矩阵
    print(f"\n混淆矩阵:\n{confusion_matrix(y_valid, y_valid_pred)}")
    
    # 分类报告
    print(f"\n分类报告:\n{classification_report(y_valid, y_valid_pred)}")
    
    # 4.5 特征重要性
    print("\n4.5 特征重要性 (Top 10)")
    feature_importance = pd.DataFrame({
        'Feature': X_train.columns,
        'Importance': best_model.feature_importances_
    }).sort_values(by='Importance', ascending=False)
    
    print(feature_importance.head(10))
    
    # 4.6 在测试集上预测
    print("\n4.6 在测试集上预测")
    if TARGET_COL in test_df.columns:
        # 测试集包含标签
        X_test = test_df.drop(columns=[TARGET_COL])
        y_test = test_df[TARGET_COL]
        
        # 对齐列
        X_test = X_test.reindex(columns=X_train.columns, fill_value=0)
        
        # 预测
        y_test_proba = best_model.predict_proba(X_test)[:, 1]
        y_test_pred = best_model.predict(X_test)
        
        # 计算AUC
        auc_test = roc_auc_score(y_test, y_test_proba)
        print(f"测试集 AUC: {auc_test:.6f}")
        
        # 混淆矩阵
        print(f"\n测试集混淆矩阵:\n{confusion_matrix(y_test, y_test_pred)}")
        
        # 分类报告
        print(f"\n测试集分类报告:\n{classification_report(y_test, y_test_pred)}")
    else:
        # 测试集不包含标签，生成提交文件
        X_test = test_df
        X_test = X_test.reindex(columns=X_train.columns, fill_value=0)
        
        y_test_proba = best_model.predict_proba(X_test)[:, 1]
        
        # 保存预测结果
        submission = pd.DataFrame({
            'Id': range(len(y_test_proba)),
            TARGET_COL: y_test_proba
        })
        submission.to_csv('./data/submission_xgboost.csv', index=False)
        print("测试集无标签，预测结果已保存至 ./data/submission_xgboost.csv")


# ==================== 主函数 ====================
def run(train_path: str = TRAIN_PATH, test_path: str = TEST_PATH):
    """Execute the full four-step pipeline end to end.

    Args:
        train_path: training data path.
        test_path: test data path.
    """
    # Each step consumes the previous step's (train, test) output pair.
    train_df, test_df = step1_load_data(train_path, test_path)        # 1. load
    train_df, test_df = step2_data_cleaning(train_df, test_df)        # 2. clean
    train_df, test_df = step3_feature_engineering(train_df, test_df)  # 3. features
    step4_model_training_testing(train_df, test_df)                   # 4. train/test

    rule = "=" * 60
    print("\n" + rule)
    print("所有步骤完成！")
    print(rule)


if __name__ == "__main__":
    run()


