# -*- coding: utf-8 -*-
"""
人才流失分析项目
项目目标：基于员工数据预测人才流失情况
数据集：训练集1100行，测试集350行
特征：30个员工相关特征
目标变量：Attrition（离职情况）

技术栈：pandas, numpy, matplotlib, seaborn, scikit-learn, xgboost
"""

# 1. 导入必要的库
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
import seaborn as sns
import warnings

warnings.filterwarnings('ignore')

# 数据预处理和特征工程
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline


# 机器学习模型
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier # 需要先下载lightgbm，代码：pip install lightgbm -i https://pypi.tuna.tsinghua.edu.cn/simple

# 模型评估与选择
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import (accuracy_score, confusion_matrix, classification_report,
                             roc_auc_score, roc_curve, auc, precision_recall_curve)

# Configure the global visualization style for all plots in this script.
sns.set_style("whitegrid")
plt.rcParams['font.sans-serif'] = ['SimHei']  # use a CJK-capable font so Chinese labels render
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with CJK fonts


# 2. 数据加载
def load_data(train_path='../project/data/raw/train.csv',
              test_path='../project/data/raw/test2.csv'):
    """
    Load the training and test datasets from CSV files.

    The paths are parameterized (previously hard-coded) so the loader can
    be reused; the defaults preserve the original project layout.

    Parameters
    ----------
    train_path : str
        Path to the training-set CSV file.
    test_path : str
        Path to the test-set CSV file.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        (train_df, test_df) on success, or (None, None) when either file
        is missing.
    """
    try:
        train_df = pd.read_csv(train_path)
        test_df = pd.read_csv(test_path)
        print("数据加载成功!")
        print(f"训练集形状: {train_df.shape}")
        print(f"测试集形状: {test_df.shape}")
        return train_df, test_df
    except FileNotFoundError:
        # Best-effort: report the problem and let the caller decide to abort.
        print("找不到数据文件，请检查文件路径")
        return None, None


# 3. 数据探索分析(EDA)
def exploratory_data_analysis(train_df, test_df):
    """
    Run exploratory data analysis over the combined train/test data.

    Prints dataset summaries, missing-value statistics and feature/target
    relationships, and saves several diagnostic plots as PNG files in the
    current working directory.

    Parameters
    ----------
    train_df : pandas.DataFrame
        Training data; expected to contain the target column 'Attrition'.
    test_df : pandas.DataFrame
        Test data with the same columns as ``train_df``.

    Returns
    -------
    pandas.DataFrame
        The row-wise concatenation of ``train_df`` and ``test_df``.
    """
    print("\n=== 数据探索分析(EDA) ===\n")

    # Combine both splits so the summaries reflect the full population.
    full_df = pd.concat([train_df, test_df], ignore_index=True)

    # 3.1 Basic information
    print("1. 数据集基本信息:")
    print(full_df.info())

    print("\n2. 数值型特征描述性统计:")
    print(full_df.describe())

    print("\n3. 类别型特征描述性统计:")
    print(full_df.describe(include=['O']))

    # 3.2 Missing-value analysis
    print("\n4. 缺失值分析:")
    missing_data = full_df.isnull().sum()
    missing_percent = (missing_data[missing_data > 0] / len(full_df)) * 100
    if not missing_percent.empty:
        print("存在缺失值的特征:")
        print(missing_percent)
    else:
        print("没有缺失值!")

    # 3.3 Target distribution (pie chart of stay vs. leave).
    print("\n5. 目标变量分布:")
    plt.figure(figsize=(10, 6))
    attrition_counts = train_df['Attrition'].value_counts()
    plt.pie(attrition_counts, labels=['未离职', '离职'], autopct='%1.1f%%', startangle=90)
    plt.title('员工离职比例分布')
    plt.savefig('attrition_distribution.png')
    plt.show()

    # 3.4 Numeric feature distributions
    print("\n6. 数值型特征分布:")
    numeric_features = train_df.select_dtypes(include=[np.number]).columns.tolist()
    # Guard: the target may be stored as a non-numeric dtype, in which case
    # the original unconditional remove() raised ValueError.
    if 'Attrition' in numeric_features:
        numeric_features.remove('Attrition')

    plt.figure(figsize=(16, 12))
    for i, col in enumerate(numeric_features[:12]):  # show at most the first 12
        plt.subplot(3, 4, i + 1)
        sns.histplot(train_df[col], kde=True)
        plt.title(f'{col}分布')
        plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('numeric_features_distribution.png')
    plt.show()

    # 3.5 Categorical feature distributions
    print("\n7. 类别型特征分布:")
    categorical_features = train_df.select_dtypes(include=['object']).columns.tolist()

    # Size the subplot grid dynamically: the previous fixed 3x3 grid crashed
    # whenever there were more than 9 categorical features.
    n_cat = max(len(categorical_features), 1)
    cat_rows = (n_cat + 2) // 3  # 3 columns per row, rounded up
    plt.figure(figsize=(16, 5 * cat_rows))
    for i, col in enumerate(categorical_features):
        plt.subplot(cat_rows, 3, i + 1)
        sns.countplot(data=train_df, x=col)
        plt.title(f'{col}分布')
        plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('categorical_features_distribution.png')
    plt.show()

    # 3.6 Feature vs. target relationships
    print("\n8. 特征与目标变量的关系分析:")

    # Numeric features: box plots split by attrition status.
    plt.figure(figsize=(16, 12))
    for i, col in enumerate(numeric_features[:12]):
        plt.subplot(3, 4, i + 1)
        sns.boxplot(x='Attrition', y=col, data=train_df)
        plt.title(f'{col} vs Attrition')
        plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('numeric_vs_attrition.png')
    plt.show()

    # Categorical features: count plots with attrition as the hue,
    # using the same dynamic grid as above.
    plt.figure(figsize=(16, 5 * cat_rows))
    for i, col in enumerate(categorical_features):
        plt.subplot(cat_rows, 3, i + 1)
        sns.countplot(data=train_df, x=col, hue='Attrition')
        plt.title(f'{col} vs Attrition')
        plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('categorical_vs_attrition.png')
    plt.show()

    # 3.7 Correlation analysis (numeric features only)
    print("\n9. 特征相关性分析:")
    numeric_df = train_df.select_dtypes(include=[np.number])
    correlation_matrix = numeric_df.corr()

    plt.figure(figsize=(16, 12))
    sns.heatmap(correlation_matrix, annot=False, cmap='coolwarm', center=0)
    plt.title('特征相关性热力图')
    plt.savefig('correlation_heatmap.png')
    plt.show()

    # Features most correlated with the target; guard because this only
    # works when 'Attrition' is numeric and therefore in the matrix.
    if 'Attrition' in correlation_matrix.columns:
        correlation_with_target = correlation_matrix['Attrition'].abs().sort_values(ascending=False)
        print("与目标变量最相关的特征:")
        print(correlation_with_target[1:11])  # skip the target itself at rank 0

    return full_df


# 4. 特征工程
def feature_engineering(train_df, test_df):
    """
    Build and apply the preprocessing pipeline.

    Splits off the 'Attrition' target, median-imputes and standardizes the
    numeric columns, constant-imputes and one-hot encodes the categorical
    columns, then transforms both splits (fitting on the training split
    only).

    Parameters
    ----------
    train_df, test_df : pandas.DataFrame
        Raw data; both must contain the 'Attrition' column.

    Returns
    -------
    tuple
        (X_train_processed, y_train, X_test_processed, y_test,
        feature_names) where feature_names covers the numeric columns plus
        the one-hot-expanded categorical columns.
    """
    print("\n=== 特征工程 ===\n")

    # Separate predictors from the target column in each split.
    features_train = train_df.drop('Attrition', axis=1)
    target_train = train_df['Attrition']
    features_test = test_df.drop('Attrition', axis=1)
    target_test = test_df['Attrition']

    # 4.1 Split columns by dtype.
    num_cols = features_train.select_dtypes(include=[np.number]).columns.tolist()
    cat_cols = features_train.select_dtypes(include=['object']).columns.tolist()

    print(f"数值型特征: {num_cols}")
    print(f"类别型特征: {cat_cols}")

    # 4.2 Per-type preprocessing pipelines.
    # Numeric: fill missing values with the median, then standardize.
    num_pipeline = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler()),
    ])

    # Categorical: fill missing values with a sentinel, then one-hot encode
    # (unknown categories at transform time are ignored, not errors).
    cat_pipeline = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
        ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=False)),
    ])

    preprocessor = ColumnTransformer(
        transformers=[
            ('num', num_pipeline, num_cols),
            ('cat', cat_pipeline, cat_cols),
        ])

    # 4.3 Fit on the training split only, then transform both splits.
    print("应用预处理...")
    X_train_processed = preprocessor.fit_transform(features_train)
    X_test_processed = preprocessor.transform(features_test)

    # Recover human-readable names for the one-hot-expanded columns.
    encoder = preprocessor.named_transformers_['cat'].named_steps['onehot']
    onehot_names = encoder.get_feature_names_out(cat_cols)
    feature_names = np.concatenate([num_cols, onehot_names])

    print(f"预处理后的训练集形状: {X_train_processed.shape}")
    print(f"预处理后的测试集形状: {X_test_processed.shape}")

    return X_train_processed, target_train, X_test_processed, target_test, feature_names


# 5. 模型构建与训练
def build_and_train_models(X_train, y_train, feature_names):
    """
    Build, compare and tune several classifiers on the training data.

    Cross-validates five candidate models with ROC-AUC scoring, plots the
    comparison, grid-searches RandomForest hyperparameters, and plots the
    tuned model's top feature importances.

    Parameters
    ----------
    X_train : array-like
        Preprocessed training feature matrix.
    y_train : array-like
        Training labels.
    feature_names : sequence of str
        Column names aligned with X_train's columns (used for the
        importance plot).

    Returns
    -------
    tuple
        (best_model, models) — the grid-search-tuned RandomForestClassifier
        and the dict of untuned candidate models.
    """
    print("\n=== 模型构建与训练 ===\n")

    # 5.1 Candidate models. class_weight='balanced' compensates for the
    # minority attrition class where the estimator supports it.
    models = {
        'Logistic Regression': LogisticRegression(random_state=42, class_weight='balanced'),
        'Random Forest': RandomForestClassifier(random_state=42, class_weight='balanced'),
        'Gradient Boosting': GradientBoostingClassifier(random_state=42),
        'XGBoost': XGBClassifier(random_state=42, eval_metric='logloss'),
        'LightGBM': LGBMClassifier(random_state=42, class_weight='balanced')
    }

    # 5.2 5-fold cross-validated ROC-AUC for each candidate.
    results = {}
    for name, model in models.items():
        cv_scores = cross_val_score(model, X_train, y_train, cv=5, scoring='roc_auc')
        results[name] = cv_scores
        print(f"{name}: AUC均值 = {cv_scores.mean():.4f} (±{cv_scores.std():.4f})")

    # 5.3 Bar chart of mean AUC per model with std-dev error bars.
    plt.figure(figsize=(12, 8))
    model_names = list(results.keys())
    model_performance = [results[model].mean() for model in model_names]
    model_std = [results[model].std() for model in model_names]

    plt.bar(range(len(model_names)), model_performance, yerr=model_std, capsize=5)
    plt.xticks(range(len(model_names)), model_names, rotation=45)
    plt.ylabel('AUC Score')
    plt.title('模型性能比较 (5折交叉验证)')
    plt.tight_layout()
    plt.savefig('model_comparison.png')
    plt.show()

    # 5.4 Hyperparameter tuning.
    # NOTE(review): despite the "best model" wording, tuning is hard-coded
    # to RandomForest regardless of which candidate scored highest above —
    # confirm this is intentional.
    print("\n进行超参数调优...")

    # RandomForest parameter grid (2*3*2*2 = 24 combinations).
    param_grid_rf = {
        'n_estimators': [100, 200],
        'max_depth': [None, 10, 20],
        'min_samples_split': [2, 5],
        'min_samples_leaf': [1, 2]
    }

    # Exhaustive grid search, 5-fold CV, parallelized across all cores.
    rf = RandomForestClassifier(random_state=42, class_weight='balanced')
    grid_search = GridSearchCV(rf, param_grid_rf, cv=5, scoring='roc_auc', n_jobs=-1)
    grid_search.fit(X_train, y_train)

    print(f"最佳参数: {grid_search.best_params_}")
    print(f"最佳交叉验证分数: {grid_search.best_score_:.4f}")

    # The estimator refit on the full training data with the best params.
    best_model = grid_search.best_estimator_

    # 5.5 Feature importance plot (tree-based models only).
    if hasattr(best_model, 'feature_importances_'):
        plt.figure(figsize=(12, 8))
        importances = best_model.feature_importances_
        indices = np.argsort(importances)[::-1]

        # Only the 20 most important features are shown.
        top_features = 20
        plt.title("特征重要性 (前20个)")
        plt.bar(range(top_features), importances[indices][:top_features])
        plt.xticks(range(top_features), [feature_names[i] for i in indices[:top_features]], rotation=90)
        plt.tight_layout()
        plt.savefig('feature_importance.png')
        plt.show()

    return best_model, models


# 6. 模型评估
def evaluate_model(model, X_test, y_test, X_train=None, y_train=None):
    """
    Evaluate a fitted classifier on the test split.

    Prints accuracy, ROC-AUC and a per-class classification report, and
    plots the confusion matrix, ROC curve and precision-recall curve (the
    latter two are also saved as PNG files).

    Parameters
    ----------
    model : estimator
        Fitted classifier exposing ``predict`` and ``predict_proba``.
    X_test, y_test : array-like
        Test features and labels.
    X_train, y_train : array-like, optional
        Accepted for interface compatibility; currently unused.

    Returns
    -------
    tuple of (float, float)
        (accuracy, roc_auc) on the test split.
    """
    print("\n=== 模型评估 ===\n")

    # 6.1 Predictions: hard labels and positive-class probabilities.
    y_pred = model.predict(X_test)
    y_pred_proba = model.predict_proba(X_test)[:, 1]

    # 6.2 Headline metrics.
    accuracy = accuracy_score(y_test, y_pred)
    roc_auc = roc_auc_score(y_test, y_pred_proba)

    print(f"准确率: {accuracy:.4f}")
    print(f"AUC分数: {roc_auc:.4f}")

    # 6.3 Confusion matrix (displayed only; not saved to disk).
    cm = confusion_matrix(y_test, y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.title('混淆矩阵')
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.show()

    # 6.4 Per-class precision/recall/F1.
    print("\n分类报告:")
    print(classification_report(y_test, y_pred, target_names=['未离职', '离职']))

    # 6.5 ROC curve. Reuse the AUC computed above instead of recomputing
    # it from the curve points (the original computed it twice).
    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)

    plt.figure(figsize=(10, 8))
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC曲线 (AUC = {roc_auc:.2f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假正率')
    plt.ylabel('真正率')
    plt.title('接收者操作特征(ROC)曲线')
    plt.legend(loc="lower right")
    plt.savefig('roc_curve.png')
    plt.show()

    # 6.6 Precision-recall curve.
    precision, recall, _ = precision_recall_curve(y_test, y_pred_proba)

    plt.figure(figsize=(10, 8))
    plt.plot(recall, precision, color='blue', lw=2)
    plt.xlabel('召回率')
    plt.ylabel('精确率')
    plt.title('精确率-召回率曲线')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.savefig('precision_recall_curve.png')
    plt.show()

    return accuracy, roc_auc


# 7. 结果解释与业务建议
def generate_business_insights(model, feature_names, X_test_processed, test_df):
    """
    Derive business insights and HR recommendations from model predictions.

    Prints the most important features (when the model exposes them),
    appends predicted attrition probability/label columns to ``test_df``
    (which is mutated in place), profiles the high-risk employees and
    prints a fixed set of HR recommendations.

    Parameters
    ----------
    model : estimator
        Fitted classifier exposing ``predict_proba`` (and optionally
        ``feature_importances_``).
    feature_names : sequence of str
        Names aligned with the preprocessed feature columns.
    X_test_processed : array-like
        Preprocessed test feature matrix.
    test_df : pandas.DataFrame
        Original test rows; the high-risk profile below assumes it has
        'Age', 'MonthlyIncome', 'JobSatisfaction' and 'OverTime' columns.

    Returns
    -------
    pandas.DataFrame
        ``test_df`` with 'Predicted_Attrition_Probability' and
        'Predicted_Attrition' columns added.
    """
    print("\n=== 业务洞察与建议 ===\n")

    # Feature-importance ranking (tree-based models only).
    if hasattr(model, 'feature_importances_'):
        importances = model.feature_importances_
        indices = np.argsort(importances)[::-1]

        # Guard against fewer than 10 features — the original code indexed
        # the first 10 unconditionally and would raise IndexError.
        top_n = min(10, len(indices))
        print("影响员工流失的前10个最重要特征:")
        for i in range(top_n):
            print(f"{i + 1}. {feature_names[indices[i]]}: {importances[indices[i]]:.4f}")

    # Attach predictions to the test frame (note: mutates test_df in place).
    y_pred_proba = model.predict_proba(X_test_processed)[:, 1]
    test_df['Predicted_Attrition_Probability'] = y_pred_proba
    test_df['Predicted_Attrition'] = (y_pred_proba > 0.5).astype(int)

    # Employees whose predicted attrition probability exceeds 0.7.
    high_risk_employees = test_df[test_df['Predicted_Attrition_Probability'] > 0.7]
    print(f"\n高流失风险员工数量: {len(high_risk_employees)}")

    if not high_risk_employees.empty:
        # Profile the shared traits of the high-risk group.
        print("\n高流失风险员工的共同特征:")

        print(f"平均年龄: {high_risk_employees['Age'].mean():.1f}岁")

        print(f"平均月收入: ${high_risk_employees['MonthlyIncome'].mean():.2f}")

        print(f"平均工作满意度: {high_risk_employees['JobSatisfaction'].mean():.2f}/4")

        # Share of the high-risk group that works overtime.
        overtime_ratio = high_risk_employees['OverTime'].value_counts(normalize=True)
        if 'Yes' in overtime_ratio:
            print(f"加班比例: {overtime_ratio['Yes'] * 100:.1f}%")

    # Fixed HR recommendations derived from the typical risk profile.
    print("\n=== 人力资源建议 ===")
    print("1. 重点关注高流失风险员工，特别是年轻、低收入、低工作满意度的员工")
    print("2. 改善工作环境满意度，提供更多职业发展机会")
    print("3. 合理调整工作负荷，减少不必要的加班")
    print("4. 建立定期员工反馈机制，及时发现并解决员工问题")
    print("5. 优化薪酬体系，确保内部公平性和外部竞争力")

    return test_df


# 主函数
def main():
    """Run the full attrition-analysis workflow end to end."""
    print("开始人才流失分析项目...")

    # 1. Load the raw data; bail out early if the files are missing.
    train_df, test_df = load_data()
    if train_df is None or test_df is None:
        return

    # 2. Exploratory data analysis (also saves diagnostic plots).
    full_df = exploratory_data_analysis(train_df, test_df)

    # 3. Feature engineering: impute/scale/encode both splits.
    (X_train_ready, y_train,
     X_test_ready, y_test, feature_names) = feature_engineering(train_df, test_df)

    # 4. Model comparison and hyperparameter tuning.
    chosen_model, candidate_models = build_and_train_models(X_train_ready, y_train, feature_names)

    # 5. Evaluate the tuned model on the held-out test split.
    acc, auc_score = evaluate_model(chosen_model, X_test_ready, y_test, X_train_ready, y_train)

    # 6. Turn predictions into business insights.
    scored_test_df = generate_business_insights(chosen_model, feature_names, X_test_ready, test_df)

    # 7. Persist the scored test set.
    scored_test_df.to_csv('test_predictions.csv', index=False)
    print("\n预测结果已保存到 'test_predictions.csv'")

    print("\n=== 项目完成 ===")
    print(f"最终模型在测试集上的表现:")
    print(f"- 准确率: {acc:.4f}")
    print(f"- AUC分数: {auc_score:.4f}")


# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()