#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
人才流失预测分析完整流程
包含：数据探索、特征工程、模型训练、评估与可视化
"""

import os
import datetime
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.metrics import (accuracy_score, roc_auc_score, classification_report,
                             confusion_matrix, roc_curve, precision_recall_curve,
                             average_precision_score)
from xgboost import XGBClassifier
from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
import joblib
import shap
from tabulate import tabulate

# Configure matplotlib for Chinese glyph rendering and silence noisy
# third-party warnings for a clean console log.
plt.rcParams.update({'font.family': 'SimHei', 'font.size': 12})
warnings.filterwarnings('ignore')

# Ensure the output directory for figures/models/reports exists.
os.makedirs('../results', exist_ok=True)


def load_data():
    """Load the raw train/test CSV files from ../data.

    Returns:
        tuple: (train DataFrame, test DataFrame).
    """
    return (pd.read_csv('../data/train.csv'),
            pd.read_csv('../data/test.csv'))


def data_analysis(train_df):
    """Exploratory data analysis; saves four figures under ../results.

    Produces: a pie chart of the target distribution, boxplots of key
    numeric features vs attrition, countplots of key categorical
    features, and a heatmap of numeric-feature correlations with the
    target.
    """
    print("\n===== 数据探索分析 =====")

    # --- 1. Target variable distribution (pie chart) ---
    counts = train_df['Attrition'].value_counts()
    plt.figure(figsize=(8, 6))
    plt.pie(counts,
            labels=['留职', '离职'],
            autopct='%1.1f%%',
            colors=['#66b3ff', '#ff9999'],
            explode=(0, 0.1))
    plt.title('人才流失比例分布')
    plt.savefig('../results/attrition_dist.png', bbox_inches='tight', dpi=300)
    plt.close()

    # --- 2. Numeric feature distributions vs attrition (boxplots) ---
    plt.figure(figsize=(14, 10))
    for idx, feat in enumerate(
            ['Age', 'MonthlyIncome', 'TotalWorkingYears', 'YearsAtCompany'],
            start=1):
        plt.subplot(2, 2, idx)
        sns.boxplot(x='Attrition', y=feat, data=train_df, palette='Set2')
        plt.title(f'{feat}分布 vs 流失情况')
    plt.tight_layout()
    plt.savefig('../results/numeric_features_dist.png', dpi=300)
    plt.close()

    # --- 3. Categorical feature distributions (countplots) ---
    plt.figure(figsize=(14, 10))
    for idx, feat in enumerate(
            ['Department', 'EducationField', 'JobRole', 'MaritalStatus'],
            start=1):
        plt.subplot(2, 2, idx)
        sns.countplot(x=feat, hue='Attrition', data=train_df, palette='Set2')
        plt.title(f'{feat}分布')
        plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('../results/categorical_features_dist.png', dpi=300)
    plt.close()

    # --- 4. Correlation of numeric features with the target ---
    # NOTE(review): assumes 'Attrition' is numeric (0/1) so it survives
    # select_dtypes — confirm against the CSV schema.
    plt.figure(figsize=(12, 10))
    numeric = train_df.select_dtypes(include=['int64', 'float64']).columns
    corr = train_df[numeric].corr()
    sns.heatmap(corr[['Attrition']].sort_values(by='Attrition', ascending=False),
                annot=True, cmap='coolwarm', center=0)
    plt.title('特征与流失率相关性')
    plt.savefig('../results/correlation_heatmap.png', bbox_inches='tight', dpi=300)
    plt.close()


def feature_engineering(train_df, test_df, *, min_features_to_select=20,
                        random_state=42):
    """Preprocess, balance, and feature-select the train/test sets.

    Pipeline: drop non-informative columns, one-hot encode categoricals,
    standardize numeric columns (fit on train only — no test leakage),
    oversample the minority class with SMOTE (train only), then run
    RFECV feature selection.

    Args:
        train_df: training DataFrame containing an 'Attrition' target.
        test_df: test DataFrame with the same schema.
        min_features_to_select: lower bound for RFECV; default 20
            matches the previous hard-coded value.
        random_state: seed for SMOTE and the RFECV estimator; default 42
            matches the previous hard-coded value.

    Returns:
        tuple: (X_train_selected, y_train_balanced, X_test_selected,
        y_test, fitted RFECV selector, fitted StandardScaler,
        fitted OneHotEncoder).
    """
    print("\n===== 特征工程处理 =====")

    # Identifier / constant columns carry no predictive signal.
    drop_cols = ['EmployeeNumber', 'Over18', 'StandardHours']
    train_df = train_df.drop(drop_cols, axis=1)
    test_df = test_df.drop(drop_cols, axis=1)

    # Separate features from the target.
    X_train = train_df.drop('Attrition', axis=1)
    X_test = test_df.drop('Attrition', axis=1)
    y_train = train_df['Attrition']
    y_test = test_df['Attrition']

    categorical_cols = ['BusinessTravel', 'Department', 'EducationField',
                        'Gender', 'JobRole', 'MaritalStatus', 'OverTime']
    numeric_cols = X_train.select_dtypes(include=['int64', 'float64']).columns

    # One-hot encode; unseen test-time categories become all-zero rows.
    ohe = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
    X_train_ohe = ohe.fit_transform(X_train[categorical_cols])
    X_test_ohe = ohe.transform(X_test[categorical_cols])

    # Standardize numerics using train statistics only.
    scaler = StandardScaler()
    X_train_num = scaler.fit_transform(X_train[numeric_cols])
    X_test_num = scaler.transform(X_test[numeric_cols])

    # Recombine into named DataFrames (RFECV support mask is applied by name).
    all_cols = list(numeric_cols) + list(ohe.get_feature_names_out(categorical_cols))
    X_train_processed = pd.DataFrame(np.hstack([X_train_num, X_train_ohe]),
                                     columns=all_cols)
    X_test_processed = pd.DataFrame(np.hstack([X_test_num, X_test_ohe]),
                                    columns=all_cols)

    # Balance classes on the training split only — never on the test set.
    smote = SMOTE(random_state=random_state)
    X_train_balanced, y_train_balanced = smote.fit_resample(X_train_processed, y_train)

    # Recursive feature elimination with cross-validated scoring.
    selector = RFECV(
        estimator=LogisticRegression(max_iter=1000, random_state=random_state),
        min_features_to_select=min_features_to_select,
        cv=StratifiedKFold(5),
        scoring='accuracy',
        n_jobs=-1
    )
    selector.fit(X_train_balanced, y_train_balanced)
    selected_features = X_train_balanced.columns[selector.support_]

    return (X_train_balanced[selected_features], y_train_balanced,
            X_test_processed[selected_features], y_test, selector, scaler, ohe)


def train_model(X_train, y_train, X_test, y_test):
    """Train an XGBoost classifier and compute core evaluation metrics.

    Persists the fitted model to ../results/best_model.pkl.

    Returns:
        tuple: (fitted model, per-class metrics DataFrame,
        train accuracy, test accuracy, test ROC AUC).
    """
    print("\n===== 模型训练与评估 =====")

    # Hyperparameters gathered in one place for readability.
    xgb_params = dict(
        n_estimators=200,
        learning_rate=0.05,
        max_depth=3,
        min_child_weight=1,
        gamma=0.1,
        subsample=0.8,
        colsample_bytree=0.8,
        reg_alpha=1.0,
        reg_lambda=1.0,
        random_state=42,
        use_label_encoder=False,  # silence legacy label-encoder warning
        eval_metric='logloss',
    )
    model = XGBClassifier(**xgb_params)
    model.fit(X_train, y_train)

    # Hold-out predictions (labels and positive-class probabilities).
    y_pred = model.predict(X_test)
    y_proba = model.predict_proba(X_test)[:, 1]

    # Headline metrics.
    train_acc = accuracy_score(y_train, model.predict(X_train))
    test_acc = accuracy_score(y_test, y_pred)
    roc_auc = roc_auc_score(y_test, y_proba)

    # Per-class precision/recall/F1 as a DataFrame.
    metrics_df = pd.DataFrame(
        classification_report(y_test, y_pred, output_dict=True)
    ).transpose()

    # Persist the fitted model for later reuse.
    joblib.dump(model, '../results/best_model.pkl')

    return model, metrics_df, train_acc, test_acc, roc_auc


def evaluate_model(model, X_train, y_train, X_test, y_test):
    """Evaluation visualizations and a metrics CSV under ../results.

    Saves: ROC curve, PR curve, top-15 feature importances, a SHAP
    summary bar plot, and evaluation_metrics.csv.

    Fix vs. previous version: `model.predict(X_test)` and the full
    `classification_report` were each recomputed three times when
    building the metrics dict; predictions and the report are now
    computed once and reused (identical outputs, less wasted work).
    """
    print("\n===== 模型评估可视化 =====")

    # Compute hold-out predictions once up front.
    y_pred = model.predict(X_test)
    y_proba = model.predict_proba(X_test)[:, 1]

    # 1. ROC curve
    fpr, tpr, _ = roc_curve(y_test, y_proba)
    roc_auc = roc_auc_score(y_test, y_proba)

    plt.figure(figsize=(8, 6))
    plt.plot(fpr, tpr, label=f'ROC曲线 (AUC = {roc_auc:.2f})')
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlabel('假正率')
    plt.ylabel('真正率')
    plt.title('ROC曲线')
    plt.legend()
    plt.savefig('../results/roc_curve.png', bbox_inches='tight', dpi=300)
    plt.close()

    # 2. Precision-recall curve
    precision, recall, _ = precision_recall_curve(y_test, y_proba)
    ap_score = average_precision_score(y_test, y_proba)

    plt.figure(figsize=(8, 6))
    plt.plot(recall, precision, label=f'PR曲线 (AP = {ap_score:.2f})')
    plt.xlabel('召回率')
    plt.ylabel('精确率')
    plt.title('精确率-召回率曲线')
    plt.legend()
    plt.savefig('../results/pr_curve.png', bbox_inches='tight', dpi=300)
    plt.close()

    # 3. Top-15 feature importances (gain-based, from the booster)
    plt.figure(figsize=(10, 8))
    feat_importances = pd.Series(model.feature_importances_, index=X_train.columns)
    feat_importances.nlargest(15).sort_values().plot(kind='barh')
    plt.title('Top 15 重要特征')
    plt.savefig('../results/feature_importance.png', bbox_inches='tight', dpi=300)
    plt.close()

    # 4. SHAP explanation (computed on the training matrix)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X_train)

    plt.figure(figsize=(10, 8))
    shap.summary_plot(shap_values, X_train, plot_type="bar", show=False)
    plt.title('SHAP特征重要性')
    plt.savefig('../results/shap_summary.png', bbox_inches='tight', dpi=300)
    plt.close()

    # 5. Persist scalar metrics; single classification_report for the
    #    positive class ('1' assumes integer 0/1 labels).
    pos_metrics = classification_report(y_test, y_pred, output_dict=True)['1']
    evaluation_results = {
        'ROC_AUC': roc_auc,
        'Average_Precision': ap_score,
        'Train_Accuracy': accuracy_score(y_train, model.predict(X_train)),
        'Test_Accuracy': accuracy_score(y_test, y_pred),
        'Precision': pos_metrics['precision'],
        'Recall': pos_metrics['recall'],
        'F1_Score': pos_metrics['f1-score']
    }

    pd.DataFrame([evaluation_results]).to_csv('../results/evaluation_metrics.csv', index=False)


def generate_report():
    """Write a Markdown analysis report to ../results/analysis_report.md.

    WARNING: this function reads module-level globals assigned in the
    __main__ block — train_df, test_df, attrition_counts, roc_auc,
    test_acc, metrics_df, feat_importances — so it must run after the
    full pipeline. NameError is raised if called standalone.

    Fix vs. previous version: feat_importances.nlargest(3) was computed
    three times; it is ranked once and reused (identical output).
    """
    print("\n===== 分析报告生成 =====")
    # Future work: render a PDF/Markdown report via a Jinja2 template.

    # Rank the top-3 features once.
    top3 = feat_importances.nlargest(3).index

    report_content = f"""
# 人才流失预测分析报告

## 1. 数据概况
- 训练集样本量: {len(train_df)}
- 测试集样本量: {len(test_df)}
- 流失比例: {attrition_counts[1] / sum(attrition_counts):.1%}

## 2. 模型表现
- 测试集AUC: {roc_auc:.4f}
- 测试集准确率: {test_acc:.4f}
- 精确率: {metrics_df.loc['1', 'precision']:.4f}
- 召回率: {metrics_df.loc['1', 'recall']:.4f}

## 3. 关键发现
1. 最重要的三个特征:
   - {top3[0]}
   - {top3[1]}
   - {top3[2]}

2. 业务建议:
   - 针对高流失风险群体制定保留计划
   - 关注关键影响因素的变化
"""

    with open('../results/analysis_report.md', 'w', encoding='utf-8') as f:
        f.write(report_content)

    print("分析报告已生成: ../results/analysis_report.md")


if __name__ == '__main__':
    # 1. Load the raw train/test CSVs from ../data.
    train_df, test_df = load_data()

    # 2. Exploratory analysis (saves figures under ../results).
    # NOTE: attrition_counts is a module-level global that
    # generate_report() reads directly — do not rename.
    data_analysis(train_df)
    attrition_counts = train_df['Attrition'].value_counts()

    # 3. Feature engineering: encoding, scaling, SMOTE balancing,
    # and RFECV feature selection.
    X_train, y_train, X_test, y_test, selector, scaler, ohe = feature_engineering(train_df, test_df)

    # 4. Model training. metrics_df, test_acc and roc_auc are globals
    # consumed by generate_report() — do not rename.
    model, metrics_df, train_acc, test_acc, roc_auc = train_model(X_train, y_train, X_test, y_test)

    # 5. Evaluation plots and metrics CSV. feat_importances is another
    # global consumed by generate_report().
    evaluate_model(model, X_train, y_train, X_test, y_test)
    feat_importances = pd.Series(model.feature_importances_, index=X_train.columns)

    # 6. Generate the Markdown report (reads the globals noted above,
    # so it must run last).
    generate_report()

    print("\n===== 分析流程完成 =====")