# 1. 导入必要库（小白：库是工具包，每个库有特定功能）
import pandas as pd
import numpy as np
import os
import sys
import datetime
import joblib
from pathlib import Path
# 机器学习相关库
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import (accuracy_score, confusion_matrix, classification_report,
                             precision_score, recall_score, f1_score)
from sklearn.feature_selection import SelectFromModel
from xgboost import XGBClassifier  # 比随机森林更优的分类模型，助于提升准确率
from imblearn.over_sampling import SMOTE  # 处理类别不平衡工具（流失样本少，用这个补）
# 可视化库
import matplotlib.pyplot as plt
import seaborn as sns

# Add the project root directory to the Python path so the `util` package resolves.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Import the project logging helper from util.
from util.logUtil import Logger


def create_folders():
    """Ensure the output directories (model/, log/, data/) exist."""
    for name in ('model', 'log', 'data'):
        # parents/exist_ok make this a no-op when the folder is already there.
        Path(name).mkdir(parents=True, exist_ok=True)


# ============================== Main entry point (runs every step end to end) ==============================
def main():
    """Run the complete attrition-model training pipeline.

    Steps: create folders -> init logger -> load data -> preprocess ->
    select features -> tune models -> evaluate & persist. Any exception
    is logged with a full traceback and re-raised.
    """
    create_folders()
    logger = init_logger()
    try:
        x_train, x_test, y_train, y_test = load_and_explore_data(logger)
        x_train_prep, x_test_prep, y_bal, preprocessor = preprocess_data(
            x_train, x_test, y_train, logger
        )
        x_train_sel, x_test_sel, selector, kept_features = select_features(
            x_train_prep, y_bal, x_test_prep, logger
        )
        model, model_name, results = build_and_tune_model(x_train_sel, y_bal, logger)
        accuracy, y_pred = evaluate_and_save_model(
            model, model_name, results, x_test_sel, y_test, kept_features, logger
        )
        sep = '=' * 50
        logger.info(sep)
        logger.info('🎉 人才流失预测模型训练全部完成！')
        logger.info(f'📊 最终测试集准确率：{accuracy * 100:.2f}%')
        logger.info(f'💾 模型保存路径：model/（带时间戳的.pkl文件）')
        logger.info(f'🖼️ 图表保存路径：data/（5类分析图）')
        logger.info(f'📄 日志保存路径：log/（训练过程记录）')
        logger.info(sep)
        print(f'\n🎉 训练完成！最终准确率：{accuracy * 100:.2f}%')
        print(f'💡 模型在 model 文件夹，图表在 data 文件夹，日志在 log 文件夹')
    except Exception as e:
        # Record the full stack trace, point the user at the log folder, re-raise.
        logger.error(f'❌ 训练过程出错！错误原因：{str(e)}', exc_info=True)
        print(f'\n❌ 训练出错：{str(e)}')
        print('💡 请查看 log 文件夹里的日志文件，找详细错误原因')
        raise


def init_logger():
    """Create a timestamped file logger under log/ and emit a startup banner.

    Returns the logger produced by the project `Logger` helper.
    """
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    base_name = f'attrition_training_{stamp}'
    log_dir = Path('log')
    log_path = log_dir / f'{base_name}.log'
    logger = Logger(
        root_path=str(log_dir.absolute()),
        log_name=base_name,
        level='info',
    ).get_logger()
    banner = '=' * 50
    logger.info(banner)
    logger.info("🎉 人才流失预测模型训练开始")
    logger.info(f"📅 训练时间：{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    logger.info(f"📄 日志文件保存路径：{log_path.absolute()}")
    logger.info(banner)
    return logger


def load_and_explore_data(logger):
    """Load train/test CSVs, split off the target, and drop constant columns.

    Also stores the remaining column names in the module-level
    `feature_name`. Returns (x_train, x_test, y_train, y_test).
    Raises FileNotFoundError when either CSV is missing.
    """
    try:
        train_df = pd.read_csv('../data/train.csv')
        test_df = pd.read_csv('../data/test.csv')
        logger.info("✅ 成功加载训练集和测试集！")

        y_train = train_df['Attrition']
        x_train = train_df.drop('Attrition', axis=1)
        y_test = test_df['Attrition']
        x_test = test_df.drop('Attrition', axis=1)
        logger.info(f"📊 数据维度：")
        logger.info(f"   - 训练集：{x_train.shape}（{x_train.shape[0]}行数据，{x_train.shape[1]}个特征）")
        logger.info(f"   - 测试集：{x_test.shape}（{x_test.shape[0]}行数据，{x_test.shape[1]}个特征）")

        # Total number of missing cells in each split.
        logger.info(f"❌ 缺失值情况：")
        logger.info(f"   - 训练集：{x_train.isnull().sum().sum()}个（完美！）")
        logger.info(f"   - 测试集：{x_test.isnull().sum().sum()}个（完美！）")

        # These two columns hold the same value for every sample, so they
        # carry no signal for the model.
        constant_cols = ["Over18", "StandardHours"]
        x_train = x_train.drop(constant_cols, axis=1)
        x_test = x_test.drop(constant_cols, axis=1)
        logger.info(f"🗑️ 删除无意义特征：{constant_cols}（这些特征所有样本值相同，没用）")

        global feature_name
        feature_name = list(x_train.columns)
        return x_train, x_test, y_train, y_test
    except FileNotFoundError as e:
        logger.error(f"❌ 数据文件没找到！错误原因：{e}")
        logger.error("💡 请检查train.csv和test.csv是否和代码在同一目录，或修改代码里的文件路径")
        raise


def preprocess_data(X_train, X_test, y_train, logger):
    """Scale numeric / one-hot encode categorical features and balance classes.

    Fits a ColumnTransformer on the training split only, applies it to
    both splits, oversamples the minority class with SMOTE, persists the
    fitted preprocessor to model/preprocessor.pkl, and records the
    expanded column names in the module-level `all_feature_names`.
    Returns (x_train_balanced, x_test_prep, y_train_balanced, preprocessor).
    """
    logger.info("🔧 开始数据预处理（让模型能读懂数据）")
    num_cols = X_train.select_dtypes(include=["int64", "float64"]).columns.tolist()
    cat_cols = X_train.select_dtypes(include=["object"]).columns.tolist()
    logger.info(f"📝 特征分类：")
    logger.info(f"   - 数值型特征（{len(num_cols)}个）：{num_cols}")
    logger.info(f"   - 分类型特征（{len(cat_cols)}个）：{cat_cols}")

    # Numeric columns are standardized; categorical columns are one-hot
    # encoded, ignoring categories unseen at fit time.
    preprocessor = ColumnTransformer(
        transformers=[
            ("num", Pipeline(steps=[('scaler', StandardScaler())]), num_cols),
            ("cat", Pipeline(steps=[("onehot", OneHotEncoder(handle_unknown="ignore"))]), cat_cols),
        ]
    )

    x_train_prep = preprocessor.fit_transform(X_train)
    x_test_prep = preprocessor.transform(X_test)

    # Oversample the minority (attrition) class on the training split only.
    x_train_balanced, y_train_balanced = SMOTE(random_state=42).fit_resample(x_train_prep, y_train)
    before = y_train.value_counts()
    after = pd.Series(y_train_balanced).value_counts()
    logger.info(f"⚖️ 用SMOTE平衡训练集：")
    logger.info(f"   - 平衡前：未流失{before[0]}个，流失{before[1]}个")
    logger.info(f"   - 平衡后：未流失{after[0]}个，流失{after[1]}个")

    preprocessor_path = Path('model') / 'preprocessor.pkl'
    joblib.dump(preprocessor, preprocessor_path)
    logger.info(f"💾 预处理工具已保存到：{preprocessor_path.absolute()}")

    # Expose the post-encoding feature names for the selection step.
    global all_feature_names
    ohe = preprocessor.transformers_[1][1].named_steps["onehot"]
    all_feature_names = num_cols + ohe.get_feature_names_out(cat_cols).tolist()
    return x_train_balanced, x_test_prep, y_train_balanced, preprocessor


def select_features(x_train, y_train, x_test, logger):
    """Keep features whose random-forest importance is above the mean.

    Fits a SelectFromModel selector on the (balanced) training data,
    transforms both splits, saves the selector to model/, and maps the
    kept columns back to names via the module-level `all_feature_names`.
    Returns (x_train_selected, x_test_selected, selector, selected_features).
    """
    logger.info("🎯 开始特征选择（挑有用的特征，丢没用的）")
    selector = SelectFromModel(
        estimator=RandomForestClassifier(random_state=42),
        threshold='mean',
    )
    x_train_selected = selector.fit_transform(x_train, y_train)
    x_test_selected = selector.transform(x_test)

    # Boolean mask: True = feature kept, False = dropped.
    keep_mask = selector.get_support()
    selected_features = [feat for feat, keep in zip(all_feature_names, keep_mask) if keep]
    logger.info(f"📊 特征选择结果：")
    logger.info(f"   - 原始特征数：{len(all_feature_names)}个")
    logger.info(f"   - 保留特征数：{len(selected_features)}个")
    logger.info(f"   - 保留的特征：{selected_features}")

    selector_path = Path("model") / "feature_selector.pkl"
    joblib.dump(selector, selector_path)
    logger.info(f"💾 特征选择器已保存到：{selector_path.absolute()}")

    return x_train_selected, x_test_selected, selector, selected_features


# ============================== Build + tune models (pick the most accurate one) ==============================
def build_and_tune_model(x_train, y_train, logger):
    """Grid-search XGBoost and RandomForest; keep the best estimator.

    Runs a 3-fold cross-validated GridSearchCV per model, scored on
    accuracy. Returns (best_model, best_model_name, model_results) where
    model_results maps model name -> best cross-validated accuracy.
    """
    logger.info("🚀 开始构建和调优模型（找最准的参数组合）")
    candidates = {
        'XGBoost': XGBClassifier(
            objective='binary:logistic',
            eval_metric='logloss',
            random_state=42,
        ),
        'RandomForest': RandomForestClassifier(random_state=42, class_weight='balanced'),
    }

    search_spaces = {
        "XGBoost": {
            "n_estimators": [200, 300],    # number of trees
            "max_depth": [3, 5],           # shallow trees limit overfitting
            "learning_rate": [0.05, 0.1],  # boosting step size
            "subsample": [0.8, 1.0],       # row sampling per tree
        },
        "RandomForest": {
            "n_estimators": [200, 300],
            "max_depth": [None, 10],       # None = unlimited depth
            "min_samples_split": [2, 5],
        },
    }

    best_model = None
    best_model_name = ""
    best_accuracy = 0
    model_results = {}
    for name, estimator in candidates.items():
        logger.info(f"🔍 正在调优 {name} 模型...")
        search = GridSearchCV(
            estimator=estimator,
            param_grid=search_spaces[name],
            cv=3,                # 3-fold cross-validation
            scoring="accuracy",
            n_jobs=-1,           # use every available CPU core
            verbose=1,           # show search progress
        )
        # Fit on the (balanced) training data to find the best parameters.
        search.fit(x_train, y_train)

        model_results[name] = search.best_score_
        logger.info(f"✅ {name} 调优完成：")
        logger.info(f"   - 最佳参数：{search.best_params_}")
        logger.info(f"   - 交叉验证准确率：{search.best_score_ * 100:.2f}%")

        # Track the model with the highest cross-validated accuracy.
        if search.best_score_ > best_accuracy:
            best_accuracy = search.best_score_
            best_model = search.best_estimator_
            best_model_name = name

    sep = "=" * 50
    logger.info(sep)
    logger.info(f"🏆 模型对比结果：")
    for name, acc in model_results.items():
        logger.info(f"   - {name}：{acc * 100:.2f}%")
    logger.info(f"🏆 最佳模型：{best_model_name}（准确率：{best_accuracy * 100:.2f}%）")
    logger.info(sep)

    return best_model, best_model_name, model_results


def plot_all_charts(best_model, model_results, x_test, y_test, y_pred, selected_features, logger):
    """Generate and save the five analysis charts into the data/ folder.

    Charts: model-accuracy comparison bars, age-vs-salary scatter,
    tenure-vs-attrition-rate line, top-15 feature importances, and a
    confusion-matrix heatmap. Every file name carries a timestamp.

    Args:
        best_model: fitted classifier exposing `feature_importances_`.
        model_results: dict of model name -> cross-validated accuracy.
        x_test, y_test: test split (x_test unused here; kept for interface).
        y_pred: predictions for y_test (confusion matrix input).
        selected_features: names matching best_model's input columns.
        logger: logger for progress messages.
    """
    logger.info("🖼️ 开始生成所有分析图表（散点图、折线图、对比图等）")
    # Fonts that can render the Chinese labels; keep minus signs legible.
    plt.rcParams['font.sans-serif'] = ['WenQuanYi Zen Hei', 'SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.rcParams['figure.dpi'] = 300
    current_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')

    # -------------------------- Chart 1: model accuracy comparison --------------------------
    def plot_model_comparison():
        """Bar chart comparing cross-validated accuracy of each model."""
        model_names = list(model_results.keys())
        accuracies = [acc * 100 for acc in model_results.values()]
        plt.figure(figsize=(8, 5))
        sns.barplot(x=model_names, y=accuracies, palette='Blues')
        for i, acc in enumerate(accuracies):
            # fontsize, not fontdict: fontdict must be a dict, 12 is invalid.
            plt.text(i, acc + 0.5, f'{acc:.2f}%', ha='center', fontsize=12)
        plt.xlabel('模型名称', fontsize=12)
        plt.ylabel('交叉验证准确率（%）', fontsize=12)
        plt.title('不同模型准确率对比', fontsize=14, fontweight='bold')
        plt.ylim(85, 95)
        save_path = Path('data') / f'model_comparison_{current_time}.png'
        plt.savefig(save_path, bbox_inches='tight')
        plt.close()
        logger.info(f'  - 模型对比图已保存：{save_path.name}')

    # -------------------------- Chart 2: age vs monthly income scatter --------------------------
    def plot_salary_age_scatter():
        """Scatter of age vs monthly income, colored by attrition status."""
        train_df = pd.read_csv('../data/train.csv')
        plot_df = train_df[['Age', 'MonthlyIncome', 'Attrition']].copy()
        plot_df['Attrition_Label'] = plot_df['Attrition'].map({0: '未流失', 1: '流失'})
        plt.figure(figsize=(10, 6))
        sns.scatterplot(
            data=plot_df,
            x='Age',
            y='MonthlyIncome',
            hue='Attrition_Label',
            palette=['green', 'red'],
            alpha=0.6
        )
        plt.xlabel('年龄', fontsize=12)
        plt.ylabel('月薪（美元）', fontsize=12)
        plt.title('年龄 vs 月薪 散点图（按流失状态分组）', fontsize=14, fontweight='bold')
        plt.legend(title="员工状态")
        save_path = Path('data') / f'salary_age_scatter_{current_time}.png'
        plt.savefig(save_path, bbox_inches='tight')
        plt.close()
        logger.info(f'   - 年龄月薪散点图已保存：{save_path.name}')

    # -------------------------- Chart 3: tenure vs attrition rate line --------------------------
    def plot_tenure_attrition_rate():
        """Line chart: attrition rate by years at the company."""
        # Same path as the rest of the script (was a bare "train.csv").
        train_df = pd.read_csv('../data/train.csv')
        # Per-tenure group: sample count and number of leavers.
        tenure_attr = train_df.groupby("YearsAtCompany")["Attrition"].agg(["count", "sum"])
        tenure_attr["attrition_rate"] = tenure_attr["sum"] / tenure_attr["count"] * 100
        # Drop buckets with fewer than 5 samples to avoid noisy spikes.
        tenure_attr = tenure_attr[tenure_attr["count"] >= 5]

        plt.figure(figsize=(12, 6))
        sns.lineplot(
            x=tenure_attr.index,
            y=tenure_attr["attrition_rate"],
            marker="o",
            linewidth=2,
            color="orange"
        )
        plt.xlabel("在公司工作年限（年）", fontsize=12)
        plt.ylabel("流失率（%）", fontsize=12)
        plt.title("工作年限 vs 员工流失率 折线图", fontsize=14, fontweight="bold")
        plt.grid(True, alpha=0.3)
        save_path = Path("data") / f"tenure_attrition_line_{current_time}.png"
        plt.savefig(save_path, bbox_inches="tight")
        plt.close()
        logger.info(f"   - 工作年限流失率折线图已保存：{save_path.name}")

    # -------------------------- Chart 4: feature importance --------------------------
    def plot_feature_importance():
        """Horizontal bars of the 15 most important selected features."""
        # Both XGBoost and RandomForest expose feature_importances_, so the
        # original if/else that read the same attribute twice is unnecessary.
        importances = best_model.feature_importances_
        importance_df = pd.DataFrame({
            'Feature': selected_features,
            'Importance': importances
        }).sort_values('Importance', ascending=False).head(15)

        plt.figure(figsize=(12, 8))
        sns.barplot(
            data=importance_df,
            x='Importance',
            y='Feature',
            palette='viridis'
        )
        plt.xlabel('特征重要性', fontsize=12)
        plt.ylabel('特征名称', fontsize=12)
        plt.title('人才流失预测 - 前15个重要特征', fontsize=14, fontweight='bold')
        save_path = Path('data') / f'feature_importance_{current_time}.png'
        plt.savefig(save_path, bbox_inches='tight')
        plt.close()
        logger.info(f'   - 特征重要性图已保存：{save_path.name}')

    # -------------------------- Chart 5: confusion matrix heatmap --------------------------
    def plot_confusion_matrix():
        """Heatmap of the confusion matrix (true vs predicted labels)."""
        cm = confusion_matrix(y_test, y_pred)
        labels = ['未流失', '流失']
        plt.figure(figsize=(8, 6))
        sns.heatmap(
            cm,
            annot=True,
            fmt='d',        # integer cell counts; 'Blues' was an invalid fmt string
            cmap='Blues',   # the colormap the original fmt='Blues' intended
            xticklabels=labels,
            yticklabels=labels
        )
        plt.xlabel('预测值', fontsize=12)
        plt.ylabel('真实值', fontsize=12)
        plt.title('人才流失预测混淆矩阵', fontsize=14, fontweight='bold')
        save_path = Path('data') / f'confusion_matrix_{current_time}.png'
        plt.savefig(save_path, bbox_inches='tight')
        plt.close()
        logger.info(f'   - 混淆矩阵图已保存：{save_path.name}')

    # Draw every chart. In the original these calls sat inside the innermost
    # nested helper (which was itself never called), so no chart was produced;
    # the scatter helper was also misspelled ("sallary") at its definition.
    plot_model_comparison()
    plot_salary_age_scatter()
    plot_tenure_attrition_rate()
    plot_feature_importance()
    plot_confusion_matrix()
    logger.info('✅ 所有图表已保存到 data 文件夹！')


# ============================== Evaluate + persist the best model ==============================
def evaluate_and_save_model(best_model, best_model_name, model_results, x_test, y_test, selected_features, logger):
    """Score the best model on the test split, save it, and draw all charts.

    Logs accuracy/precision/recall/F1 plus the full classification report,
    dumps the model to model/ under a timestamped name, then delegates
    chart generation to plot_all_charts. Returns (accuracy, y_pred).
    """
    logger.info('📋 开始评估最佳模型（用测试集“考试”）')
    y_pred = best_model.predict(x_test)
    acc = accuracy_score(y_test, y_pred)
    prec = precision_score(y_test, y_pred)
    rec = recall_score(y_test, y_pred)
    f1_val = f1_score(y_test, y_pred)
    sep = '=' * 50
    logger.info(sep)
    logger.info(f'🏆 最佳模型（{best_model_name}）测试集评估结果：')
    logger.info(f'   - 准确率：{acc * 100:.2f}%（整体预测对的比例）')
    logger.info(f'   - 精准率：{prec * 100:.2f}%（预测流失的员工中，真流失的比例）')
    logger.info(f'   - 召回率：{rec * 100:.2f}%（真流失的员工中，被预测对的比例）')
    logger.info(f'   - F1分数：{f1_val * 100:.2f}%（精准率和召回率的平衡）')
    logger.info(sep)

    # Per-class precision/recall/F1 breakdown.
    report = classification_report(
        y_test, y_pred,
        target_names=['未流失', '流失'],
        digits=2
    )
    print('\n📄 详细分类报告：')
    print(report)
    logger.info(f'📄 详细分类报告：\n{report}')

    # Flag whether the 90% accuracy target was reached.
    if acc >= 0.9:
        logger.info('🎉 模型准确率达到90%以上，满足要求！')
    else:
        logger.warning('⚠️ 模型准确率未达到90%，建议调整超参数或增加特征工程！')
        logger.warning('💡 调整建议：1. 扩大超参数范围；2. 增加交互特征（如月薪*工作年限）；3. 换用更复杂的模型（如LightGBM）')

    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    model_path = Path('model') / f"best_attrition_model_{stamp}_{best_model_name}.pkl"
    joblib.dump(best_model, model_path)
    logger.info(f'💾 最佳模型已保存到：{model_path.absolute()}')

    # Generate every analysis chart for the final model.
    plot_all_charts(best_model, model_results, x_test, y_test, y_pred, selected_features, logger)
    return acc, y_pred


if __name__ == '__main__':
    # Pre-declare the module-level globals mutated during training.
    feature_name = []
    # Fixed typo: was `all_feature_name`, which never matched the
    # `all_feature_names` global actually read in select_features
    # (it only worked because preprocess_data creates the global first).
    all_feature_names = []
    selected_features_list = []
    main()
