# -*- coding: utf-8 -*-
"""
Employee attrition prediction — advanced training script (clean + standardized logging).
- Feature engineering fully delegated to club.preprocess_for_training
- Dual-model training: LightGBM + CatBoost
- Optuna hyperparameter search (overfitting-resistant configuration)
- Optimal classification threshold chosen from OOF predictions (F1 now supported)
- Saves models + metadata (including the optimal threshold)

  Logging notes:
  - All former print output -> logger.info()
  - Errors/warnings -> logger.error() / logger.warning()
  - No real API calls are made, so log_api_call is never invoked
"""

import pandas as pd
import numpy as np
import lightgbm as lgb
import catboost as cb
import optuna
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
from sklearn.model_selection import StratifiedKFold
import os
import warnings
import club
import log_util  # 引入自定义日志工具
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, precision_recall_curve, auc


# Globally silence warnings — the hyperparameter tuning loops below are noisy.
warnings.filterwarnings('ignore')


def find_best_threshold(y_true, y_proba, metric='f1'):
    """
    Scan candidate classification thresholds and return the best one.

    Candidates span [0.1, 0.5] in steps of 0.01. The default metric is
    'f1' (suited to imbalanced positive/negative classes); 'accuracy' is
    also supported.

    Args:
        y_true: ground-truth binary labels.
        y_proba: predicted positive-class probabilities.
        metric: 'f1' or 'accuracy'.

    Returns:
        (best_threshold, best_score) tuple.

    Raises:
        ValueError: if metric is neither 'accuracy' nor 'f1'.
    """
    scorers = {'accuracy': accuracy_score, 'f1': f1_score}
    if metric not in scorers:
        raise ValueError("metric must be 'accuracy' or 'f1'")
    scorer = scorers[metric]

    best_thresh, best_score = 0.5, -1
    for candidate in np.arange(0.1, 0.51, 0.01):
        hard_preds = (y_proba >= candidate).astype(int)
        current = scorer(y_true, hard_preds)
        if current > best_score:
            best_score, best_thresh = current, candidate
    return best_thresh, best_score


def train_lgb_with_optuna(X, y, cat_cols, pos_weight, logger):
    """
    Tune LightGBM hyperparameters with Optuna.

    Objective: mean ROC-AUC over 5-fold stratified CV, with early stopping
    (80 rounds) inside each fold to limit overfitting. After the search,
    the final model is refit on the full dataset with the best parameters.

    Args:
        X: feature DataFrame (categorical columns already dtype 'category').
        y: binary target Series.
        cat_cols: categorical feature names passed to lgb.Dataset.
        pos_weight: scale_pos_weight value used to offset class imbalance.
        logger: logger object providing .info().

    Returns:
        (final_model, best_params, best_cv_auc) — final_model is a
        lgb.Booster trained on all of (X, y).
    """
    logger.info(" 开始 LightGBM 超参优化（平衡泛化与性能）...")

    def objective(trial):
        # Search space: conservative ranges (small learning rate, strong
        # L1/L2, subsampling) chosen to favor generalization.
        params = {
            'objective': 'binary',
            'boosting_type': 'gbdt',
            'random_state': 42,
            'verbose': -1,
            'metric': 'auc',
            'learning_rate': trial.suggest_float('learning_rate', 0.005, 0.02),
            'num_leaves': trial.suggest_int('num_leaves', 20, 50),
            'max_depth': trial.suggest_int('max_depth', 4, 7),
            'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 30, 100),
            'lambda_l1': trial.suggest_float('lambda_l1', 1.0, 10.0),
            'lambda_l2': trial.suggest_float('lambda_l2', 1.0, 10.0),
            'feature_fraction': trial.suggest_float('feature_fraction', 0.6, 0.9),
            'bagging_fraction': trial.suggest_float('bagging_fraction', 0.7, 0.9),
            'bagging_freq': trial.suggest_int('bagging_freq', 1, 3),
            'scale_pos_weight': pos_weight,  # offset class imbalance
        }

        # The boosting-round budget is itself tuned; early stopping below
        # may cut each fold short of this ceiling.
        num_boost = trial.suggest_int('num_boost_round', 600, 1200)

        # 5-fold stratified CV (fixed seed so every trial sees the same folds)
        skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=22)
        auc_scores = []

        for train_idx, val_idx in skf.split(X, y):
            X_tr, X_val = X.iloc[train_idx], X.iloc[val_idx]
            y_tr, y_val = y.iloc[train_idx], y.iloc[val_idx]

            # Build LightGBM datasets (categorical features declared explicitly)
            train_set = lgb.Dataset(X_tr, y_tr, categorical_feature=cat_cols)
            val_set = lgb.Dataset(X_val, y_val, reference=train_set)

            # Train with early stopping enabled
            model = lgb.train(
                params, train_set,
                valid_sets=[val_set],
                num_boost_round=num_boost,
                callbacks=[
                    lgb.early_stopping(stopping_rounds=80, verbose=False),
                    lgb.log_evaluation(period=0)  # suppress per-round training logs
                ]
            )
            # Score the held-out fold at the early-stopped iteration
            y_pred = model.predict(X_val, num_iteration=model.best_iteration)
            auc_scores.append(roc_auc_score(y_val, y_pred))

        return np.mean(auc_scores)

    # Create the Optuna study (maximize mean CV AUC)
    study = optuna.create_study(direction='maximize')
    study.optimize(objective, n_trials=60, show_progress_bar=True)

    # Merge the fixed settings back into the searched parameters.
    # NOTE(review): best_params still contains 'num_boost_round'; LightGBM
    # reads it from params (same value as the explicit argument below), and
    # main() later relies on this key for the OOF refits — confirm intended.
    best_params = study.best_params
    best_params.update({
        'objective': 'binary',
        'metric': 'auc',
        'boosting_type': 'gbdt',
        'random_state': 42,
        'verbose': -1,
        'scale_pos_weight': pos_weight
    })

    # Refit on the full dataset with the tuned round budget (no early
    # stopping here — there is no held-out set at this stage).
    dtrain = lgb.Dataset(X, y, categorical_feature=cat_cols)
    final_model = lgb.train(
        best_params,
        dtrain,
        num_boost_round=study.best_params['num_boost_round'],
        callbacks=[lgb.log_evaluation(period=0)]
    )

    logger.info(f"LightGBM 优化完成，最佳 CV AUC: {study.best_value:.5f}")
    return final_model, best_params, study.best_value


def train_catboost_with_optuna(X, y, cat_cols, pos_weight, logger):
    """
    Tune CatBoost hyperparameters with Optuna.

    Objective: mean ROC-AUC over 5-fold stratified CV with early stopping
    (100 rounds) per fold; the final model is then refit on the full data.

    Args:
        X: feature DataFrame.
        y: binary target Series.
        cat_cols: categorical feature names passed as cat_features.
        pos_weight: unused here — kept for signature parity with the
            LightGBM trainer; CatBoost balances classes via
            auto_class_weights='Balanced' instead.
        logger: logger object providing .info().

    Returns:
        (final_model, best_params, best_cv_auc).
    """
    logger.info("开始 CatBoost 超参优化（高泛化版）...")

    def objective(trial):
        # Search space plus fixed settings (Bayesian bootstrap, balanced
        # class weights, no temp-file output).
        params = {
            'iterations': trial.suggest_int('iterations', 400, 900),
            'learning_rate': trial.suggest_float('learning_rate', 0.008, 0.03),
            'depth': trial.suggest_int('depth', 3, 6),
            'l2_leaf_reg': trial.suggest_float('l2_leaf_reg', 5.0, 20.0),
            'random_strength': trial.suggest_float('random_strength', 1.0, 5.0),
            'bagging_temperature': trial.suggest_float('bagging_temperature', 0.5, 2.0),
            'leaf_estimation_iterations': trial.suggest_int('leaf_estimation_iterations', 5, 15),
            'grow_policy': 'Depthwise',
            'bootstrap_type': 'Bayesian',
            'auto_class_weights': 'Balanced',  # automatic class-imbalance weighting
            'eval_metric': 'AUC',
            'random_seed': 42,
            'verbose': False,
            'allow_writing_files': False  # prevent CatBoost temp-file output
        }

        # 5-fold stratified CV (fixed seed so every trial sees the same folds)
        skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
        auc_scores = []

        for train_idx, val_idx in skf.split(X, y):
            X_tr, X_val = X.iloc[train_idx], X.iloc[val_idx]
            y_tr, y_val = y.iloc[train_idx], y.iloc[val_idx]

            model = cb.CatBoostClassifier(**params)
            model.fit(
                X_tr, y_tr,
                cat_features=cat_cols,
                eval_set=(X_val, y_val),
                early_stopping_rounds=100,
                verbose=False
            )
            y_pred = model.predict_proba(X_val)[:, 1]
            auc_scores.append(roc_auc_score(y_val, y_pred))

        return np.mean(auc_scores)

    study = optuna.create_study(direction='maximize')
    study.optimize(objective, n_trials=40, show_progress_bar=True)

    # Merge the fixed (non-searched) settings back into the best parameters.
    # Bug fix: 'grow_policy' and 'auto_class_weights' were previously dropped
    # here, so the final refit (and the OOF refits in main that reuse these
    # params) silently trained WITHOUT the class balancing and tree policy
    # used during the CV search.
    best_params = study.best_params
    best_params.update({
        'grow_policy': 'Depthwise',
        'auto_class_weights': 'Balanced',
        'eval_metric': 'AUC',
        'random_seed': 42,
        'verbose': False,
        'allow_writing_files': False,
        'bootstrap_type': 'Bayesian'
    })

    # Refit on the full dataset (no eval set, so no early stopping here)
    final_model = cb.CatBoostClassifier(**best_params)
    final_model.fit(X, y, cat_features=cat_cols, verbose=False)

    logger.info(f"CatBoost 优化完成，最佳 CV AUC: {study.best_value:.5f}")
    return final_model, best_params, study.best_value


def main():
    """Run the full training pipeline.

    Steps: load ./data/train.csv, preprocess via club, tune LightGBM and
    CatBoost with Optuna, build 5-fold OOF predictions for a 0.6/0.4
    ensemble, pick the F1-optimal threshold, plot diagnostics, and save
    both models plus metadata under ./models.
    """
    # Initialize the logger (log files prefixed with "train")
    logger = log_util.APICallLogger(root_path=".", log_prefix="train")

    logger.info("预测模型开始运行")
    logger.info("开始员工流失预测模型训练流程...")

    # === 1. Load and preprocess the training data ===
    df = pd.read_csv('./data/train.csv')
    # Assumes the first column is the label (Attrition); the rest are features
    y = df.iloc[:, 0]
    X_raw = df.iloc[:, 1:]
    # Delegate standardized preprocessing (missing values, encoding,
    # sampling, ...) to the club module
    X, y = club.preprocess_for_training(y, X_raw)

    # === Plot the attrition class distribution ===
    # NOTE(review): the plot helpers save into ./plots — confirm that
    # directory exists before running.
    plot_attrition_distribution(y)

    # === 2. Split features into numeric vs. categorical ===
    num_cols = X.select_dtypes(include=[np.number]).columns.tolist()
    cat_cols = [col for col in X.columns if col not in num_cols]

    # Cast categoricals to 'category' dtype (required by LightGBM)
    for col in cat_cols:
        X[col] = X[col].astype('category')

    # Negative/positive sample ratio, used as scale_pos_weight
    pos_weight = (y == 0).sum() / (y == 1).sum()
    logger.info(f"正负样本比例 (scale_pos_weight): {pos_weight:.2f}")
    logger.info(f"训练集形状: {X.shape}, 正样本率: {y.mean():.4f}")

    # === 3. Train the LightGBM and CatBoost models separately ===
    lgb_model, lgb_params, lgb_auc = train_lgb_with_optuna(X, y, cat_cols, pos_weight, logger)
    cb_model, cb_params, cb_auc = train_catboost_with_optuna(X, y, cat_cols, pos_weight, logger)

    # === Build out-of-fold (OOF) predictions for threshold optimization ===
    logger.info("生成 OOF 预测以优化分类阈值...")
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    oof_lgb = np.zeros(len(y))
    oof_cb = np.zeros(len(y))

    for fold, (train_idx, val_idx) in enumerate(skf.split(X, y), 1):
        X_tr, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_tr, y_val = y.iloc[train_idx], y.iloc[val_idx]

        # LightGBM OOF: refit per fold with the tuned params
        dtrain = lgb.Dataset(X_tr, y_tr, categorical_feature=cat_cols)
        lgb_temp = lgb.train(
            lgb_params, dtrain,
            num_boost_round=lgb_params.get('num_boost_round', 1000),
            callbacks=[lgb.log_evaluation(period=0)]
        )
        # NOTE(review): no early stopping here, so best_iteration is
        # presumably unset and the full model is used — confirm intended.
        oof_lgb[val_idx] = lgb_temp.predict(X_val, num_iteration=lgb_temp.best_iteration)

        # CatBoost OOF: refit per fold with the tuned params
        cb_temp = cb.CatBoostClassifier(**cb_params)
        cb_temp.fit(X_tr, y_tr, cat_features=cat_cols, verbose=False)
        oof_cb[val_idx] = cb_temp.predict_proba(X_val)[:, 1]

    # Blend the two models' OOF predictions (fixed 0.6/0.4 weighting)
    ensemble_oof = 0.6 * oof_lgb + 0.4 * oof_cb

    # Pick the F1-maximizing threshold (better suited to imbalanced data)
    best_thresh, best_f1 = find_best_threshold(y, ensemble_oof, metric='f1')
    logger.info(f"最优分类阈值 (F1最大化): {best_thresh:.3f} → F1-score: {best_f1:.5f}")

    # === Feature-importance chart ===
    plot_feature_importance(lgb_model)

    # === ROC and PR curves ===
    plot_roc_pr_curves(y, ensemble_oof)

    # === Persist models and metadata ===
    os.makedirs('./models', exist_ok=True)
    lgb_model.save_model('./models/lgb_model.txt')
    cb_model.save_model('./models/cb_model.cbm')

    meta_info = {
        'feature_names': X.columns.tolist(),  # feature order (for test-time alignment)
        'categorical_features': cat_cols,  # categorical feature names
        'numerical_features': num_cols,  # numeric feature names
        'lgb_best_auc': lgb_auc,  # LightGBM cross-validation AUC
        'cb_best_auc': cb_auc,  # CatBoost cross-validation AUC
        'ensemble_oof_auc': roc_auc_score(y, ensemble_oof),  # blended OOF AUC
        'best_threshold_for_f1': best_thresh,  # decision threshold for inference
        'best_f1_score': best_f1,
        'pos_weight': pos_weight,
        'original_shape': (len(y), len(X.columns))
    }
    pd.to_pickle(meta_info, './models/meta_info.pkl')

    # Final summary
    logger.info("=" * 60)
    logger.info(f"LightGBM CV AUC: {lgb_auc:.5f}")
    logger.info(f"CatBoost CV AUC: {cb_auc:.5f}")
    logger.info(f"Ensemble OOF AUC: {meta_info['ensemble_oof_auc']:.5f}")
    logger.info(f"最优阈值下 F1-score: {best_f1:.5f} (阈值={best_thresh:.3f})")
    logger.info("模型与元信息已保存至 ./models/")
    logger.info("=" * 60)


def plot_attrition_distribution(y):
    """Save a count plot of the target class balance to ./plots.

    Fix: the output directory is now created up front — previously
    plt.savefig raised FileNotFoundError when ./plots did not exist
    (main only creates ./models).
    """
    plots_dir = './plots'
    os.makedirs(plots_dir, exist_ok=True)

    plt.figure()  # start from a fresh figure rather than any stale current one
    sns.countplot(x=y)
    plt.title('Attrition Distribution (0=No, 1=Yes)')
    plt.savefig(os.path.join(plots_dir, 'eda_attrition_dist.png'), dpi=150, bbox_inches='tight')
    plt.close()


# Feature-importance chart (LightGBM, split counts)
def plot_feature_importance(lgb_model):
    """Save a bar chart of the 20 most important LightGBM features.

    Importance is measured by split count. Fix: the output directory is now
    created up front — previously plt.savefig raised FileNotFoundError when
    ./plots did not exist.
    """
    plots_dir = './plots'
    os.makedirs(plots_dir, exist_ok=True)

    top_features = pd.DataFrame({
        'feature': lgb_model.feature_name(),
        'importance': lgb_model.feature_importance(importance_type='split')
    }).sort_values('importance', ascending=False).head(20)

    plt.figure(figsize=(8, 6))
    sns.barplot(data=top_features, x='importance', y='feature')
    plt.title('LightGBM Feature Importance (Split)')
    plt.savefig(os.path.join(plots_dir, 'lgb_feature_importance.png'), dpi=150, bbox_inches='tight')
    plt.close()


# ROC & Precision-Recall curves for the ensemble OOF predictions
def plot_roc_pr_curves(y, ensemble_oof):
    """Save side-by-side ROC and PR curves for the OOF ensemble scores.

    Fixes:
    - Create ./plots before saving (previously FileNotFoundError if absent).
    - bbox_inches='tight' on savefig, for consistency with the other plot
      helpers; stray trailing semicolons removed.
    """
    plots_dir = './plots'
    os.makedirs(plots_dir, exist_ok=True)

    fpr, tpr, _ = roc_curve(y, ensemble_oof)
    roc_auc = auc(fpr, tpr)

    precision, recall, _ = precision_recall_curve(y, ensemble_oof)
    pr_auc = auc(recall, precision)  # area under the precision-recall curve

    plt.figure(figsize=(12, 5))

    # Left panel: ROC curve with the chance diagonal
    plt.subplot(1, 2, 1)
    plt.plot(fpr, tpr, label=f'Ensemble OOF (AUC={roc_auc:.4f})')
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend()

    # Right panel: precision-recall curve
    plt.subplot(1, 2, 2)
    plt.plot(recall, precision, label=f'PR AUC={pr_auc:.4f}')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall Curve')
    plt.legend()

    plt.tight_layout()
    plt.savefig(os.path.join(plots_dir, 'ensemble_roc_pr_curves.png'), dpi=150, bbox_inches='tight')
    plt.close()


if __name__ == '__main__':
    main()  # entry point when executed as a script

