'''
0.4.1 采用 5 fold 交叉验证，并采用集成学习模型进行预测
0.4 vs 0.3
在 preprocess_data 中增加了异常值检测与处理（支持Z-score和IQR法，参数可控，默认Z-score>4视为异常并替换为中位数）。
在特征工程后增加了特征相关性分析与冗余特征去除（默认相关系数大于0.95的数值特征自动去除，支持日志输出）。
'''
# 导入必要库
import warnings
from collections import Counter  # 用于模型融合的投票统计

import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from catboost import CatBoostClassifier  # 集成学习模型
from flaml import AutoML  # 自动化机器学习工具
from lightgbm import LGBMClassifier  # 轻量级梯度提升机
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score, classification_report  # 评估指标
from sklearn.model_selection import KFold  # 交叉验证工具
from sklearn.pipeline import Pipeline  # 管道工具（当前未直接使用）
from sklearn.preprocessing import LabelEncoder, StandardScaler  # 标签编码和标准化
from xgboost import XGBClassifier  # 极端梯度提升机

warnings.filterwarnings('ignore')  # 关闭警告提示

# 配置flaml日志级别（避免训练信息过多）
import logging

# 设置flaml的日志级别为WARNING（仅显示警告及以上日志）
logging.getLogger('flaml.automl.logger').setLevel(logging.WARNING)

# Load data: training set, test set, and the submission template.
train = pd.read_csv('./train.csv', index_col='id')  # training set (contains the target variable)
test = pd.read_csv('./test.csv', index_col='id')    # test set (predictions required)
sub = pd.read_csv('./sample_submission.csv', index_col='id')  # sample submission format


# Step 1: data overview (inspect basic dataset characteristics)
def load_and_overview_data(train, test):
    """Print a quick overview of both datasets.

    Shows the train/test dimensions, the training feature list, the
    dtype/non-null summary, per-column missing-value counts, and the
    class balance of the 'Personality' target.
    """
    for name, frame in (('Train', train), ('Test', test)):
        print(f'{name} shape:', frame.shape)
    print('\nTrain columns:', list(train.columns))
    print('\nTrain info:')
    print(train.info())  # prints dtype/non-null table; info() itself returns None
    print('\nTrain missing values:')
    print(train.isnull().sum())
    print('\nTrain target value counts:')
    print(train['Personality'].value_counts())


# Step 2: data preprocessing (missing values, categorical cleanup, outlier handling)
def preprocess_data(train, test, outlier_method='zscore', z_thresh=4.0, handle_outlier='median',
                   report_outlier=True, plot_outlier=True, outlier_report_dir='outlier_report'):
    """
    Fill missing values in numeric/categorical features, normalize category
    strings, and optionally detect and handle outliers with detailed reports
    and diagnostic plots.

    NOTE: `train` and `test` are modified in place and also returned.
    Outlier handling is applied to the training set only (no test leakage).

    :param outlier_method: 'zscore' or 'iqr' — outlier detection method
    :param z_thresh: Z-score threshold (only used when outlier_method == 'zscore')
    :param handle_outlier: 'median' (replace with column median) or 'nan' (set to missing)
    :param report_outlier: whether to write per-column outlier CSV reports
    :param plot_outlier: whether to save boxplots and Z-score histograms
    :param outlier_report_dir: directory for reports and figures (created if needed)
    :return: (train, test, fill_info) — fill_info records imputation values for reproducibility
    """
    import os
    # Create the report directory only when some artifact will actually be written
    if (report_outlier or plot_outlier) and not os.path.exists(outlier_report_dir):
        os.makedirs(outlier_report_dir)
    num_cols = ['Time_spent_Alone', 'Social_event_attendance', 'Going_outside', 'Friends_circle_size', 'Post_frequency']  # numeric feature columns
    cat_cols_train = ['Stage_fear', 'Drained_after_socializing', 'Personality']  # train categorical columns (incl. target)
    cat_cols_test = ['Stage_fear', 'Drained_after_socializing']  # test categorical columns (no target)
    
    # Outlier detection & handling (training set only, to avoid data leakage)
    for col in num_cols:
        outlier_idx = None
        if outlier_method == 'zscore':
            col_z = (train[col] - train[col].mean()) / train[col].std()
            outlier_idx = col_z.abs() > z_thresh
            n_out = outlier_idx.sum()
            if report_outlier and n_out > 0:
                df_out = train.loc[outlier_idx, [col]].copy()
                df_out['zscore'] = col_z[outlier_idx]
                df_out['处理前'] = df_out[col]
                # Reported post-handling value (median is computed BEFORE replacement,
                # i.e. still including the outliers — same median is used below)
                if handle_outlier == 'median':
                    df_out['处理后'] = train[col].median()
                elif handle_outlier == 'nan':
                    df_out['处理后'] = np.nan
                df_out.to_csv(f'{outlier_report_dir}/outlier_{col}_zscore.csv', encoding='utf-8-sig')
                print(f"[Outlier-Zscore] {col}: {n_out} samples | 详细报告已保存")
            if plot_outlier:
                plt.figure(figsize=(8,4))
                sns.boxplot(x=train[col])
                plt.title(f'Boxplot: {col}')
                plt.savefig(f'{outlier_report_dir}/boxplot_{col}.png')
                plt.close()
                plt.figure(figsize=(8,4))
                plt.hist(col_z, bins=50, color='c', alpha=0.7)
                plt.axvline(z_thresh, color='r', linestyle='--')
                plt.axvline(-z_thresh, color='r', linestyle='--')
                plt.title(f'Z-score Distribution: {col}')
                plt.savefig(f'{outlier_report_dir}/zscore_{col}.png')
                plt.close()
            if n_out > 0:
                # Replace (or blank out) the flagged values
                if handle_outlier == 'median':
                    train.loc[outlier_idx, col] = train[col].median()
                elif handle_outlier == 'nan':
                    train.loc[outlier_idx, col] = np.nan
        elif outlier_method == 'iqr':
            # Tukey fences: outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
            Q1 = train[col].quantile(0.25)
            Q3 = train[col].quantile(0.75)
            IQR = Q3 - Q1
            lower = Q1 - 1.5 * IQR
            upper = Q3 + 1.5 * IQR
            outlier_idx = (train[col] < lower) | (train[col] > upper)
            n_out = outlier_idx.sum()
            if report_outlier and n_out > 0:
                df_out = train.loc[outlier_idx, [col]].copy()
                df_out['处理前'] = df_out[col]
                df_out['Q1'] = Q1
                df_out['Q3'] = Q3
                df_out['IQR'] = IQR
                if handle_outlier == 'median':
                    df_out['处理后'] = train[col].median()
                elif handle_outlier == 'nan':
                    df_out['处理后'] = np.nan
                df_out.to_csv(f'{outlier_report_dir}/outlier_{col}_iqr.csv', encoding='utf-8-sig')
                print(f"[Outlier-IQR] {col}: {n_out} samples | 详细报告已保存")
            if plot_outlier:
                plt.figure(figsize=(8,4))
                sns.boxplot(x=train[col])
                plt.title(f'Boxplot: {col}')
                plt.savefig(f'{outlier_report_dir}/boxplot_{col}.png')
                plt.close()
            if n_out > 0:
                if handle_outlier == 'median':
                    train.loc[outlier_idx, col] = train[col].median()
                elif handle_outlier == 'nan':
                    train.loc[outlier_idx, col] = np.nan

    # Impute numeric NaNs with training-set medians (avoids test-set leakage)
    num_medians = train[num_cols].median()
    train[num_cols] = train[num_cols].fillna(num_medians)
    test[num_cols] = test[num_cols].fillna(num_medians)
    
    # Normalize categories (lowercase, strip) and fill missing with 'missing'
    for col in cat_cols_train:
        train[col] = train[col].astype(str).str.strip().str.lower()  # unify string format
        train[col] = train[col].replace('nan', np.nan).fillna('missing')  # astype(str) turned NaN into the literal 'nan'
    for col in cat_cols_test:
        test[col] = test[col].astype(str).str.strip().str.lower()
        test[col] = test[col].replace('nan', np.nan).fillna('missing')
    
    # Record imputation metadata (useful for later validation / reproducibility)
    fill_info = {'num_medians': num_medians.to_dict(), 'cat_fill': 'missing'}
    return train, test, fill_info


# --- Enhanced feature engineering ---
def build_features(train, test, target_col='Personality',
                   add_interactions=True, add_cat_combo=True, add_bins=True,
                   add_stats=True, select_top_n=20, use_pca=True, pca_n=8,
                   use_target_encoding=True, use_freq_encoding=True):
    """
    Build the model input matrices with several optional feature-engineering steps.

    Steps (each toggled by a flag): pairwise numeric interactions, categorical
    combination, equal-frequency binning, global/grouped statistics, out-of-fold
    target encoding, out-of-fold frequency encoding, standardization, one-hot
    encoding, LGBM-importance feature selection, and PCA.

    :param train: preprocessed training DataFrame (must contain target_col)
    :param test: preprocessed test DataFrame
    :param target_col: name of the target column in `train`
    :param add_interactions: add pairwise products of numeric features
    :param add_cat_combo: add the concatenation of the two categorical features
    :param add_bins: add quartile bin indices (edges fit on the training set)
    :param add_stats: add global-relative and per-category-mean statistics
    :param select_top_n: keep the N most LGBM-important features (None to skip)
    :param use_pca: project the selected features onto pca_n PCA components
    :param pca_n: number of PCA components when use_pca is True
    :param use_target_encoding: add out-of-fold target-rate encodings
    :param use_freq_encoding: add out-of-fold category-frequency encodings
    :return: (X_train, y_train, X_test) as numpy arrays
    """
    num_cols = ['Time_spent_Alone', 'Social_event_attendance', 'Going_outside', 'Friends_circle_size', 'Post_frequency']
    cat_cols = ['Stage_fear', 'Drained_after_socializing']
    df_train = train.copy()
    df_test = test.copy()

    # 1. Pairwise numeric interaction features (products)
    if add_interactions:
        for i in range(len(num_cols)):
            for j in range(i+1, len(num_cols)):
                col_name = f'{num_cols[i]}_x_{num_cols[j]}'
                df_train[col_name] = df_train[num_cols[i]] * df_train[num_cols[j]]
                df_test[col_name] = df_test[num_cols[i]] * df_test[num_cols[j]]

    # 2. Combined categorical feature
    if add_cat_combo:
        combo_col = cat_cols[0] + '_' + cat_cols[1]
        df_train[combo_col] = df_train[cat_cols[0]].astype(str) + '_' + df_train[cat_cols[1]].astype(str)
        df_test[combo_col] = df_test[cat_cols[0]].astype(str) + '_' + df_test[cat_cols[1]].astype(str)
        cat_combo_cols = cat_cols + [combo_col]
    else:
        cat_combo_cols = cat_cols.copy()

    # 3. Equal-frequency (quartile) binning of numeric features
    if add_bins:
        for col in num_cols:
            # Bin edges are determined on the training set only
            _, bins = pd.qcut(df_train[col], 4, duplicates='drop', retbins=True)
            # The test set reuses the training-set edges
            df_train[f'{col}_bin'] = pd.cut(df_train[col], bins=bins, labels=False, include_lowest=True)
            df_test[f'{col}_bin'] = pd.cut(df_test[col], bins=bins, labels=False, include_lowest=True)
        bin_cols = [f'{col}_bin' for col in num_cols]
    else:
        bin_cols = []

    # 4. Statistical features
    if add_stats:
        # Global relative statistics (value vs train-set mean/max/min)
        for col in num_cols:
            global_mean = df_train[col].mean()
            global_max = df_train[col].max()
            global_min = df_train[col].min()
            df_train[f'{col}_vs_mean'] = df_train[col] - global_mean
            df_train[f'{col}_vs_max'] = df_train[col] - global_max
            df_train[f'{col}_vs_min'] = df_train[col] - global_min
            # The test set uses the training-set statistics (no leakage)
            df_test[f'{col}_vs_mean'] = df_test[col] - global_mean
            df_test[f'{col}_vs_max'] = df_test[col] - global_max
            df_test[f'{col}_vs_min'] = df_test[col] - global_min

        # Per-category means of each numeric feature
        for cat in cat_cols:
            for col in num_cols:
                grp_mean = df_train.groupby(cat)[col].transform('mean')
                df_train[f'{col}_mean_by_{cat}'] = grp_mean
                mean_map = df_train.groupby(cat)[col].mean().to_dict()
                df_test[f'{col}_mean_by_{cat}'] = df_test[cat].map(mean_map)

    # 5. Categorical encoding enhancements
    # 5.1 Out-of-fold target encoding (per-category rate of the 'extrovert' class;
    #     assumes the target was lower-cased in preprocess_data — TODO confirm)
    if use_target_encoding:
        kf_enc = KFold(n_splits=5, shuffle=True, random_state=42)  # same fold count as the CV below
        for cat in cat_cols:
            df_train[f'{cat}_target_enc'] = 0.5  # neutral prior / default value
            for train_enc_idx, val_enc_idx in kf_enc.split(df_train):
                # Rate is computed on the encoding-train fold only (OOF scheme)
                target_map = df_train.iloc[train_enc_idx].groupby(cat)[target_col].apply(
                    lambda x: x.value_counts(normalize=True).get('extrovert', 0.5)
                )
                # FIX: KFold yields POSITIONAL indices, but .loc indexes by LABEL.
                # Translate positions to index labels so the correct rows are
                # written even when the frame index ('id') is not 0..n-1;
                # .values on the RHS avoids label-based realignment.
                val_labels = df_train.index[val_enc_idx]
                df_train.loc[val_labels, f'{cat}_target_enc'] = (
                    df_train.iloc[val_enc_idx][cat].map(target_map).fillna(0.5).values
                )
            # The test set uses the encoding fitted on the full training data
            target_map_full = df_train.groupby(cat)[target_col].apply(
                lambda x: x.value_counts(normalize=True).get('extrovert', 0.5)
            )
            df_test[f'{cat}_target_enc'] = df_test[cat].map(target_map_full).fillna(0.5)

    # 5.2 Out-of-fold frequency encoding
    if use_freq_encoding:
        kf_enc = KFold(n_splits=5, shuffle=True, random_state=42)
        for cat in cat_cols:
            df_train[f'{cat}_freq_enc'] = 0.0  # default value
            for train_enc_idx, val_enc_idx in kf_enc.split(df_train):
                freq_map = df_train.iloc[train_enc_idx][cat].value_counts(normalize=True)
                # Positional -> label translation, as in the target encoding above
                val_labels = df_train.index[val_enc_idx]
                df_train.loc[val_labels, f'{cat}_freq_enc'] = (
                    df_train.iloc[val_enc_idx][cat].map(freq_map).fillna(0).values
                )
            # The test set uses frequencies from the full training data
            freq_map_full = df_train[cat].value_counts(normalize=True)
            df_test[f'{cat}_freq_enc'] = df_test[cat].map(freq_map_full).fillna(0)

    # 6. Standardize all numeric features (originals + engineered numeric columns)
    all_num_cols = num_cols + [c for c in df_train.columns if any(s in c for s in ['_x_', '_vs_', '_by_'])]
    scaler = StandardScaler()
    train_num = scaler.fit_transform(df_train[all_num_cols])
    test_num = scaler.transform(df_test[all_num_cols])

    # 7. One-hot encode categorical features (incl. combo and bin columns)
    from sklearn.preprocessing import OneHotEncoder

    cat_cols_used = cat_combo_cols + bin_cols
    # handle_unknown='ignore': unseen test categories encode as all-zero rows
    ohe = OneHotEncoder(sparse_output=False, handle_unknown='ignore')
    train_cat = ohe.fit_transform(df_train[cat_cols_used])  # NumPy array output
    test_cat = ohe.transform(df_test[cat_cols_used])        # NumPy array output

    # 8. Remaining engineered columns, taken raw.
    # NOTE(review): these substrings overlap with all_num_cols above, so the
    # '_x_'/'_vs_'/'_by_' features appear twice (scaled and raw). Kept as-is to
    # preserve the downstream feature matrix; consider de-duplicating.
    extra_cols = [c for c in df_train.columns if any(s in c for s in ['_x_', '_mean', '_max', '_min', '_by_', '_enc'])]
    train_extra = df_train[extra_cols].values if extra_cols else np.empty((len(df_train), 0))
    test_extra = df_test[extra_cols].values if extra_cols else np.empty((len(df_test), 0))

    # 9. Assemble the final matrices
    X_train = np.hstack([train_num, train_cat, train_extra])
    X_test = np.hstack([test_num, test_cat, test_extra])
    y_train = df_train[target_col].values  # target must exist before feature selection

    # 10. Feature selection by LGBM importances
    if select_top_n is not None and X_train.shape[1] > select_top_n:
        lgbm = LGBMClassifier(n_estimators=100, random_state=42)
        lgbm.fit(X_train, y_train)
        importances = lgbm.feature_importances_
        idx = np.argsort(importances)[::-1][:select_top_n]
        X_train = X_train[:, idx]
        X_test = X_test[:, idx]

    # 11. PCA projection
    if use_pca and X_train.shape[1] > pca_n:
        pca = PCA(n_components=pca_n, random_state=42)
        X_train = pca.fit_transform(X_train)
        X_test = pca.transform(X_test)

    return X_train, y_train, X_test


def remove_highly_correlated_features(X, feature_names, threshold=0.95, verbose=True):
    """
    Drop numeric features that are highly correlated with an earlier feature.

    Computes the absolute Pearson correlation matrix and, scanning only the
    upper triangle, removes every column whose correlation with any preceding
    column exceeds ``threshold`` (the earlier column of each pair is kept).

    :param X: feature matrix (numpy array), one column per feature
    :param feature_names: feature names matching the columns of X
    :param threshold: absolute correlation above which a feature is dropped
    :param verbose: if True, print the names of the dropped features
    :return: (reduced feature matrix, surviving feature names)
    """
    # pandas is already imported at module level; the redundant local import was removed
    df = pd.DataFrame(X, columns=feature_names)
    corr_matrix = df.corr().abs()
    # Upper triangle (k=1) so each pair is inspected exactly once
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    to_drop = [column for column in upper.columns if any(upper[column] > threshold)]
    if verbose and to_drop:
        print(f"[Correlation] Drop features (corr>{threshold}):", to_drop)
    df = df.drop(columns=to_drop)
    return df.values, [c for c in feature_names if c not in to_drop]

# -------------------- Data processing pipeline --------------------
# 1. Data overview (prints basic info)
load_and_overview_data(train, test)

# 2. Preprocess (fill missing values, handle outliers)
train, test, fill_info = preprocess_data(train, test)

# 3. Feature engineering (build model input matrices)
X_train, y_train, X_test = build_features(train, test, target_col='Personality')

# Correlation analysis / redundant-feature removal (numeric slice only)
num_cols = ['Time_spent_Alone', 'Social_event_attendance', 'Going_outside', 'Friends_circle_size', 'Post_frequency']
# NOTE(review): this assumes build_features returns [original numeric features, rest]
# in order. With its defaults (select_top_n=20, use_pca=True) the matrix consists of
# selected/PCA-transformed components, so the first 5 columns are NOT the original
# numeric features and the slicing below operates on arbitrary components — TODO
# confirm, or disable selection/PCA before relying on this step.
num_num = len(num_cols)
X_train_num, X_train_rest = X_train[:, :num_num], X_train[:, num_num:]
X_train_num_new, num_cols_new = remove_highly_correlated_features(X_train_num, num_cols, threshold=0.95)
# Apply the same surviving-column subset to the test set
X_test_num = X_test[:, :num_num]
X_test_num_new = pd.DataFrame(X_test_num, columns=num_cols)[num_cols_new].values
# Re-assemble the full feature matrices
X_train = np.hstack([X_train_num_new, X_train_rest])
X_test = np.hstack([X_test_num_new, X_test[:, num_num:]])
# 4. Label-encode the target (text classes -> integers)
le = LabelEncoder()
y_train_enc = le.fit_transform(y_train)  # encoded training target


# -------------------- LGBM model (KFold cross-validation) --------------------
print('\nLGBM 模型训练开始...')
# Manually tuned LGBM hyper-parameters
lgbm_params = {
    'n_estimators': 200,    # number of boosting rounds
    'learning_rate': 0.05,  # learning rate
    'max_depth': 7,         # maximum tree depth
    'random_state': 42      # seed for reproducibility
}
model = LGBMClassifier(**lgbm_params)

# 5-fold cross-validation to estimate generalisation performance
kf = KFold(n_splits=5, shuffle=True, random_state=42)  # shuffled 5-fold split
lgbm_oof_preds = np.zeros(len(X_train), dtype=int)     # out-of-fold class predictions
lgbm_oof_probs = np.zeros((len(X_train), len(le.classes_)))  # out-of-fold probabilities
fold_scores = []  # per-fold accuracies

for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
    # Split into train / validation folds (positional indexing on numpy arrays)
    X_tr, X_val = X_train[train_idx], X_train[val_idx]
    y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]
    
    # Fit on the training fold, evaluate on the held-out fold
    model.fit(X_tr, y_tr)
    val_pred = model.predict(X_val)       # fold class predictions
    val_prob = model.predict_proba(X_val) # fold probability predictions
    lgbm_oof_preds[val_idx] = val_pred    # store OOF predictions
    lgbm_oof_probs[val_idx] = val_prob    # store OOF probabilities
    acc = accuracy_score(y_val, val_pred) # fold accuracy
    print(f"Fold {fold+1} accuracy: {acc:.4f}")
    fold_scores.append(acc)

# Cross-validation summary
print("\n5折交叉验证平均准确率：", np.mean(fold_scores))
print("LGBM OOF分类报告：")
print(classification_report(y_train, le.inverse_transform(lgbm_oof_preds)))  # OOF vs ground truth

# Refit on the full training data and predict the test set
model.fit(X_train, y_train_enc)
lgbm_preds_enc = model.predict(X_test)
lgbm_preds = le.inverse_transform(lgbm_preds_enc)  # map back to original class labels
# Persist the final model
joblib.dump(model, 'lgbm_final_model.pkl')
# Build the LGBM-only prediction frame (standalone submission intentionally disabled below)
df_lgbm_pred = pd.DataFrame({'id': test.index, 'Personality': lgbm_preds})
df_lgbm_pred['Personality'] = df_lgbm_pred['Personality'].str.capitalize()
# df_lgbm_pred.to_csv('submission_lgbm.csv', index=False)

# -------------------- AutoML cross-validated training --------------------
print('\nAutoML 交叉验证训练开始...')
# Larger time budget and broader search space for AutoML
settings = {
    "time_budget": 360,  # time budget per fit, in seconds
    "task": 'classification',
    "log_file_name": 'flaml.log',
    "metric": 'accuracy',
    "estimator_list": [
        'lgbm', 'xgboost', 'catboost', 'rf', 'extra_tree', 'lrl1', 'lrl2'
    ],  # broader set of candidate estimators
    "n_jobs": -1,  # use all cores
    "seed": 42,
    "verbose": 1,
    # Optional: the search space could be customised further
    "auto_augment": True,
}
# OOF probability store, shape (n_samples, n_classes)
automl_oof_probs = np.zeros((len(X_train), len(le.classes_)))
fold_scores_automl = []  # per-fold accuracies

# Same 5-fold split (same seed) as the LGBM section, so OOF arrays are comparable
kf = KFold(n_splits=5, shuffle=True, random_state=42)

for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
    X_tr, X_val = X_train[train_idx], X_train[val_idx]
    y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]
    
    # A fresh AutoML search per fold, with identical settings
    automl_fold = AutoML()
    automl_fold.fit(X_train=X_tr, y_train=y_tr, **settings)
    
    # Record the validation-fold probabilities into the OOF array
    val_probs = automl_fold.predict_proba(X_val)
    automl_oof_probs[val_idx] = val_probs
    
    # Fold accuracy (argmax of probabilities -> class label)
    val_pred = le.inverse_transform(val_probs.argmax(axis=1))
    acc = accuracy_score(le.inverse_transform(y_val), val_pred)
    print(f"AutoML Fold {fold+1} accuracy: {acc:.4f}")
    fold_scores_automl.append(acc)

# AutoML cross-validation summary
print("\nAutoML 5折交叉验证平均准确率：", np.mean(fold_scores_automl))

# Final AutoML model trained on the full data (used for test-set predictions)
print('\nAutoML 全量训练开始...')
automl_final = AutoML()
automl_final.fit(X_train=X_train, y_train=y_train_enc, **settings)
automl_probs = automl_final.predict_proba(X_test)  # test-set probabilities

# Persist the OOF probabilities and the final model
joblib.dump(automl_oof_probs, 'automl_oof_probs.pkl')
joblib.dump(automl_final, 'automl_final_model.pkl')

# Training-set evaluation from OOF probabilities (leakage-free estimate,
# instead of evaluating the fully-trained model on its own training data)
automl_train_preds_enc = automl_oof_probs.argmax(axis=1)
automl_train_preds = le.inverse_transform(automl_train_preds_enc)
print('\nAutoML OOB训练集评估：')
print(classification_report(y_train, automl_train_preds))
print('AutoML OOB训练集准确率：', accuracy_score(y_train, automl_train_preds))


# -------------------- Two-model probability blending --------------------
print('\n二模型概率加权融合开始...')

# Leakage-free training evaluation: blend the two OOF probability arrays
blend_oof_probs = 0.5 * automl_oof_probs + 0.5 * lgbm_oof_probs  # equal-weight average
blend_oof_preds_enc = blend_oof_probs.argmax(axis=1)
blend_oof_preds = le.inverse_transform(blend_oof_preds_enc)
print('\n二模型OOB融合训练集评估：')
print(classification_report(y_train, blend_oof_preds))
print('二模型OOB融合训练集准确率：', accuracy_score(y_train, blend_oof_preds))

# Test-set blend uses the two models refit on the full training data
lgbm_probs = model.predict_proba(X_test)  # LGBM full-data probabilities
blend_probs = 0.5 * automl_probs + 0.5 * lgbm_probs  # automl_probs comes from automl_final
ensemble2_preds = le.inverse_transform(blend_probs.argmax(axis=1))  # probabilities -> class labels

# Save the blended predictions as the submission
df_ensemble2 = pd.DataFrame({'id': test.index, 'Personality': ensemble2_preds}) 
df_ensemble2['Personality'] = df_ensemble2['Personality'].str.capitalize()
df_ensemble2.to_csv('submission.csv', index=False)

# -------------------- Result analysis & visualization --------------------
def analyze_and_visualize_results(model, X_train, y_train, lgbm_oof_preds, le, train, num_cols, df_train=None):
    """
    Analyse and visualise results: feature importances, SHAP summary,
    per-subset performance, and confusion matrices. All figures are saved
    to the working directory.

    :param model: trained LGBM model
    :param X_train: training feature matrix
    :param y_train: training labels (original string classes)
    :param lgbm_oof_preds: LGBM out-of-fold predictions (label-encoded)
    :param le: fitted LabelEncoder
    :param train: original (preprocessed) training DataFrame
    :param num_cols: numeric feature names
    :param df_train: feature-engineered DataFrame, if available

    NOTE(review): when feature selection/PCA was applied upstream, X_train has
    fewer columns than the feature_names list built below, so the importance
    and SHAP plots may mislabel or fail — failures are caught and reported.
    """
    print('\n[结果分析与可视化]')
    # 1. Bar chart of the LGBM feature importances (top 30)
    try:
        if df_train is not None:
            feature_names = num_cols + [c for c in df_train.columns if c not in num_cols + ['Personality']]
        else:
            # Fall back to generic names f0..fn matching X_train's width
            feature_names = [f'f{i}' for i in range(X_train.shape[1])]
        if hasattr(model, 'feature_importances_'):
            importances = model.feature_importances_
            plt.figure(figsize=(10,6))
            indices = np.argsort(importances)[::-1][:30]  # top-30 most important features only
            plt.barh(np.array(feature_names)[indices][::-1], importances[indices][::-1])
            plt.title('LGBM Feature Importances (Top 30)')
            plt.tight_layout()
            plt.savefig('lgbm_feature_importance.png')
            plt.close()
            print('LGBM特征重要性图已保存为 lgbm_feature_importance.png')
    except Exception as e:
        print('LGBM特征重要性可视化失败:', e)
    # 2. SHAP global explanation (optional dependency; skipped if unavailable)
    try:
        import shap
        explainer = shap.TreeExplainer(model)
        shap_values = explainer.shap_values(X_train)
        plt.figure()
        shap.summary_plot(shap_values, X_train, feature_names=feature_names, show=False)
        plt.tight_layout()
        plt.savefig('lgbm_shap_summary.png')
        plt.close()
        print('LGBM SHAP全局解释图已保存为 lgbm_shap_summary.png')
    except Exception as e:
        print('SHAP可视化失败（如未安装shap可忽略）:', e)
    # 3. Per-subset performance, grouped by the key categorical features
    for group_col in ['Stage_fear', 'Drained_after_socializing']:
        if group_col in train.columns:
            print(f'\n[子集分析] 按 {group_col} 分组:')
            for val in train[group_col].unique():
                idx = train[group_col] == val
                if idx.sum() == 0:
                    continue
                y_true = y_train[idx]
                y_pred = le.inverse_transform(lgbm_oof_preds[idx])
                acc = accuracy_score(y_true, y_pred)
                print(f'  {group_col}={val}: 样本数={idx.sum()}  准确率={acc:.4f}')
                print(classification_report(y_true, y_pred))
                # Confusion matrix heatmap for this subset
                from sklearn.metrics import confusion_matrix
                cm = confusion_matrix(y_true, y_pred, labels=le.classes_)
                plt.figure(figsize=(4,3))
                sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=le.classes_, yticklabels=le.classes_)
                plt.title(f'Confusion Matrix: {group_col}={val}')
                plt.xlabel('Predicted')
                plt.ylabel('True')
                plt.tight_layout()
                plt.savefig(f'confusion_{group_col}_{val}.png')
                plt.close()
                print(f'  混淆矩阵已保存: confusion_{group_col}_{val}.png')
# Run the post-hoc analysis on the trained LGBM model and its OOF predictions.
# df_train is not passed, so generic feature names (f0..fn) are used in the plots.
analyze_and_visualize_results(model, X_train, y_train, lgbm_oof_preds, le, train, num_cols)