from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.preprocessing import LabelEncoder
import pandas as pd


def auto_select_features(df, target_col='Attrition', top_k=20):
    """
    Score each feature's influence on the target variable with three
    complementary methods and return a recommended feature subset.

    Methods:
      * Pearson correlation (numeric features only)
      * Chi-squared test (on one-hot encoded features)
      * Mutual information (on one-hot encoded features)

    Parameters:
        df: source DataFrame that includes the target column
        target_col: name of the target column (default 'Attrition')
        top_k: number of top features kept per method

    Returns:
        list of column names recommended for modelling — the union of
        the three per-method top-k lists, duplicates removed (order is
        not guaranteed because a set is used for deduplication)
    """

    df = df.copy()

    # Step 1. Encode the target as 0/1 when stored as 'Yes'/'No' strings;
    # values outside that mapping would become NaN — assumes a clean binary
    # target (TODO confirm against the dataset).
    y = df[target_col].map({'Yes': 1, 'No': 0}) if df[target_col].dtype == 'object' else df[target_col]

    # Step 2. Separate features from the label.
    X = df.drop(columns=[target_col])

    # Step 3. Numeric features -> absolute Pearson correlation with the target.
    numeric_cols = X.select_dtypes(include=['int64', 'float64']).columns
    pearson_corr = X[numeric_cols].corrwith(y).abs().sort_values(ascending=False)
    top_corr_features = pearson_corr.head(top_k).index.tolist()

    # Step 4. One-hot encode categorical features (required by chi2 / MI).
    X_encoded = pd.get_dummies(X, drop_first=True)

    # Step 5. Chi-squared test. chi2 raises ValueError on negative values,
    # so shift columns to be non-negative only when negatives exist — data
    # that is already >= 0 (dummies, typical counts) is passed unchanged,
    # keeping scores identical to the unshifted case.
    X_chi = X_encoded
    if (X_encoded.min() < 0).any():
        X_chi = X_encoded - X_encoded.min()
    chi_selector = SelectKBest(score_func=chi2, k=min(top_k, X_encoded.shape[1]))
    chi_selector.fit(X_chi, y)
    chi_scores = pd.Series(chi_selector.scores_, index=X_encoded.columns).sort_values(ascending=False)
    top_chi_features = chi_scores.head(top_k).index.tolist()

    # Step 6. Mutual information; a fixed random_state makes the estimate
    # (and therefore the recommended feature list) reproducible across runs.
    mi_scores = mutual_info_classif(X_encoded, y, discrete_features='auto', random_state=0)
    mi_series = pd.Series(mi_scores, index=X_encoded.columns).sort_values(ascending=False)
    top_mi_features = mi_series.head(top_k).index.tolist()

    # Step 7. Union of the three per-method top-k lists, deduplicated.
    # NOTE(review): an earlier comment promised "intersection first, then
    # top up", but the implementation has always been a plain union — the
    # behavior is kept and documented as such.
    combined = list(set(top_corr_features + top_chi_features + top_mi_features))

    print(f"📊 皮尔逊相关性 Top {top_k}：\n", top_corr_features)
    print(f"\n📊 卡方得分 Top {top_k}：\n", top_chi_features)
    print(f"\n📊 互信息 Top {top_k}：\n", top_mi_features)
    print(f"\n✅ 推荐用于建模的特征数：{len(combined)} 个")

    return combined  # ready for X[combined] in model training
if __name__ == '__main__':
    # Script entry point: load the raw training data and run the selector.
    train_df = pd.read_csv('../../data/raw/train.csv')
    auto_select_features(train_df)