"""
特征选择案例：过滤法、包装法、嵌入法
演示各种特征选择方法及其应用
"""

import warnings
from pathlib import Path

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_breast_cancer, make_classification
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.feature_selection import (
    VarianceThreshold, SelectKBest, chi2, f_classif, mutual_info_classif,
    RFE, SelectFromModel
)
from sklearn.metrics import accuracy_score

warnings.filterwarnings('ignore')

# Configure matplotlib so CJK characters render in plot labels.
# NOTE(review): 'Arial Unicode MS' is typically only available on macOS;
# on Linux/Windows these labels may render as boxes — confirm per platform.
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
plt.rcParams['axes.unicode_minus'] = False

print("=" * 80)
print("特征选择案例：过滤法、包装法、嵌入法")
print("=" * 80)

# 1. Load the dataset.
print("\n1. 加载数据集")
print("=" * 80)

# Breast-cancer dataset: 569 samples, 30 numeric features, binary target.
data = load_breast_cancer()
X = data.data
y = data.target
feature_names = data.feature_names

print(f"数据集形状：{X.shape}")
print(f"特征数量：{X.shape[1]}")
print(f"样本数量：{X.shape[0]}")
print(f"类别分布：{np.bincount(y)}")
print(f"\n前5个特征名称：{list(feature_names[:5])}")

# Hold out 30% as a test set; fixed seed keeps all later scores reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# Standardize: fit the scaler on the training split only, then apply the
# same transform to the test split (avoids leaking test statistics).
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Baseline model trained on all 30 features — every selection method
# below is compared against this accuracy.
print("\n基准模型（使用所有30个特征）：")
rf_baseline = RandomForestClassifier(n_estimators=100, random_state=42)
rf_baseline.fit(X_train_scaled, y_train)
baseline_score = accuracy_score(y_test, rf_baseline.predict(X_test_scaled))
print(f"准确率: {baseline_score:.4f}")

# ============================================================================
# Filter Methods: score each feature independently of any downstream model.
# ============================================================================

print("\n" + "=" * 80)
print("2. 过滤法 (Filter Methods)")
print("=" * 80)
print("特点：独立于模型，基于统计指标评估特征")
print("优点：快速、可扩展")
print("缺点：忽略特征间的相互作用")

# 2.1 Variance threshold: drop features whose variance falls below a cutoff.
print("\n2.1 方差选择法 (Variance Threshold)")
print("-" * 80)
print("原理：删除方差低于阈值的特征")
print("适用：删除常量或准常量特征")

# FIX: variance filtering must look at the UNSCALED data. StandardScaler
# forces every training feature to unit variance, so thresholding the
# scaled matrix (as the original code did) was a no-op — all 30 features
# had variance ~1.0 and nothing could ever be filtered. Using the raw
# training matrix makes the reported variances (and the filter) meaningful.
variances = np.var(X_train, axis=0)
variance_df = pd.DataFrame({
    'feature': feature_names,
    'variance': variances
}).sort_values('variance', ascending=False)

print(f"\n方差最高的5个特征：")
print(variance_df.head())
print(f"\n方差最低的5个特征：")
print(variance_df.tail())

# Apply the threshold on the raw features for the same reason as above.
threshold = 0.1
selector_var = VarianceThreshold(threshold=threshold)
X_train_var = selector_var.fit_transform(X_train)
X_test_var = selector_var.transform(X_test)

selected_features_var = feature_names[selector_var.get_support()]
print(f"\n方差阈值 {threshold} 选择的特征数量: {len(selected_features_var)}")

# 2.2 Univariate selection via the ANOVA F-test: larger F means stronger
# between-class separation for that single feature.
print("\n2.2 单变量特征选择 - F检验 (ANOVA F-test)")
print("-" * 80)
print("原理：计算每个特征与目标的F统计量")
print("适用：分类问题，特征与目标的线性关系")

# Score every feature against the labels on the training split.
# NOTE: `f_scores` is reused by the visualization section later — keep the name.
f_scores, f_pvalues = f_classif(X_train_scaled, y_train)
f_table = pd.DataFrame({
    'feature': feature_names,
    'f_score': f_scores,
    'p_value': f_pvalues
})
f_table = f_table.sort_values('f_score', ascending=False)

print(f"\nF值最高的10个特征：")
print(f_table.head(10))

# Keep only the k highest-scoring features.
k = 15
kbest_f = SelectKBest(f_classif, k=k)
kbest_f.fit(X_train_scaled, y_train)
X_tr_f = kbest_f.transform(X_train_scaled)
X_te_f = kbest_f.transform(X_test_scaled)

chosen_f = feature_names[kbest_f.get_support()]
print(f"\nF检验选择的前{k}个特征：")
print(list(chosen_f))

# Refit the reference random forest on the reduced feature set and compare.
model_f = RandomForestClassifier(n_estimators=100, random_state=42)
model_f.fit(X_tr_f, y_train)
score_f = accuracy_score(y_test, model_f.predict(X_te_f))
print(f"准确率: {score_f:.4f} (vs 基准 {baseline_score:.4f})")

# 2.3 Mutual information: measures how much knowing a feature reduces
# uncertainty about the target — captures non-linear dependence, unlike F-test.
print("\n2.3 互信息 (Mutual Information)")
print("-" * 80)
print("原理：衡量特征与目标的依赖程度")
print("适用：可以捕捉非线性关系")

# Estimate MI per feature; random_state fixes the nearest-neighbor
# estimator's randomness so the ranking is reproducible.
mi_scores = mutual_info_classif(X_train_scaled, y_train, random_state=42)
mi_df = pd.DataFrame({
    'feature': feature_names,
    'mi_score': mi_scores
}).sort_values('mi_score', ascending=False)

print(f"\n互信息最高的10个特征：")
print(mi_df.head(10))

# Keep the k features with the highest MI (k defined in the F-test section).
selector_mi = SelectKBest(mutual_info_classif, k=k)
X_train_mi = selector_mi.fit_transform(X_train_scaled, y_train)
X_test_mi = selector_mi.transform(X_test_scaled)

selected_features_mi = feature_names[selector_mi.get_support()]
print(f"\n互信息选择的前{k}个特征：")
print(list(selected_features_mi))

# Evaluate: same forest configuration as the baseline, reduced features.
rf_mi = RandomForestClassifier(n_estimators=100, random_state=42)
rf_mi.fit(X_train_mi, y_train)
score_mi = accuracy_score(y_test, rf_mi.predict(X_test_mi))
print(f"准确率: {score_mi:.4f} (vs 基准 {baseline_score:.4f})")

# 2.4 Correlation filter: rank features by |Pearson correlation| with the
# target. With a binary 0/1 target this is the point-biserial correlation.
print("\n2.4 相关系数法 (Correlation)")
print("-" * 80)
print("原理：计算特征与目标的相关系数")
print("适用：快速筛选相关特征")

# Idiom fix: a comprehension replaces the original manual append loop
# (same values, same order — one |corr| per feature column).
correlations = [
    abs(np.corrcoef(X_train_scaled[:, i], y_train)[0, 1])
    for i in range(X_train_scaled.shape[1])
]

corr_df = pd.DataFrame({
    'feature': feature_names,
    'correlation': correlations
}).sort_values('correlation', ascending=False)

print(f"\n相关系数最高的10个特征：")
print(corr_df.head(10))

# ============================================================================
# Wrapper Methods: evaluate candidate feature subsets with an actual model.
# ============================================================================

print("\n" + "=" * 80)
print("3. 包装法 (Wrapper Methods)")
print("=" * 80)
print("特点：使用模型评估特征子集")
print("优点：考虑特征间的相互作用")
print("缺点：计算成本高")

# 3.1 Recursive Feature Elimination: repeatedly fit the estimator and drop
# the weakest feature (by coefficient magnitude here) until k remain.
print("\n3.1 递归特征消除 (RFE - Recursive Feature Elimination)")
print("-" * 80)
print("原理：递归删除最不重要的特征")
print("适用：任何有特征重要性的模型")

# Logistic regression supplies the per-feature coefficients RFE needs;
# high max_iter ensures convergence on the standardized data.
estimator = LogisticRegression(max_iter=10000, random_state=42)
selector_rfe = RFE(estimator, n_features_to_select=k, step=1)  # drop 1 per round
selector_rfe.fit(X_train_scaled, y_train)

X_train_rfe = selector_rfe.transform(X_train_scaled)
X_test_rfe = selector_rfe.transform(X_test_scaled)

selected_features_rfe = feature_names[selector_rfe.get_support()]
print(f"\nRFE选择的前{k}个特征：")
print(list(selected_features_rfe))

# ranking_: 1 = kept; larger numbers were eliminated earlier.
ranking_df = pd.DataFrame({
    'feature': feature_names,
    'ranking': selector_rfe.ranking_
}).sort_values('ranking')
print(f"\n特征排名（1表示被选中）：")
print(ranking_df.head(10))

# Evaluate the selected subset with the same forest as the baseline.
rf_rfe = RandomForestClassifier(n_estimators=100, random_state=42)
rf_rfe.fit(X_train_rfe, y_train)
score_rfe = accuracy_score(y_test, rf_rfe.predict(X_test_rfe))
print(f"准确率: {score_rfe:.4f} (vs 基准 {baseline_score:.4f})")

# 3.2 Forward selection (simplified, hand-rolled demo).
print("\n3.2 前向选择 (Forward Selection)")
print("-" * 80)
print("原理：从空集开始，逐步添加最优特征")
print("适用：特征数量不太多的情况")

def forward_selection(X_train, y_train, X_test, y_test, max_features=10,
                      names=None):
    """Greedy forward feature selection (simplified teaching demo).

    Starting from the empty set, each round adds the single remaining
    feature whose inclusion maximizes the accuracy of a small random
    forest on the evaluation split.

    Parameters
    ----------
    X_train, y_train : training matrix and labels.
    X_test, y_test : data used to score each candidate subset.
    max_features : number of rounds, i.e. features to select.
    names : optional sequence of feature names for progress printing.
        Defaults to the module-level ``feature_names`` — this keeps the
        original behavior while removing the hard dependency on the
        global when a caller supplies its own names.

    Returns
    -------
    (selected, scores) : list of chosen column indices, and the accuracy
        reached after each round.

    NOTE(review): scoring candidates on the *test* split is data leakage
    (the script's own section 6 warns against exactly this). Acceptable
    for a demo, but real pipelines should use a validation split or
    cross-validation here.
    """
    if names is None:
        names = feature_names  # fall back to the module-level global

    selected = []
    remaining = list(range(X_train.shape[1]))
    scores = []

    for round_no in range(1, max_features + 1):
        best_score = 0.0
        best_feature = None

        # Try each remaining feature appended to the current subset.
        for candidate in remaining:
            cols = selected + [candidate]
            model = RandomForestClassifier(n_estimators=50, random_state=42)
            model.fit(X_train[:, cols], y_train)
            score = accuracy_score(y_test, model.predict(X_test[:, cols]))
            if score > best_score:
                best_score = score
                best_feature = candidate

        if best_feature is not None:
            selected.append(best_feature)
            remaining.remove(best_feature)
            scores.append(best_score)
            print(f"第{round_no}轮: 添加特征 '{names[best_feature]}', "
                  f"准确率: {best_score:.4f}")

    return selected, scores

print(f"\n执行前向选择（选择{k}个特征）：")
selected_forward, scores_forward = forward_selection(
    X_train_scaled, y_train, X_test_scaled, y_test, max_features=k
)

# ============================================================================
# Embedded Methods: selection happens as a by-product of model training.
# ============================================================================

print("\n" + "=" * 80)
print("4. 嵌入法 (Embedded Methods)")
print("=" * 80)
print("特点：在模型训练过程中进行特征选择")
print("优点：效率高，考虑特征相互作用")
print("缺点：依赖于特定模型")

# 4.1 L1 regularization (Lasso): the L1 penalty shrinks weak coefficients
# to exactly zero, implicitly selecting features.
print("\n4.1 L1正则化 (Lasso)")
print("-" * 80)
print("原理：L1正则化会将不重要特征的系数压缩为0")
print("适用：线性模型")

# NOTE(review): Lasso is a *regression* estimator fitted here on 0/1 class
# labels. That works numerically and suffices for this selection demo, but
# LogisticRegression(penalty='l1') would be the classification analog —
# presumably a deliberate simplification; confirm intent.
lasso = Lasso(alpha=0.01, random_state=42)
lasso.fit(X_train_scaled, y_train)

# Rank features by absolute coefficient magnitude.
lasso_coef = np.abs(lasso.coef_)
lasso_df = pd.DataFrame({
    'feature': feature_names,
    'coefficient': lasso_coef
}).sort_values('coefficient', ascending=False)

print(f"\nLasso系数最大的10个特征：")
print(lasso_df.head(10))

# prefit=True reuses the already-fitted lasso without refitting.
# NOTE(review): threshold=0.01 keeps features with |coef| >= 0.01 — slightly
# stricter than the "non-zero coefficients" described above.
selector_lasso = SelectFromModel(lasso, prefit=True, threshold=0.01)
X_train_lasso = selector_lasso.transform(X_train_scaled)
X_test_lasso = selector_lasso.transform(X_test_scaled)

selected_features_lasso = feature_names[selector_lasso.get_support()]
print(f"\nLasso选择的特征数量: {len(selected_features_lasso)}")
print(f"选择的特征: {list(selected_features_lasso)}")

# Evaluate the Lasso-selected subset with the reference forest.
rf_lasso = RandomForestClassifier(n_estimators=100, random_state=42)
rf_lasso.fit(X_train_lasso, y_train)
score_lasso = accuracy_score(y_test, rf_lasso.predict(X_test_lasso))
print(f"准确率: {score_lasso:.4f} (vs 基准 {baseline_score:.4f})")

# 4.2 Tree-based feature importance: rank features by how much they reduce
# impurity across the forest's splits.
print("\n4.2 树模型特征重要性 (Tree-based Feature Importance)")
print("-" * 80)
print("原理：基于特征在决策树中的重要性")
print("适用：树模型（随机森林、GBDT等）")

# Fit a forest purely to read off its impurity-based importances.
# NOTE: `importances` is reused by the visualization section — keep the name.
forest = RandomForestClassifier(n_estimators=100, random_state=42)
forest.fit(X_train_scaled, y_train)
importances = forest.feature_importances_

ranked = pd.DataFrame({
    'feature': feature_names,
    'importance': importances
})
ranked = ranked.sort_values('importance', ascending=False)

print(f"\n特征重要性最高的10个特征：")
print(ranked.head(10))

# Keep only features whose importance exceeds the median importance;
# prefit=True reuses the forest already fitted above.
picker = SelectFromModel(forest, prefit=True, threshold='median')
X_tr_tree = picker.transform(X_train_scaled)
X_te_tree = picker.transform(X_test_scaled)

selected_features_tree = feature_names[picker.get_support()]
print(f"\n树模型选择的特征数量: {len(selected_features_tree)}")
print(f"选择的特征: {list(selected_features_tree)}")

# Score a fresh forest on the reduced matrix against the baseline.
eval_forest = RandomForestClassifier(n_estimators=100, random_state=42)
eval_forest.fit(X_tr_tree, y_train)
score_tree = accuracy_score(y_test, eval_forest.predict(X_te_tree))
print(f"准确率: {score_tree:.4f} (vs 基准 {baseline_score:.4f})")

# ============================================================================
# Result comparison across all methods.
# ============================================================================

print("\n" + "=" * 80)
print("5. 不同特征选择方法对比")
print("=" * 80)

# Method name -> (number of features used, test accuracy).
results = {
    '基准（所有特征）': (30, baseline_score),
    'F检验': (k, score_f),
    '互信息': (k, score_mi),
    'RFE': (k, score_rfe),
    'Lasso': (len(selected_features_lasso), score_lasso),
    '树模型重要性': (len(selected_features_tree), score_tree)
}

results_df = pd.DataFrame(results, index=['特征数量', '准确率']).T
print("\n结果汇总：")
print(results_df)

# 2x2 summary figure: accuracies, feature counts, F-value and importance
# distributions.
fig, axes = plt.subplots(2, 2, figsize=(16, 12))

# 1. Accuracy comparison (baseline in gray, selection methods in blue).
methods = list(results.keys())
accuracies = [v[1] for v in results.values()]
colors = ['gray'] + ['skyblue'] * (len(methods) - 1)

axes[0, 0].barh(methods, accuracies, color=colors)
axes[0, 0].set_xlabel('准确率')
axes[0, 0].set_title('不同特征选择方法的准确率对比')
axes[0, 0].set_xlim([0.9, 1.0])
axes[0, 0].grid(True, alpha=0.3, axis='x')
for i, v in enumerate(accuracies):
    axes[0, 0].text(v, i, f' {v:.4f}', va='center')

# 2. Number of features each method kept.
feature_counts = [v[0] for v in results.values()]
axes[0, 1].barh(methods, feature_counts, color=colors)
axes[0, 1].set_xlabel('特征数量')
axes[0, 1].set_title('不同方法选择的特征数量')
axes[0, 1].grid(True, alpha=0.3, axis='x')
for i, v in enumerate(feature_counts):
    axes[0, 1].text(v, i, f' {v}', va='center')

# 3. Sorted F-value distribution with its median as a reference line.
axes[1, 0].bar(range(len(f_scores)), sorted(f_scores, reverse=True))
axes[1, 0].set_xlabel('特征索引（按F值排序）')
axes[1, 0].set_ylabel('F值')
axes[1, 0].set_title('F检验：特征F值分布')
axes[1, 0].axhline(y=np.median(f_scores), color='r', linestyle='--',
                   label=f'中位数: {np.median(f_scores):.2f}')
axes[1, 0].legend()
axes[1, 0].grid(True, alpha=0.3)

# 4. Sorted random-forest importance distribution with its median.
axes[1, 1].bar(range(len(importances)), sorted(importances, reverse=True))
axes[1, 1].set_xlabel('特征索引（按重要性排序）')
axes[1, 1].set_ylabel('重要性')
axes[1, 1].set_title('随机森林：特征重要性分布')
axes[1, 1].axhline(y=np.median(importances), color='r', linestyle='--',
                   label=f'中位数: {np.median(importances):.4f}')
axes[1, 1].legend()
axes[1, 1].grid(True, alpha=0.3)

plt.tight_layout()
# FIX: the original hard-coded an absolute path under one user's home
# directory, which fails on every other machine. Save next to this script
# instead — same intent (the original directory was the script's own
# folder), but portable.
output_path = Path(__file__).resolve().parent / '特征选择对比.png'
plt.savefig(output_path, dpi=300, bbox_inches='tight')
print("\n可视化图表已保存")

# 6. Feature-selection best practices: a static summary table and checklist
# (runtime strings intentionally left in Chinese — they are program output).
print("\n" + "=" * 80)
print("6. 特征选择最佳实践")
print("=" * 80)
print("""
┌─────────────────┬──────────────────────┬─────────────────────┬──────────────────┐
│   方法类别      │      代表方法        │        优点         │      缺点        │
├─────────────────┼──────────────────────┼─────────────────────┼──────────────────┤
│ 过滤法          │ 方差、F检验、互信息  │ 快速、可扩展        │ 忽略特征交互     │
│ Filter          │ 相关系数             │ 独立于模型          │                  │
├─────────────────┼──────────────────────┼─────────────────────┼──────────────────┤
│ 包装法          │ RFE、前向选择        │ 考虑特征交互        │ 计算成本高       │
│ Wrapper         │ 后向消除             │ 性能好              │ 容易过拟合       │
├─────────────────┼──────────────────────┼─────────────────────┼──────────────────┤
│ 嵌入法          │ Lasso、树模型        │ 效率高              │ 依赖特定模型     │
│ Embedded        │ 特征重要性           │ 考虑特征交互        │                  │
└─────────────────┴──────────────────────┴─────────────────────┴──────────────────┘

选择建议：
1. **数据探索阶段**：使用过滤法快速筛选
2. **模型优化阶段**：使用包装法或嵌入法精细选择
3. **高维数据**：先用过滤法降维，再用包装法/嵌入法
4. **线性模型**：Lasso、RFE效果好
5. **树模型**：特征重要性方法
6. **计算资源有限**：过滤法或嵌入法
7. **追求最优性能**：包装法（配合交叉验证）

注意事项：
- 避免在全部数据上进行特征选择（数据泄露）
- 使用交叉验证评估特征选择效果
- 结合领域知识，不要盲目删除特征
- 特征选择是迭代过程，需要多次尝试
- 保存特征选择器，确保训练和预测一致
""")

print("\n" + "=" * 80)
print("案例完成！")
print("=" * 80)
