#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
分析消融实验结果，解释为什么会出现相同准确率的现象
"""

import itertools
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
warnings.filterwarnings('ignore')

# Configure matplotlib for Chinese labels: SimHei supplies the CJK glyphs,
# and unicode_minus must be disabled so the minus sign still renders
# correctly alongside a CJK font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

def analyze_feature_importance_distribution():
    """Analyze the distribution of random-forest feature importances.

    Reads ``bearing_features.csv``, fits a RandomForestClassifier on all
    numeric feature columns, and prints summary statistics of the
    resulting importance scores (top-20 listing, min/max/mean/median,
    top-k averages, and the count of zero-importance features).

    Returns:
        pd.DataFrame: columns ``feature`` and ``importance``, sorted by
        importance in descending order.
    """
    print("=== 分析特征重要性分布 ===")

    df = pd.read_csv('bearing_features.csv')

    # Metadata columns that must never be used as model features.
    exclude_cols = ['label', 'filename', 'rpm', 'sampling_rate', 'bearing_type', 'signal_length']
    # Keep only numeric feature columns (skips any stray string columns).
    numeric_cols = [col for col in df.columns
                    if col not in exclude_cols and pd.api.types.is_numeric_dtype(df[col])]

    X = df[numeric_cols].astype(float).fillna(0)
    y = df['label']

    # Fit on the full data set: only the importances are needed here,
    # not an unbiased accuracy estimate.
    rf = RandomForestClassifier(n_estimators=100, random_state=42)
    rf.fit(X, y)

    importance_df = pd.DataFrame({
        'feature': numeric_cols,
        'importance': rf.feature_importances_
    }).sort_values('importance', ascending=False)

    print(f"总特征数: {len(numeric_cols)}")
    print(f"样本数: {len(X)}")
    print(f"特征/样本比例: {len(numeric_cols)/len(X):.2f}")

    # List the 20 highest-ranked features.
    print("\n前20个特征的重要性:")
    for i, (_, row) in enumerate(importance_df.head(20).iterrows(), 1):
        print(f"{i:2d}. {row['feature']:25s}: {row['importance']:.6f}")

    # Summary statistics of the importance distribution.
    print(f"\n重要性统计:")
    print(f"最大值: {importance_df['importance'].max():.6f}")
    print(f"最小值: {importance_df['importance'].min():.6f}")
    print(f"平均值: {importance_df['importance'].mean():.6f}")
    print(f"中位数: {importance_df['importance'].median():.6f}")

    # Mean importance of the top-k features, for several k.
    print(f"\n重要性下降分析:")
    for k in (5, 10, 15, 20):
        mean_importance = importance_df.head(k)['importance'].mean()
        print(f"前{k}个特征平均重要性: {mean_importance:.6f}")

    # Features the forest never used for a split have importance 0.
    zero_count = (importance_df['importance'] == 0).sum()
    print(f"\n重要性为0的特征数: {zero_count}")

    return importance_df

def test_feature_combinations():
    """Evaluate cross-validated accuracy for increasing top-k feature sets.

    Reads ``bearing_features.csv``, ranks the numeric features by
    random-forest importance, then for several subset sizes runs 5-fold
    cross-validation on the top-ranked features and prints the scores.

    Returns:
        list[dict]: one entry per subset size with keys ``n_features``,
        ``accuracy_mean``, ``accuracy_std`` and ``features`` (the first
        five selected feature names).
    """
    print("\n=== 测试特征组合性能 ===")

    df = pd.read_csv('bearing_features.csv')

    # Metadata columns that must never be used as model features.
    exclude_cols = ['label', 'filename', 'rpm', 'sampling_rate', 'bearing_type', 'signal_length']
    numeric_cols = [col for col in df.columns
                    if col not in exclude_cols and pd.api.types.is_numeric_dtype(df[col])]

    X = df[numeric_cols].astype(float).fillna(0)
    y = df['label']

    # Rank all features by importance on the full data set.
    rf = RandomForestClassifier(n_estimators=100, random_state=42)
    rf.fit(X, y)

    importance_df = pd.DataFrame({
        'feature': numeric_cols,
        'importance': rf.feature_importances_
    }).sort_values('importance', ascending=False)

    results = []
    # Deduplicate candidate sizes: len(numeric_cols) may coincide with
    # one of the fixed values, which previously evaluated it twice.
    for n in sorted({5, 10, 15, 20, 50, 100, len(numeric_cols)}):
        if n > len(importance_df):
            continue
        selected_features = importance_df.head(n)['feature'].tolist()
        X_selected = X[selected_features]

        # BUG FIX: the scaler used to be fit on the whole data set before
        # cross_val_score, leaking test-fold statistics into training.
        # A pipeline re-fits the scaler inside each CV training fold.
        model = make_pipeline(
            StandardScaler(),
            RandomForestClassifier(n_estimators=100, random_state=42,
                                   class_weight='balanced'),
        )
        scores = cross_val_score(model, X_selected, y, cv=5, scoring='accuracy')

        results.append({
            'n_features': n,
            'accuracy_mean': scores.mean(),
            'accuracy_std': scores.std(),
            'features': selected_features[:5]  # only the first 5 names
        })

        print(f"前{n:3d}个特征: 准确率 {scores.mean():.4f} ± {scores.std():.4f}")

    return results

def plot_importance_analysis(importance_df):
    """Render a 2x2 summary figure of the feature-importance results.

    Saves the figure to ``feature_importance_analysis.png`` (300 dpi)
    and displays it.

    Args:
        importance_df: DataFrame with ``feature`` and ``importance``
            columns, sorted by importance descending.
    """
    print("\n=== 绘制重要性分析图 ===")

    fig, ((rank_ax, cum_ax), (hist_ax, bar_ax)) = plt.subplots(2, 2, figsize=(15, 12))
    fig.suptitle('特征重要性分析', fontsize=16)

    # Panel 1: importance of the 50 highest-ranked features.
    leading = importance_df.head(50)
    rank_ax.plot(range(1, len(leading) + 1), leading['importance'], 'o-', markersize=4)
    rank_ax.set_xlabel('特征排名')
    rank_ax.set_ylabel('重要性')
    rank_ax.set_title('前50个特征重要性分布')
    rank_ax.grid(True, alpha=0.3)

    # Panel 2: cumulative importance with 80/90/95% guide lines.
    running_total = importance_df['importance'].cumsum()
    cum_ax.plot(range(1, len(running_total) + 1), running_total, 'r-', linewidth=2)
    for level, colour in ((0.8, 'g'), (0.9, 'orange'), (0.95, 'red')):
        cum_ax.axhline(y=level, color=colour, linestyle='--', alpha=0.7,
                       label=f'{level:.0%}')
    cum_ax.set_xlabel('特征数量')
    cum_ax.set_ylabel('累积重要性')
    cum_ax.set_title('累积重要性分布')
    cum_ax.legend()
    cum_ax.grid(True, alpha=0.3)

    # Panel 3: histogram of the raw importance values.
    hist_ax.hist(importance_df['importance'], bins=50, alpha=0.7, edgecolor='black')
    hist_ax.set_xlabel('重要性值')
    hist_ax.set_ylabel('特征数量')
    hist_ax.set_title('重要性值分布直方图')
    hist_ax.grid(True, alpha=0.3)

    # Panel 4: horizontal bars for the top-20 features.
    top_features = importance_df.head(20)
    bar_ax.barh(range(len(top_features)), top_features['importance'], alpha=0.7)
    bar_ax.set_yticks(range(len(top_features)))
    bar_ax.set_yticklabels(top_features['feature'], fontsize=8)
    bar_ax.set_xlabel('重要性')
    bar_ax.set_title('前20个特征重要性')
    bar_ax.invert_yaxis()  # rank 1 at the top

    plt.tight_layout()
    plt.savefig('feature_importance_analysis.png', dpi=300, bbox_inches='tight')
    plt.show()

def analyze_feature_correlation():
    """Report highly correlated pairs among the first 20 feature columns.

    Reads ``bearing_features.csv``, computes the pairwise Pearson
    correlation of the first 20 numeric feature columns and prints every
    pair whose absolute correlation exceeds 0.8.

    NOTE(review): "first 20" here means the first 20 columns in CSV
    order, NOT the top 20 by model importance — confirm this is the
    intended selection before relying on the output.

    Returns:
        list[dict]: one entry per high-correlation pair with keys
        ``feature1``, ``feature2`` and ``correlation``.
    """
    print("\n=== 分析特征相关性 ===")

    df = pd.read_csv('bearing_features.csv')

    # Metadata columns that must never be treated as features.
    exclude_cols = ['label', 'filename', 'rpm', 'sampling_rate', 'bearing_type', 'signal_length']
    numeric_cols = [col for col in df.columns
                    if col not in exclude_cols and pd.api.types.is_numeric_dtype(df[col])]

    X = df[numeric_cols].astype(float).fillna(0)

    # Correlation matrix of the first 20 columns (CSV order, see NOTE).
    top_20_features = X.columns[:20]
    corr_matrix = X[top_20_features].corr()

    # Collect every pair above the |r| > 0.8 redundancy threshold.
    high_corr_pairs = []
    for (i, feat_a), (j, feat_b) in itertools.combinations(enumerate(corr_matrix.columns), 2):
        corr_val = corr_matrix.iloc[i, j]
        if abs(corr_val) > 0.8:
            high_corr_pairs.append({
                'feature1': feat_a,
                'feature2': feat_b,
                'correlation': corr_val
            })

    print(f"前20个特征中高相关性特征对数量: {len(high_corr_pairs)}")
    for pair in high_corr_pairs[:10]:  # show at most the first 10 pairs
        print(f"  {pair['feature1']} <-> {pair['feature2']}: {pair['correlation']:.3f}")

    return high_corr_pairs

def main():
    """Run the full ablation-result analysis pipeline end to end."""
    print("=== 消融实验结果分析 ===")

    # Step 1: feature-importance distribution (result reused for plots).
    importance_df = analyze_feature_importance_distribution()

    # Step 2: accuracy for different feature-subset sizes.
    test_feature_combinations()

    # Step 3: diagnostic plots.
    plot_importance_analysis(importance_df)

    # Step 4: redundancy check via pairwise correlation.
    analyze_feature_correlation()

    # Step 5: printed summary of the findings.
    summary_lines = (
        "\n=== 分析总结 ===",
        "1. 为什么top_5、top_10、top_20准确率相同？",
        "   - 前5个特征已经包含了最重要的信息",
        "   - 后续特征重要性很低，添加后不会提升性能",
        "   - 可能存在特征冗余问题",
        "\n2. 为什么top_15比所有特征准确率高？",
        "   - 所有特征(143个) vs 样本(79个)比例过高，容易过拟合",
        "   - 包含大量噪声特征，干扰模型学习",
        "   - top_15特征通过重要性排序，去除了噪声",
        "\n3. 建议：",
        "   - 使用top_15特征组合",
        "   - 考虑进一步减少特征数量到10-12个",
        "   - 分析特征相关性，去除冗余特征",
    )
    for line in summary_lines:
        print(line)
if __name__ == "__main__":
    main()

