"""
特征选择和可视化模块

基于reference/1-3-Feature_selection_and_visualization.py改进
实现特征选择、特征分析和可视化功能
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
import warnings
warnings.filterwarnings('ignore')

# Configure matplotlib for CJK text: SimHei renders Chinese labels (DejaVu
# Sans as fallback), and unicode_minus=False keeps the minus sign drawable
# when a CJK font is active (CJK fonts often lack U+2212).
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False


class FeatureSelector:
    """Feature selection, analysis and visualization toolkit.

    Adapted from ``reference/1-3-Feature_selection_and_visualization.py``.
    Provides:

    * score-based feature selection (random-forest importance, mutual
      information, ANOVA F-test) and PCA dimensionality reduction,
    * a multi-method importance analysis with a weighted combined score,
    * correlation and per-class distribution analysis,
    * plotting helpers (t-SNE/PCA scatter, importance bars, correlation
      heatmap, per-class box plots) and a markdown report generator.
    """

    def __init__(self):
        # Column names kept by the last score-based selection run.
        self.selected_features = []
        # Fitted transformer; only populated by the PCA path.
        self.selector = None
        # Raw random-forest importances (random_forest method only).
        self.feature_importance = None
        # Per-feature scores from the last selection run.
        self.feature_scores = None

    def select_features(self, X, y, method='random_forest', k=30):
        """Select up to ``k`` features from ``X``.

        Parameters
        ----------
        X : pandas.DataFrame
            Feature matrix of shape (n_samples, n_features).
        y : array-like
            Class labels aligned with the rows of ``X``.
        method : str
            One of ``'random_forest'``, ``'mutual_info'``, ``'f_test'``
            or ``'pca'``.
        k : int
            Number of features (or PCA components) to keep; clipped to
            the number of available columns so oversized requests do not
            fail or misreport.

        Returns
        -------
        pandas.DataFrame with the selected columns, or — for the
        ``'pca'`` method — the transformed ``numpy.ndarray``.

        Raises
        ------
        ValueError
            If ``method`` is not one of the supported names.
        """
        # Robustness: never ask for more features than X provides.
        k = min(k, X.shape[1])
        print(f"🔍 使用{method}方法选择{k}个特征...")

        if method == 'random_forest':
            self._random_forest_selection(X, y, k)
        elif method == 'mutual_info':
            self._mutual_info_selection(X, y, k)
        elif method == 'f_test':
            self._f_test_selection(X, y, k)
        elif method == 'pca':
            # PCA returns the transformed array directly, not columns of X.
            return self._pca_selection(X, k)
        else:
            raise ValueError(f"不支持的特征选择方法: {method}")

        return X[self.selected_features]

    def _random_forest_selection(self, X, y, k):
        """Keep the ``k`` features with the highest RF importances."""
        model = RandomForestClassifier(n_estimators=100, random_state=42)
        model.fit(X, y)

        importances = model.feature_importances_
        # Indices of the k largest importances, best first.
        indices = np.argsort(importances)[::-1][:k]

        self.selected_features = X.columns[indices].tolist()
        self.feature_importance = importances
        self.feature_scores = importances

        print(f"✅ 随机森林选择了{len(self.selected_features)}个特征")

    def _mutual_info_selection(self, X, y, k):
        """Keep the ``k`` features with the highest mutual information."""
        mi_scores = mutual_info_classif(X, y, random_state=42)
        indices = np.argsort(mi_scores)[::-1][:k]

        self.selected_features = X.columns[indices].tolist()
        self.feature_scores = mi_scores

        print(f"✅ 互信息选择了{len(self.selected_features)}个特征")

    def _f_test_selection(self, X, y, k):
        """Keep the ``k`` features with the highest ANOVA F-scores."""
        f_scores, _ = f_classif(X, y)
        # f_classif yields NaN for zero-variance features; argsort places
        # NaN last, so the descending reversal would rank those useless
        # features FIRST. Map NaN to 0 so they sort last instead.
        f_scores = np.nan_to_num(f_scores, nan=0.0)
        indices = np.argsort(f_scores)[::-1][:k]

        self.selected_features = X.columns[indices].tolist()
        self.feature_scores = f_scores

        print(f"✅ F检验选择了{len(self.selected_features)}个特征")

    def _pca_selection(self, X, k):
        """Project ``X`` onto its first ``k`` principal components."""
        # PCA requires n_components <= min(n_samples, n_features).
        k = min(k, X.shape[0], X.shape[1])
        pca = PCA(n_components=k)
        X_pca = pca.fit_transform(X)

        self.selector = pca
        self.selected_features = [f'PC{i+1}' for i in range(k)]

        print(f"✅ PCA降维到{len(self.selected_features)}个主成分")
        return X_pca

    def analyze_feature_importance(self, X, y):
        """Score every feature with three methods and combine them.

        Returns
        -------
        pandas.DataFrame with the raw scores (``random_forest``,
        ``mutual_info``, ``f_test``), their min-max normalized columns
        (``*_normalized``) and a weighted ``combined_score``
        (0.5 / 0.3 / 0.2), sorted by combined score descending.
        """
        print("📊 分析特征重要性...")

        # Random-forest impurity-based importances.
        rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
        rf_model.fit(X, y)
        rf_importance = rf_model.feature_importances_

        # Mutual information between each feature and the labels.
        mi_scores = mutual_info_classif(X, y, random_state=42)

        # ANOVA F-test scores; NaN (zero-variance feature) -> 0.
        f_scores, _ = f_classif(X, y)
        f_scores = np.nan_to_num(f_scores, nan=0.0)

        importance_df = pd.DataFrame({
            'feature': X.columns,
            'random_forest': rf_importance,
            'mutual_info': mi_scores,
            'f_test': f_scores
        })

        # Min-max normalize each score column. A constant column would
        # divide by zero (yielding NaN everywhere); fall back to 0.0.
        for col in ['random_forest', 'mutual_info', 'f_test']:
            col_min = importance_df[col].min()
            col_range = importance_df[col].max() - col_min
            if col_range > 0:
                importance_df[f'{col}_normalized'] = (importance_df[col] - col_min) / col_range
            else:
                importance_df[f'{col}_normalized'] = 0.0

        # Weighted blend of the three normalized scores.
        importance_df['combined_score'] = (
            importance_df['random_forest_normalized'] * 0.5 +
            importance_df['mutual_info_normalized'] * 0.3 +
            importance_df['f_test_normalized'] * 0.2
        )

        importance_df = importance_df.sort_values('combined_score', ascending=False)

        return importance_df

    def analyze_feature_correlation(self, X):
        """Compute the feature correlation matrix and flag strong pairs.

        Returns
        -------
        (corr_matrix, high_corr_pairs) where ``corr_matrix`` is
        ``X.corr()`` and ``high_corr_pairs`` is a list of dicts
        (``feature1``, ``feature2``, ``correlation``) for every pair
        with |correlation| > 0.8 in the upper triangle.
        """
        print("📊 分析特征相关性...")

        corr_matrix = X.corr()

        # Scan only the upper triangle so each pair is reported once.
        high_corr_pairs = []
        n_cols = len(corr_matrix.columns)
        for i in range(n_cols):
            for j in range(i + 1, n_cols):
                corr_val = corr_matrix.iloc[i, j]
                if abs(corr_val) > 0.8:  # high-correlation threshold
                    high_corr_pairs.append({
                        'feature1': corr_matrix.columns[i],
                        'feature2': corr_matrix.columns[j],
                        'correlation': corr_val
                    })

        return corr_matrix, high_corr_pairs

    def analyze_feature_distribution(self, X, y):
        """Summarize each feature's distribution per class label.

        Returns
        -------
        dict mapping feature name -> {'class_<label>': {mean, std, min,
        max, median}} for every label in ``y``.
        """
        print("📊 分析特征分布...")

        distribution_stats = {}

        for feature in X.columns:
            feature_stats = {}
            for class_label in np.unique(y):
                # Boolean row mask: samples belonging to this class.
                class_data = X[y == class_label][feature]
                feature_stats[f'class_{class_label}'] = {
                    'mean': np.mean(class_data),
                    'std': np.std(class_data),
                    'min': np.min(class_data),
                    'max': np.max(class_data),
                    'median': np.median(class_data)
                }
            distribution_stats[feature] = feature_stats

        return distribution_stats

    def visualize_features(self, X, y, method='tsne', save_path=None):
        """Embed ``X`` in 2-D and scatter-plot it colored by class.

        Parameters
        ----------
        method : 'tsne' or 'pca'.
        save_path : optional path; when given, the figure is saved there
            (300 dpi PNG) instead of shown interactively.

        Returns
        -------
        The (n_samples, 2) embedded coordinates.

        Raises
        ------
        ValueError for an unknown ``method``.
        """
        print(f"📊 使用{method}方法进行特征可视化...")

        if method == 'tsne':
            # t-SNE requires perplexity < n_samples; clamp for tiny sets.
            perplexity = min(30, len(X) - 1)
            tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
            X_embedded = tsne.fit_transform(X)
            title = 't-SNE特征空间可视化'

        elif method == 'pca':
            pca = PCA(n_components=2)
            X_embedded = pca.fit_transform(X)
            title = f'PCA特征空间可视化 (解释方差比: {pca.explained_variance_ratio_.sum():.3f})'

        else:
            raise ValueError(f"不支持的可视化方法: {method}")

        plt.figure(figsize=(12, 8))

        # One distinct viridis color per class label.
        unique_labels = np.unique(y)
        colors = plt.cm.viridis(np.linspace(0, 1, len(unique_labels)))

        for i, label in enumerate(unique_labels):
            mask = y == label
            plt.scatter(X_embedded[mask, 0], X_embedded[mask, 1],
                        c=[colors[i]], label=f'类别 {label}', alpha=0.7, s=50)

        plt.xlabel('第一主成分')
        plt.ylabel('第二主成分')
        plt.title(title)
        plt.legend()
        plt.grid(True, alpha=0.3)

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"✅ 特征可视化图已保存: {save_path}")
        else:
            plt.show()

        plt.close()

        return X_embedded

    def plot_feature_importance(self, importance_df, top_n=20, save_path=None):
        """Draw a 2x2 bar-chart panel for the top ``top_n`` features.

        ``importance_df`` is the frame returned by
        :meth:`analyze_feature_importance`. Saves to ``save_path`` (300
        dpi PNG) when given, otherwise shows the figure.
        """
        print(f"📊 绘制前{top_n}个重要特征...")

        top_features = importance_df.head(top_n)

        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        fig.suptitle('特征重要性分析', fontsize=16, fontweight='bold')

        # One spec per panel: (grid position, score column, bar color,
        # axis title, x-axis label). Replaces four copy-pasted blocks.
        panels = [
            ((0, 0), 'random_forest', 'skyblue', '随机森林特征重要性', '重要性'),
            ((0, 1), 'mutual_info', 'lightcoral', '互信息特征重要性', '互信息分数'),
            ((1, 0), 'f_test', 'lightgreen', 'F检验特征重要性', 'F检验分数'),
            ((1, 1), 'combined_score', 'gold', '综合特征重要性', '综合分数'),
        ]
        for (row, col), score_col, color, title, xlabel in panels:
            ax = axes[row, col]
            ax.barh(range(len(top_features)), top_features[score_col], color=color)
            ax.set_yticks(range(len(top_features)))
            ax.set_yticklabels(top_features['feature'], fontsize=8)
            ax.set_xlabel(xlabel)
            ax.set_title(title)
            ax.grid(True, alpha=0.3)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"✅ 特征重要性图已保存: {save_path}")
        else:
            plt.show()

        plt.close()

    def plot_feature_correlation(self, corr_matrix, save_path=None):
        """Render ``corr_matrix`` as a lower-triangle heatmap.

        Saves to ``save_path`` (300 dpi PNG) when given, otherwise shows
        the figure.
        """
        print("📊 绘制特征相关性热力图...")

        plt.figure(figsize=(12, 10))

        # Mask the upper triangle — it mirrors the lower one.
        mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
        sns.heatmap(corr_matrix, mask=mask, annot=False, cmap='coolwarm', center=0,
                    square=True, linewidths=0.5, cbar_kws={"shrink": 0.8})

        plt.title('特征相关性热力图')
        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"✅ 特征相关性图已保存: {save_path}")
        else:
            plt.show()

        plt.close()

    def plot_feature_distribution(self, X, y, feature_names, save_path=None):
        """Box-plot the first (up to) 12 features of ``feature_names``
        per class on a 3x4 grid.

        Saves to ``save_path`` (300 dpi PNG) when given, otherwise shows
        the figure.
        """
        print("📊 绘制特征分布图...")

        # The grid has 12 cells; only plot what fits.
        n_features = min(12, len(feature_names))
        selected_features = feature_names[:n_features]

        fig, axes = plt.subplots(3, 4, figsize=(16, 12))
        fig.suptitle('特征分布分析', fontsize=16, fontweight='bold')

        axes = axes.flatten()

        for i, feature in enumerate(selected_features):
            if i < len(axes):
                # Gather this feature's values per class for the boxplot.
                data_by_class = []
                labels = []

                for class_label in np.unique(y):
                    class_data = X[y == class_label][feature]
                    data_by_class.append(class_data)
                    labels.append(f'类别 {class_label}')

                # NOTE(review): `labels=` was renamed `tick_labels=` in
                # matplotlib 3.9 — confirm the project's pinned version.
                axes[i].boxplot(data_by_class, labels=labels)
                axes[i].set_title(f'{feature}')
                axes[i].tick_params(axis='x', rotation=45)
                axes[i].grid(True, alpha=0.3)

        # Hide unused grid cells.
        for i in range(n_features, len(axes)):
            axes[i].set_visible(False)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"✅ 特征分布图已保存: {save_path}")
        else:
            plt.show()

        plt.close()

    def generate_feature_analysis_report(self, importance_df, corr_matrix, high_corr_pairs,
                                         distribution_stats, save_path=None):
        """Assemble a markdown report from the analysis artifacts.

        Parameters
        ----------
        importance_df : frame from :meth:`analyze_feature_importance`.
        corr_matrix, high_corr_pairs : from :meth:`analyze_feature_correlation`.
        distribution_stats : from :meth:`analyze_feature_distribution`.
        save_path : optional path; the report is also written there
            (UTF-8) when given.

        Returns
        -------
        The full report as a markdown string.
        """
        print("📝 生成特征分析报告...")

        report = f"""# 特征分析报告

## 1. 特征重要性分析

### 1.1 前10个重要特征
"""

        # Top-10 features by combined score.
        top_10_features = importance_df.head(10)
        for i, (_, row) in enumerate(top_10_features.iterrows(), 1):
            report += f"{i}. **{row['feature']}**: 综合分数 {row['combined_score']:.4f}\n"

        report += f"""
### 1.2 特征重要性统计
- **随机森林平均重要性**: {importance_df['random_forest'].mean():.4f}
- **互信息平均分数**: {importance_df['mutual_info'].mean():.4f}
- **F检验平均分数**: {importance_df['f_test'].mean():.4f}
- **综合平均分数**: {importance_df['combined_score'].mean():.4f}

## 2. 特征相关性分析

### 2.1 高相关性特征对
发现 {len(high_corr_pairs)} 对高相关性特征 (|相关系数| > 0.8):

"""

        # List at most the first 10 highly correlated pairs.
        for i, pair in enumerate(high_corr_pairs[:10], 1):
            report += f"{i}. {pair['feature1']} ↔ {pair['feature2']}: {pair['correlation']:.4f}\n"

        if len(high_corr_pairs) > 10:
            report += f"... 还有 {len(high_corr_pairs) - 10} 对高相关性特征\n"

        report += f"""
### 2.2 相关性统计
- **平均相关系数**: {corr_matrix.values[np.triu_indices_from(corr_matrix.values, k=1)].mean():.4f}
- **最大相关系数**: {corr_matrix.values[np.triu_indices_from(corr_matrix.values, k=1)].max():.4f}
- **最小相关系数**: {corr_matrix.values[np.triu_indices_from(corr_matrix.values, k=1)].min():.4f}

## 3. 特征分布分析

### 3.1 特征分布统计
"""

        # Per-class mean/std for the first 5 features only.
        for feature, stats in list(distribution_stats.items())[:5]:
            report += f"\n**{feature}**:\n"
            for class_label, class_stats in stats.items():
                report += f"  - {class_label}: 均值={class_stats['mean']:.4f}, 标准差={class_stats['std']:.4f}\n"

        report += f"""
## 4. 特征选择建议

### 4.1 推荐特征
基于综合重要性分数，推荐使用以下特征进行后续分析:
"""

        # Recommend the top 30 features by combined score.
        recommended_features = importance_df.head(30)['feature'].tolist()
        for i, feature in enumerate(recommended_features, 1):
            report += f"{i}. {feature}\n"

        report += f"""
### 4.2 特征选择策略
1. **高重要性特征**: 优先选择综合分数 > 0.5 的特征
2. **低相关性特征**: 避免选择高相关性的特征对
3. **分布差异特征**: 选择不同类别间分布差异明显的特征
4. **特征数量**: 建议选择20-30个特征以平衡性能和复杂度

---
*报告生成时间: {pd.Timestamp.now().strftime('%Y年%m月%d日 %H:%M:%S')}*
"""

        if save_path:
            with open(save_path, 'w', encoding='utf-8') as f:
                f.write(report)
            print(f"✅ 特征分析报告已保存: {save_path}")

        return report


