"""特征选择工具：分析和剔除低质量特征."""

from __future__ import annotations

import logging
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, List, Set

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import mutual_info_classif
from sklearn.preprocessing import LabelEncoder

from .quality_utils import (
    FUNCTIONAL_OVERLAP_GROUPS,
    categorize_feature,
    compute_coefficient_of_variation,
    compute_iqr_outlier_ratio,
    summarize_feature_categories,
    winsorize_series,
)

logger = logging.getLogger(__name__)


@dataclass
class FeatureQualityMetrics:
    """Quality metrics computed for a single feature column."""

    variance: float  # sample variance of the (possibly winsorized) series
    missing_ratio: float  # fraction of NaN values in the series
    correlation_with_target: float  # absolute Pearson correlation with the target
    mutual_info: float  # mutual information with the target (0.0 when no target given)
    importance_score: float | None = None  # normalized model importance, if available
    coefficient_of_variation: float | None = None  # coefficient of variation
    outlier_ratio: float | None = None  # fraction of IQR-based outliers
    winsorized: bool = False  # whether the series was winsorized during cleaning
    time_stability_gap: float | None = None  # |corr(first half) - corr(second half)| vs target
    rolling_importance_volatility: float | None = None  # spread/mean of rolling-window correlations
    stability_flag: str | None = None  # instability marker: "time_instability" or "rolling_instability"


@dataclass
class FeatureSelectionResult:
    """Outcome of a full feature-selection run."""

    selected_features: List[str]  # features kept (sorted)
    removed_features: List[str]  # features dropped, in removal order
    removal_reasons: Dict[str, str]  # human-readable reason per removed feature
    selection_reasons: Dict[str, str]  # human-readable reason per kept feature
    quality_metrics: Dict[str, FeatureQualityMetrics]  # metrics for every original feature
    original_count: int  # number of features before selection
    selected_count: int  # number of features after selection
    coverage_summary: Dict[str, int] = field(default_factory=dict)  # selected-feature count per category
    warnings: List[str] = field(default_factory=list)  # non-fatal warnings (count/coverage issues)


@dataclass
class FeatureQualityAnalysis:
    """Result of per-feature quality analysis."""

    sanitized_frame: pd.DataFrame  # cleaned feature frame (outlier-heavy columns winsorized)
    metrics: Dict[str, FeatureQualityMetrics]  # quality metrics keyed by feature name


class FeatureSelector:
    """Feature selector: analyze feature quality and keep only high-quality features.

    The selection pipeline runs a fixed sequence of filters (variance, missing
    ratio, coefficient of variation, target correlation, inter-feature
    correlation, functional-overlap groups, importance, mutual information,
    stability), then enforces a feature-count budget and category coverage.
    """

    def __init__(
        self,
        variance_threshold: float = 1e-6,
        missing_ratio_threshold: float = 0.05,
        correlation_threshold: float = 0.90,
        importance_threshold: float = 0.005,
        mutual_info_threshold: float = 0.01,
        cv_threshold: float = 1e-3,
        outlier_ratio_threshold: float = 0.10,
        target_correlation_threshold: float = 0.02,
        importance_similarity_gap: float = 0.001,
        min_feature_count: int = 30,
        max_feature_count: int = 50,
    ) -> None:
        """Initialize the feature selector.

        Parameters
        ----------
        variance_threshold:
            Features with variance below this are removed (default 1e-6).
        missing_ratio_threshold:
            Features whose missing-value ratio exceeds this are removed
            (default 0.05, i.e. 5%).
        correlation_threshold:
            For feature pairs with absolute correlation above this, only the
            more important one is kept (default 0.90).
        importance_threshold:
            Features with normalized importance below this are removed
            (default 0.005).
        mutual_info_threshold:
            Features with mutual information below this are removed
            (default 0.01).
        cv_threshold:
            Features with coefficient of variation below this are removed
            (default 1e-3).
        outlier_ratio_threshold:
            IQR outlier ratio above which a feature series is winsorized
            during cleaning (default 0.10).
        target_correlation_threshold:
            Features with absolute target correlation below this are removed
            when a target is provided (default 0.02).
        importance_similarity_gap:
            Features whose importance differs by less than this are treated as
            redundant and deduplicated (default 0.001).
            NOTE(review): applied as an *absolute* difference of normalized
            importances (see _deduplicate_similar_importance_features), not a
            relative ratio — confirm intended semantics.
        min_feature_count:
            Minimum recommended number of selected features (default 30);
            only a warning is emitted when fewer remain.
        max_feature_count:
            Hard cap on selected features (default 50); excess features are
            trimmed by stability/importance ranking.
        """
        self.variance_threshold = variance_threshold
        self.missing_ratio_threshold = missing_ratio_threshold
        self.correlation_threshold = correlation_threshold
        self.importance_threshold = importance_threshold
        self.mutual_info_threshold = mutual_info_threshold
        self.cv_threshold = cv_threshold
        self.outlier_ratio_threshold = outlier_ratio_threshold
        self.target_correlation_threshold = target_correlation_threshold
        self.importance_similarity_gap = importance_similarity_gap
        self.min_feature_count = min_feature_count
        self.max_feature_count = max_feature_count

    def analyze_feature_quality(
        self,
        X: pd.DataFrame,
        y: pd.Series | None = None,
        feature_importance: Dict[str, float] | None = None,
    ) -> FeatureQualityAnalysis:
        """Analyze quality metrics for every feature and return the cleaned frame.

        Parameters
        ----------
        X:
            Feature data (DataFrame).
        y:
            Target variable (optional; enables correlation, mutual information,
            and stability checks).
        feature_importance:
            Feature importance mapping (optional; e.g. from a trained model).

        Returns
        -------
        FeatureQualityAnalysis
        """
        metrics: Dict[str, FeatureQualityMetrics] = {}
        sanitized_columns: Dict[str, pd.Series] = {}

        # Clean each column independently and record its metrics.
        for col in X.columns:
            series = X[col]
            cleaned_series, metric = self._process_feature_series(series, y)
            sanitized_columns[col] = cleaned_series
            metrics[col] = metric

        sanitized_frame = pd.DataFrame(sanitized_columns, index=X.index)

        if y is not None:
            self._apply_stability_checks(sanitized_frame, y, metrics)

        # Merge in externally supplied importance scores: normalize so they sum
        # to 1, then keep the larger of existing and supplied score per feature.
        if feature_importance:
            total = sum(feature_importance.values()) or 1.0
            for feat, score in feature_importance.items():
                normalized = score / total
                if feat in metrics:
                    metrics[feat].importance_score = max(
                        metrics[feat].importance_score or 0.0,
                        normalized,
                    )

        return FeatureQualityAnalysis(sanitized_frame=sanitized_frame, metrics=metrics)

    def select_features(
        self,
        X: pd.DataFrame,
        y: pd.Series | None = None,
        feature_importance: Dict[str, float] | None = None,
    ) -> FeatureSelectionResult:
        """Select high-quality features.

        Parameters
        ----------
        X:
            Feature data (DataFrame).
        y:
            Target variable (optional; enables target-based filters).
        feature_importance:
            Feature importance mapping (optional).

        Returns
        -------
        FeatureSelectionResult
            Kept/removed features, per-feature reasons, metrics, coverage
            summary, and warnings.
        """
        original_features = list(X.columns)
        original_count = len(original_features)

        logger.info(f"开始特征选择: 原始特征数={original_count}")

        # 1. Analyze quality of all features (also yields the cleaned frame).
        analysis = self.analyze_feature_quality(X, y, feature_importance)
        quality_metrics = analysis.metrics
        sanitized_X = analysis.sanitized_frame

        # Fold in random-forest importance, keeping the max with any existing score.
        rf_importance = self._compute_random_forest_importance(sanitized_X, y)
        for feat, score in rf_importance.items():
            if feat in quality_metrics:
                quality_metrics[feat].importance_score = max(
                    quality_metrics[feat].importance_score or 0.0,
                    score,
                )

        # 2. Iteratively drop low-quality features.
        removed_features: List[str] = []
        removal_reasons: Dict[str, str] = {}
        remaining_features = set(original_features)
        warnings: List[str] = []

        # 2.1 Drop low-variance features.
        low_variance_features = [
            feat
            for feat in remaining_features
            if quality_metrics[feat].variance < self.variance_threshold
        ]
        for feat in low_variance_features:
            removed_features.append(feat)
            removal_reasons[feat] = f"低方差 (方差={quality_metrics[feat].variance:.2e} < {self.variance_threshold:.2e})"
            remaining_features.remove(feat)
        if low_variance_features:
            logger.info(f"剔除低方差特征: {len(low_variance_features)} 个")

        # 2.2 Drop features with too many missing values.
        high_missing_features = [
            feat
            for feat in remaining_features
            if quality_metrics[feat].missing_ratio > self.missing_ratio_threshold
        ]
        for feat in high_missing_features:
            removed_features.append(feat)
            removal_reasons[feat] = (
                f"高缺失值 (缺失比例={quality_metrics[feat].missing_ratio:.2%} > {self.missing_ratio_threshold:.2%})"
            )
            remaining_features.remove(feat)
        if high_missing_features:
            logger.info(f"剔除高缺失值特征: {len(high_missing_features)} 个")

        # 2.3 Drop features with a low coefficient of variation.
        low_cv_features = [
            feat
            for feat in remaining_features
            if quality_metrics[feat].coefficient_of_variation is not None
            and quality_metrics[feat].coefficient_of_variation < self.cv_threshold
        ]
        for feat in low_cv_features:
            removed_features.append(feat)
            removal_reasons[feat] = (
                f"低变异系数 (CV={quality_metrics[feat].coefficient_of_variation:.3e} < {self.cv_threshold:.3e})"
            )
            remaining_features.remove(feat)
        if low_cv_features:
            logger.info(f"剔除低变异系数特征: {len(low_cv_features)} 个")

        # 2.4 Drop features with low feature-target correlation (target required).
        if y is not None:
            low_target_corr_features = [
                feat
                for feat in remaining_features
                if quality_metrics[feat].correlation_with_target < self.target_correlation_threshold
            ]
            for feat in low_target_corr_features:
                removed_features.append(feat)
                removal_reasons[feat] = (
                    f"目标相关性低 (|corr|={quality_metrics[feat].correlation_with_target:.3f} < "
                    f"{self.target_correlation_threshold:.3f})"
                )
                remaining_features.remove(feat)
            if low_target_corr_features:
                logger.info(f"剔除低目标相关性特征: {len(low_target_corr_features)} 个")

        # 2.5 Drop highly correlated features (keep the more important of each pair).
        if len(remaining_features) > 1:
            remaining_features_list = list(remaining_features)
            X_remaining = sanitized_X[remaining_features_list]

            # Absolute inter-feature correlation matrix.
            corr_matrix = X_remaining.corr().abs()

            # Collect all pairs above the correlation threshold.
            high_corr_pairs: List[tuple[str, str]] = []
            for i, feat1 in enumerate(remaining_features_list):
                for j, feat2 in enumerate(remaining_features_list[i + 1 :], start=i + 1):
                    if corr_matrix.loc[feat1, feat2] > self.correlation_threshold:
                        high_corr_pairs.append((feat1, feat2))

            # For each high-correlation pair, keep the more important feature.
            features_to_remove: Set[str] = set()
            for feat1, feat2 in high_corr_pairs:
                # Skip pairs where one side has already been scheduled for removal.
                if feat1 in features_to_remove or feat2 in features_to_remove:
                    continue

                # Importance falls back to target correlation when no model score exists.
                importance1 = (
                    quality_metrics[feat1].importance_score
                    if quality_metrics[feat1].importance_score is not None
                    else quality_metrics[feat1].correlation_with_target
                )
                importance2 = (
                    quality_metrics[feat2].importance_score
                    if quality_metrics[feat2].importance_score is not None
                    else quality_metrics[feat2].correlation_with_target
                )

                # Keep the feature with the higher importance (ties keep feat1).
                if importance1 < importance2:
                    features_to_remove.add(feat1)
                    removal_reasons[feat1] = (
                        f"高相关性 (与 {feat2} 相关性={corr_matrix.loc[feat1, feat2]:.3f} > {self.correlation_threshold:.3f})"
                    )
                else:
                    features_to_remove.add(feat2)
                    removal_reasons[feat2] = (
                        f"高相关性 (与 {feat1} 相关性={corr_matrix.loc[feat1, feat2]:.3f} > {self.correlation_threshold:.3f})"
                    )

            for feat in features_to_remove:
                removed_features.append(feat)
                remaining_features.remove(feat)
            if features_to_remove:
                logger.info(f"剔除高相关性特征: {len(features_to_remove)} 个")

        # 2.6 Cap functional-overlap groups (keep at most 2 representatives per group).
        self._apply_overlap_reduction(remaining_features, quality_metrics, removal_reasons, removed_features)

        # 2.7 Drop low-importance features (only when any importance is available).
        importance_available = any(
            quality_metrics[feat].importance_score is not None for feat in remaining_features
        )
        if importance_available:
            low_importance_features = [
                feat
                for feat in remaining_features
                if quality_metrics[feat].importance_score is not None
                and quality_metrics[feat].importance_score < self.importance_threshold
            ]
            for feat in low_importance_features:
                removed_features.append(feat)
                removal_reasons[feat] = (
                    f"低重要性 (重要性={quality_metrics[feat].importance_score:.6f} < {self.importance_threshold:.6f})"
                )
                remaining_features.remove(feat)
            if low_importance_features:
                logger.info(f"剔除低重要性特征: {len(low_importance_features)} 个")

            self._deduplicate_similar_importance_features(
                remaining_features, quality_metrics, removal_reasons, removed_features
            )

        # 2.8 Drop low-mutual-information features (target required).
        if y is not None:
            low_mutual_info_features = [
                feat
                for feat in remaining_features
                if quality_metrics[feat].mutual_info < self.mutual_info_threshold
            ]
            for feat in low_mutual_info_features:
                removed_features.append(feat)
                removal_reasons[feat] = (
                    f"低互信息 (互信息={quality_metrics[feat].mutual_info:.6f} < {self.mutual_info_threshold:.6f})"
                )
                remaining_features.remove(feat)
            if low_mutual_info_features:
                logger.info(f"剔除低互信息特征: {len(low_mutual_info_features)} 个")

        # 2.9 Drop features flagged as unstable by the stability checks.
        unstable_features = [
            feat for feat in remaining_features if quality_metrics[feat].stability_flag is not None
        ]
        for feat in unstable_features:
            removed_features.append(feat)
            removal_reasons[feat] = f"稳定性风险 ({quality_metrics[feat].stability_flag})"
            remaining_features.remove(feat)
        if unstable_features:
            logger.info(f"剔除稳定性风险特征: {len(unstable_features)} 个")

        selected_features = sorted(list(remaining_features))
        selected_count = len(selected_features)

        # 3. Enforce the feature-count budget and category coverage.
        selected_features, coverage_summary, coverage_warnings = self._enforce_feature_count_and_coverage(
            selected_features, quality_metrics
        )
        warnings.extend(coverage_warnings)
        selected_count = len(selected_features)

        logger.info(
            f"特征选择完成: 原始特征数={original_count}, 选中特征数={selected_count}, "
            f"剔除特征数={len(removed_features)}, 保留比例={selected_count/original_count:.2%}"
        )

        selection_reasons = self._build_selection_reasons(selected_features, quality_metrics)

        return FeatureSelectionResult(
            selected_features=selected_features,
            removed_features=removed_features,
            removal_reasons=removal_reasons,
            selection_reasons=selection_reasons,
            quality_metrics=quality_metrics,
            original_count=original_count,
            selected_count=selected_count,
            coverage_summary=coverage_summary,
            warnings=warnings,
        )

    def apply_selection(
        self,
        X: pd.DataFrame,
        selection_result: FeatureSelectionResult,
    ) -> pd.DataFrame:
        """Apply a selection result and return the filtered feature frame.

        Parameters
        ----------
        X:
            Original feature data.
        selection_result:
            Result from a previous :meth:`select_features` call.

        Returns
        -------
        pd.DataFrame
            Copy of ``X`` restricted to the selected features.
        """
        return X[selection_result.selected_features].copy()

    # --------------------------------------------------------------------- #
    # Internal helpers
    # --------------------------------------------------------------------- #

    def _process_feature_series(
        self,
        series: pd.Series,
        y: pd.Series | None,
    ) -> tuple[pd.Series, FeatureQualityMetrics]:
        """Clean a single feature series and compute its quality metrics."""
        cleaned = series.copy()
        variance = float(cleaned.var())
        missing_ratio = float(cleaned.isna().sum() / len(cleaned)) if len(cleaned) else 0.0

        cv = compute_coefficient_of_variation(cleaned)
        outlier_ratio = compute_iqr_outlier_ratio(cleaned)
        winsorized = False
        # Winsorize outlier-heavy series, then recompute variance/missing ratio
        # on the clipped values.
        if outlier_ratio > self.outlier_ratio_threshold:
            cleaned = winsorize_series(cleaned)
            winsorized = True
            variance = float(cleaned.var())
            missing_ratio = float(cleaned.isna().sum() / len(cleaned)) if len(cleaned) else 0.0

        correlation_with_target = 0.0
        mutual_info = 0.0
        if y is not None:
            correlation_with_target = self._safe_correlation(cleaned, y)
            mutual_info = self._safe_mutual_info(cleaned, y)

        metrics = FeatureQualityMetrics(
            variance=variance,
            missing_ratio=missing_ratio,
            correlation_with_target=abs(correlation_with_target),
            mutual_info=mutual_info,
            coefficient_of_variation=cv,
            outlier_ratio=outlier_ratio,
            winsorized=winsorized,
        )

        return cleaned, metrics

    def _safe_correlation(self, feature: pd.Series, target: pd.Series) -> float:
        """Pearson correlation over jointly non-NaN rows; 0.0 on failure or no data."""
        mask = ~(feature.isna() | target.isna())
        if mask.sum() == 0:
            return 0.0
        try:
            corr = feature[mask].corr(target[mask])
            return float(corr) if not np.isnan(corr) else 0.0
        except Exception:
            # Best-effort metric: any computation failure degrades to 0.0.
            return 0.0

    def _safe_mutual_info(self, feature: pd.Series, target: pd.Series) -> float:
        """Mutual information on jointly non-NaN rows; 0.0 when data is insufficient."""
        mask = ~(feature.isna() | target.isna())
        # Require more than 10 paired observations for a meaningful estimate.
        if mask.sum() <= 10:
            return 0.0
        feature_values = feature[mask].values
        target_values = target[mask].values
        # Replace inf/NaN residue with 0 so sklearn accepts the arrays.
        feature_values = np.where(np.isfinite(feature_values), feature_values, 0)
        target_values = np.where(np.isfinite(target_values), target_values, 0)
        if np.var(feature_values) < 1e-10:
            return 0.0
        try:
            mi = mutual_info_classif(
                feature_values.reshape(-1, 1),
                target_values,
                random_state=42,
                discrete_features=False,
            )
            return float(mi[0]) if len(mi) > 0 else 0.0
        except Exception as exc:
            logger.debug("计算互信息失败: %s", exc)
            return 0.0

    def _apply_stability_checks(
        self,
        X: pd.DataFrame,
        y: pd.Series,
        metrics: Dict[str, FeatureQualityMetrics],
    ) -> None:
        """Run time-split and rolling-window stability checks, flagging unstable features."""
        if len(X) < 20:
            return

        # Time stability: compare target correlation in the first vs. second half.
        # Threshold tightened from 0.3 to 0.2 to flag time instability more strictly.
        split = len(X) // 2
        if split >= 20:
            left_y = y.iloc[:split]
            right_y = y.iloc[split:]
            for feat in X.columns:
                left_corr = self._safe_correlation(X[feat].iloc[:split], left_y)
                right_corr = self._safe_correlation(X[feat].iloc[split:], right_y)
                gap = abs(left_corr - right_corr)
                # Flag features whose half-period correlations diverge by more than 0.2.
                if gap > 0.2:
                    metrics[feat].stability_flag = "time_instability"
                    metrics[feat].time_stability_gap = gap

        # Rolling-window "importance" volatility (correlation used as a proxy).
        # Threshold tightened from 0.5 to 0.3 to flag rolling instability more strictly.
        windows = [60, 120, 180]
        for feat in X.columns:
            corr_values = []
            for window in windows:
                if len(X) < window or window < 10:
                    continue
                corr = self._safe_correlation(X[feat].iloc[-window:], y.iloc[-window:])
                if corr != 0:
                    corr_values.append(abs(corr))
            # Need at least two windows to measure spread.
            if len(corr_values) >= 2:
                spread = max(corr_values) - min(corr_values)
                mean_corr = np.mean(corr_values)
                ratio = spread / max(mean_corr, 1e-6)
                metrics[feat].rolling_importance_volatility = ratio
                # Flag when spread exceeds 30% of the mean correlation.
                if ratio > 0.3:
                    metrics[feat].stability_flag = "rolling_instability"

    def _compute_random_forest_importance(
        self,
        X: pd.DataFrame,
        y: pd.Series | None,
    ) -> Dict[str, float]:
        """Compute normalized feature importances with a random forest.

        Returns an empty dict when the target is missing/degenerate or fewer
        than 50 aligned rows are available.
        """
        if y is None or y.dropna().nunique() < 2:
            return {}
        common_index = X.dropna(how="all").index.intersection(y.dropna().index)
        if len(common_index) < 50:
            return {}
        X_subset = X.loc[common_index].fillna(0)
        y_subset = y.loc[common_index]
        if y_subset.nunique() < 2:
            return {}
        encoder = LabelEncoder()
        y_encoded = encoder.fit_transform(y_subset)
        model = RandomForestClassifier(
            n_estimators=100,
            max_depth=5,
            random_state=42,
            n_jobs=-1,
        )
        model.fit(X_subset, y_encoded)
        importances = model.feature_importances_
        total = importances.sum()
        if total == 0:
            return {}
        # Normalize so importances sum to 1.
        return {
            feature: importance / total
            for feature, importance in zip(X_subset.columns, importances)
        }

    def _apply_overlap_reduction(
        self,
        remaining_features: Set[str],
        metrics: Dict[str, FeatureQualityMetrics],
        removal_reasons: Dict[str, str],
        removed_features: List[str],
    ) -> None:
        """Keep at most 2 representatives (by target correlation) per functional-overlap group."""
        for group in FUNCTIONAL_OVERLAP_GROUPS:
            available = [feat for feat in group if feat in remaining_features]
            if len(available) <= 2:
                continue
            ranked = sorted(
                available,
                key=lambda feat: metrics[feat].correlation_with_target,
                reverse=True,
            )
            # Drop everything beyond the top two in the group.
            for feat in ranked[2:]:
                remaining_features.remove(feat)
                removed_features.append(feat)
                removal_reasons[feat] = "功能重叠 (同组保留不超过 2 个)"

    def _deduplicate_similar_importance_features(
        self,
        remaining_features: Set[str],
        metrics: Dict[str, FeatureQualityMetrics],
        removal_reasons: Dict[str, str],
        removed_features: List[str],
    ) -> None:
        """Drop redundant features whose importance differs by less than the gap threshold.

        Features are grouped greedily from highest importance down; within a
        group only the highest-importance anchor survives.
        """
        features_with_importance = [
            feat
            for feat in remaining_features
            if metrics[feat].importance_score is not None
        ]
        if len(features_with_importance) <= 1:
            return

        # Sort by importance so anchors are always the strongest member seen so far.
        ranked = sorted(
            features_with_importance,
            key=lambda feat: metrics[feat].importance_score or 0.0,
            reverse=True,
        )
        grouped_out: Set[str] = set()
        for i, anchor in enumerate(ranked):
            if anchor in grouped_out:
                continue
            anchor_importance = metrics[anchor].importance_score or 0.0
            for candidate in ranked[i + 1 :]:
                if candidate in grouped_out:
                    continue
                candidate_importance = metrics[candidate].importance_score or 0.0
                # NOTE(review): absolute importance difference, not a relative
                # ratio — the removal-reason text assumes scores are fractions.
                gap = abs(anchor_importance - candidate_importance)
                if gap < self.importance_similarity_gap:
                    grouped_out.add(candidate)
                    if candidate in remaining_features:
                        remaining_features.remove(candidate)
                        removed_features.append(candidate)
                        removal_reasons[candidate] = (
                            "重要性差异<0.1% (与"
                            f" {anchor} 差异={gap:.6f} < {self.importance_similarity_gap:.6f})"
                        )

    def _enforce_feature_count_and_coverage(
        self,
        selected_features: List[str],
        metrics: Dict[str, FeatureQualityMetrics],
    ) -> tuple[List[str], Dict[str, int], List[str]]:
        """Enforce the feature-count budget, summarize category coverage, and collect warnings."""
        warnings: List[str] = []

        if len(selected_features) > self.max_feature_count:
            # Rank with stability first: unflagged features sort ahead of flagged
            # ones, then by importance (falling back to target correlation).
            ranking = sorted(
                selected_features,
                key=lambda feat: (
                    # stability score: 1.0 when no instability flag, else 0.0
                    0.0 if metrics[feat].stability_flag is not None else 1.0,
                    # importance score (fallback: target correlation)
                    metrics[feat].importance_score
                    if metrics[feat].importance_score is not None
                    else metrics[feat].correlation_with_target,
                ),
                reverse=True,
            )
            keep = ranking[: self.max_feature_count]
            drop = set(selected_features) - set(keep)
            selected_features = keep
            warnings.append(
                f"特征数超过 {self.max_feature_count}，自动裁剪 {len(drop)} 个低重要性/不稳定特征"
            )
        elif len(selected_features) < self.min_feature_count:
            warnings.append(
                f"特征数不足 {self.min_feature_count}（当前 {len(selected_features)}），建议补充特征或放宽筛选条件"
            )

        coverage_summary = summarize_feature_categories(selected_features)
        # Recommended minimum share of the selection for each feature category.
        required_ratios = {
            "price_trend": 0.30,
            "volume": 0.20,
            "volatility": 0.20,
            "technical_indicator": 0.20,
            "interaction": 0.10,
        }
        total = max(len(selected_features), 1)
        for category, ratio in required_ratios.items():
            actual_ratio = coverage_summary.get(category, 0) / total
            if actual_ratio < ratio:
                warnings.append(
                    f"{category} 类特征占比 {actual_ratio:.0%} 低于建议 {ratio:.0%}"
                )

        return selected_features, coverage_summary, warnings

    def _build_selection_reasons(
        self,
        selected_features: List[str],
        metrics: Dict[str, FeatureQualityMetrics],
    ) -> Dict[str, str]:
        """Build a correlation/importance/stability explanation for each kept feature."""
        reasons: Dict[str, str] = {}
        for feat in selected_features:
            metric = metrics[feat]
            importance = (
                f"{metric.importance_score:.3%}"
                if metric.importance_score is not None
                else "N/A"
            )
            stability = (
                "稳定"
                if metric.stability_flag is None
                else f"风险:{metric.stability_flag}"
            )
            reasons[feat] = (
                f"相关性={metric.correlation_with_target:.3f}, "
                f"重要性={importance}, "
                f"稳定性={stability}"
            )
        return reasons

