"""
洞察提取引擎

提供模式识别算法、关键发现提取逻辑和洞察质量评估功能
"""

import logging
import json
import numpy as np
import pandas as pd
from typing import Dict, List, Optional, Any, Tuple, Union, Callable
from dataclasses import dataclass, field
from enum import Enum
import datetime
import math
import statistics
from pathlib import Path
from scipy import stats
from sklearn.cluster import KMeans, DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import networkx as nx

from ..models.base_models import BaseModel
from .base_component import BaseComponent

logger = logging.getLogger(__name__)


class InsightType(Enum):
    """Categories of insight the extraction engine can produce."""
    CORRELATION = "correlation"         # correlation between variables
    TREND = "trend"                    # temporal trend
    PATTERN = "pattern"                # recurring pattern
    ANOMALY = "anomaly"                # anomalous values
    DISTRIBUTION = "distribution"       # distribution shape
    CLUSTERING = "clustering"          # cluster structure
    CAUSALITY = "causality"            # causal relationship
    SEASONALITY = "seasonality"        # seasonal cycle
    COMPARISON = "comparison"          # group comparison
    PREDICTION = "prediction"          # predictive insight
    ASSOCIATION = "association"        # association rule
    OUTLIER = "outlier"               # outlier points

class InsightSignificance(Enum):
    """Importance level assigned to an insight."""
    LOW = "low"                       # low importance
    MEDIUM = "medium"                 # medium importance
    HIGH = "high"                     # high importance
    CRITICAL = "critical"             # critical importance


class InsightConfidence(Enum):
    """Categorical confidence level, derived from the numeric score."""
    VERY_LOW = "very_low"             # very low (0-0.3)
    LOW = "low"                       # low (0.3-0.5)
    MEDIUM = "medium"                 # medium (0.5-0.7)
    HIGH = "high"                     # high (0.7-0.9)
    VERY_HIGH = "very_high"           # very high (0.9-1.0)


@dataclass
class InsightEvidence(BaseModel):
    """A single piece of evidence backing an insight."""
    evidence_type: str = ""           # kind of analysis that produced it
    description: str = ""             # human-readable description
    statistical_value: Optional[float] = None  # main statistic (r, ratio, slope, ...)
    p_value: Optional[float] = None   # p-value, when a statistical test was run
    confidence_interval: Optional[Tuple[float, float]] = None  # (lower, upper) CI bounds
    sample_size: Optional[int] = None # number of observations used
    effect_size: Optional[float] = None  # effect size of the finding
    
    # Auxiliary payloads attached by the extraction methods
    supporting_data: Dict[str, Any] = field(default_factory=dict)
    visualization_data: Dict[str, Any] = field(default_factory=dict)


@dataclass
class Insight(BaseModel):
    """A single extracted insight plus its quality metrics and evidence."""
    # Identity
    insight_id: str = ""
    title: str = ""
    description: str = ""
    insight_type: InsightType = InsightType.PATTERN
    
    # Quality assessment
    significance: InsightSignificance = InsightSignificance.MEDIUM
    confidence_score: float = 0.0
    confidence_level: InsightConfidence = InsightConfidence.MEDIUM
    novelty_score: float = 0.0
    actionability_score: float = 0.0
    
    # Statistics
    statistical_significance: Optional[float] = None
    effect_size: Optional[float] = None
    sample_size: int = 0
    
    # Related data
    involved_variables: List[str] = field(default_factory=list)
    affected_records: List[int] = field(default_factory=list)
    time_range: Optional[Tuple[datetime.datetime, datetime.datetime]] = None
    
    # Supporting evidence
    evidence: List[InsightEvidence] = field(default_factory=list)
    
    # Business impact
    business_impact: str = ""
    recommended_actions: List[str] = field(default_factory=list)
    
    # Metadata
    extraction_method: str = ""
    extraction_time: datetime.datetime = field(default_factory=datetime.datetime.now)
    validation_status: str = "pending"  # one of: pending, validated, rejected
    
    def get_overall_quality_score(self) -> float:
        """Return the weighted overall quality score in [0, 1]."""
        significance_values = {
            InsightSignificance.LOW: 0.25,
            InsightSignificance.MEDIUM: 0.5,
            InsightSignificance.HIGH: 0.75,
            InsightSignificance.CRITICAL: 1.0,
        }
        # (weight, component) pairs; weights sum to 1.0.
        weighted_parts = (
            (0.3, self.confidence_score),
            (0.25, significance_values.get(self.significance, 0.5)),
            (0.2, self.novelty_score),
            (0.25, self.actionability_score),
        )
        return sum(weight * value for weight, value in weighted_parts)
    
    def add_evidence(self, evidence: InsightEvidence):
        """Append a piece of evidence, bumping confidence for significant p-values."""
        self.evidence.append(evidence)
        
        # Each statistically significant piece of evidence (p < 0.05)
        # adds 0.1 to the confidence score, capped at 1.0.
        p = evidence.p_value
        if p is not None and p < 0.05:
            self.confidence_score = min(1.0, self.confidence_score + 0.1)
    
    def update_confidence_level(self):
        """Re-derive the categorical confidence level from confidence_score."""
        cutoffs = (
            (0.9, InsightConfidence.VERY_HIGH),
            (0.7, InsightConfidence.HIGH),
            (0.5, InsightConfidence.MEDIUM),
            (0.3, InsightConfidence.LOW),
        )
        for threshold, level in cutoffs:
            if self.confidence_score >= threshold:
                self.confidence_level = level
                return
        self.confidence_level = InsightConfidence.VERY_LOW


@dataclass
class InsightExtractionConfig(BaseModel):
    """Configuration knobs for an insight-extraction run."""
    # Extraction method toggles (distribution/seasonality/comparison/
    # association/outlier passes are always run regardless)
    enable_correlation_analysis: bool = True
    enable_trend_analysis: bool = True
    enable_pattern_recognition: bool = True
    enable_anomaly_insights: bool = True
    enable_clustering_insights: bool = True
    
    # Statistical thresholds
    correlation_threshold: float = 0.5   # min |r| for a correlation insight
    significance_level: float = 0.05     # max p-value accepted as significant
    min_sample_size: int = 10            # min observations per analysis
    trend_window_size: int = 10          # min points for trend regression
    
    # Quality filtering
    min_confidence_score: float = 0.3
    min_significance: InsightSignificance = InsightSignificance.LOW
    max_insights_per_type: int = 10
    
    # Clustering configuration
    max_clusters: int = 10
    min_cluster_size: int = 5
    
    # Time-series configuration (candidate seasonal periods, in days)
    seasonal_periods: List[int] = field(default_factory=lambda: [7, 30, 365])
    
    # Business configuration
    business_context: Dict[str, Any] = field(default_factory=dict)
    domain_knowledge: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ExtractionResult(BaseModel):
    """Outcome of an insight-extraction run."""
    success: bool = True
    error_message: str = ""
    
    # Extraction statistics
    total_insights: int = 0
    insights_by_type: Dict[str, int] = field(default_factory=dict)
    insights_by_significance: Dict[str, int] = field(default_factory=dict)
    
    # Extracted insights
    insights: List[Insight] = field(default_factory=list)
    
    # Performance statistics
    extraction_time_ms: float = 0.0
    data_points_analyzed: int = 0
    
    # Quality summary
    average_confidence: float = 0.0
    average_quality_score: float = 0.0
    
    def get_top_insights(self, n: int = 10) -> List[Insight]:
        """Return the n insights with the highest overall quality score."""
        ranked = sorted(
            self.insights,
            key=Insight.get_overall_quality_score,
            reverse=True,
        )
        return ranked[:n]
    
    def get_insights_by_type(self, insight_type: InsightType) -> List[Insight]:
        """Return every insight of the given type."""
        return list(filter(lambda ins: ins.insight_type == insight_type, self.insights))
    
    def get_high_confidence_insights(self, min_confidence: float = 0.7) -> List[Insight]:
        """Return insights whose confidence score is at least min_confidence."""
        return list(filter(lambda ins: ins.confidence_score >= min_confidence, self.insights))


class InsightExtractionEngine(BaseComponent):
    """洞察提取引擎"""
    
    def get_required_configs(self) -> List[str]:
        """Return the configuration keys this component requires (none)."""
        required: List[str] = []
        return required
    
    def _setup_component(self):
        """Component-specific setup.

        Registers the per-type extraction methods, the Chinese description
        templates used to render insight text, and an empty result cache.
        """
        self.logger.info("洞察提取引擎初始化")
        
        # Dispatch table: insight type -> bound extraction method.
        # NOTE(review): CAUSALITY and PREDICTION have no extractor registered.
        self.extraction_methods = {
            InsightType.CORRELATION: self._extract_correlation_insights,
            InsightType.TREND: self._extract_trend_insights,
            InsightType.PATTERN: self._extract_pattern_insights,
            InsightType.ANOMALY: self._extract_anomaly_insights,
            InsightType.DISTRIBUTION: self._extract_distribution_insights,
            InsightType.CLUSTERING: self._extract_clustering_insights,
            InsightType.SEASONALITY: self._extract_seasonality_insights,
            InsightType.COMPARISON: self._extract_comparison_insights,
            InsightType.ASSOCIATION: self._extract_association_insights,
            InsightType.OUTLIER: self._extract_outlier_insights
        }
        
        # Description templates keyed by insight type; each extraction method
        # fills its template via str.format. (Runtime strings — do not edit.)
        self.insight_templates = {
            InsightType.CORRELATION: "发现 {var1} 与 {var2} 之间存在{strength}相关性 (r={correlation:.3f})",
            InsightType.TREND: "{variable} 在 {period} 期间呈现{direction}趋势，变化率为 {rate:.2f}%",
            InsightType.PATTERN: "在 {context} 中发现{pattern_type}模式，影响 {affected_count} 个数据点",
            InsightType.ANOMALY: "检测到 {count} 个异常值，主要集中在 {variable} 变量",
            InsightType.DISTRIBUTION: "{variable} 的分布呈现{distribution_type}特征，偏度为 {skewness:.2f}",
            InsightType.CLUSTERING: "数据可分为 {cluster_count} 个明显的群组，最大群组包含 {max_cluster_size} 个样本",
            InsightType.SEASONALITY: "{variable} 存在 {period} 天的周期性模式，强度为 {strength:.2f}",
            InsightType.COMPARISON: "{group1} 与 {group2} 在 {variable} 上存在显著差异 (p={p_value:.3f})",
            InsightType.ASSOCIATION: "发现 {item1} 与 {item2} 之间的关联规则，支持度为 {support:.2f}",
            InsightType.OUTLIER: "识别出 {count} 个离群值，偏离程度最大的是 {max_outlier}"
        }
        
        # Cache for extraction results; key format is defined by callers.
        self.extraction_cache: Dict[str, ExtractionResult] = {}
        
        self.logger.info("洞察提取引擎初始化完成")
  
    def extract_insights(self, 
                        data: pd.DataFrame, 
                        config: InsightExtractionConfig = None) -> ExtractionResult:
        """
        Extract insights from the given data.

        Args:
            data: input DataFrame to analyse
            config: extraction configuration; defaults are used when None

        Returns:
            ExtractionResult holding filtered, ranked insights and run stats
        """
        try:
            started_at = datetime.datetime.now()
            
            if config is None:
                config = InsightExtractionConfig()
            
            result = ExtractionResult(data_points_analyzed=len(data))
            
            # (insight type, enabled?) pairs — the last five passes always run.
            planned_tasks = [
                (InsightType.CORRELATION, config.enable_correlation_analysis),
                (InsightType.TREND, config.enable_trend_analysis),
                (InsightType.PATTERN, config.enable_pattern_recognition),
                (InsightType.ANOMALY, config.enable_anomaly_insights),
                (InsightType.CLUSTERING, config.enable_clustering_insights),
                (InsightType.DISTRIBUTION, True),
                (InsightType.SEASONALITY, True),
                (InsightType.COMPARISON, True),
                (InsightType.ASSOCIATION, True),
                (InsightType.OUTLIER, True),
            ]
            
            collected: List[Insight] = []
            for insight_type, enabled in planned_tasks:
                extractor = self.extraction_methods.get(insight_type)
                if not enabled or extractor is None:
                    continue
                # A failure in one pass must not abort the others.
                try:
                    batch = extractor(data, config)
                except Exception as e:
                    self.logger.warning(f"提取 {insight_type.value} 洞察失败: {str(e)}")
                else:
                    collected.extend(batch)
                    self.logger.info(f"提取 {insight_type.value} 洞察: {len(batch)} 个")
            
            # Filter by quality thresholds, then order by overall quality.
            result.insights = self._rank_insights(self._filter_insights(collected, config))
            
            result.total_insights = len(result.insights)
            result.insights_by_type = self._count_insights_by_type(result.insights)
            result.insights_by_significance = self._count_insights_by_significance(result.insights)
            
            # Aggregate quality metrics (guard against division by zero).
            if result.insights:
                count = len(result.insights)
                result.average_confidence = sum(i.confidence_score for i in result.insights) / count
                result.average_quality_score = sum(i.get_overall_quality_score() for i in result.insights) / count
            
            execution_time = (datetime.datetime.now() - started_at).total_seconds() * 1000
            result.extraction_time_ms = execution_time
            
            self.logger.info(f"洞察提取完成: 提取到 {result.total_insights} 个洞察 (耗时: {execution_time:.2f}ms)")
            return result
            
        except Exception as e:
            self.logger.error(f"洞察提取失败: {str(e)}")
            return ExtractionResult(
                success=False,
                error_message=str(e),
                data_points_analyzed=len(data) if data is not None else 0
            )
    
    def extract_targeted_insights(self, 
                                data: pd.DataFrame, 
                                target_variable: str,
                                config: InsightExtractionConfig = None) -> ExtractionResult:
        """
        Extract insights focused on one target variable.

        Args:
            data: input DataFrame
            target_variable: name of the column of interest
            config: extraction configuration; defaults are used when None

        Returns:
            ExtractionResult with correlation, feature-importance and
            distribution insights about the target variable
        """
        try:
            if config is None:
                config = InsightExtractionConfig()
            
            # Record the focus variable so downstream analyses can use it.
            config.business_context['target_variable'] = target_variable
            
            insights: List[Insight] = []
            
            # Only analyse when the target actually exists in the data.
            if target_variable in data.columns:
                target_analyzers = (
                    self._extract_target_correlations,   # correlations with target
                    self._extract_feature_importance,    # feature importance
                    self._extract_target_distribution,   # target distribution
                )
                for analyzer in target_analyzers:
                    insights.extend(analyzer(data, target_variable, config))
            
            return ExtractionResult(
                insights=insights,
                total_insights=len(insights),
                data_points_analyzed=len(data)
            )
            
        except Exception as e:
            self.logger.error(f"目标洞察提取失败: {str(e)}")
            return ExtractionResult(
                success=False,
                error_message=str(e),
                data_points_analyzed=len(data) if data is not None else 0
            )
    
    # 具体洞察提取方法
    def _extract_correlation_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract pairwise Pearson-correlation insights between numeric columns.

        Each pair must clear ``config.correlation_threshold`` on |r| and
        ``config.significance_level`` on the two-sided t-test p-value.

        Args:
            data: input DataFrame
            config: extraction configuration (thresholds, min sample size)

        Returns:
            List of correlation insights that pass both thresholds.
        """
        insights = []
        
        # Correlation only makes sense with at least two numeric columns.
        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        if len(numeric_columns) < 2:
            return insights
        
        correlation_matrix = data[numeric_columns].corr()
        
        # Upper triangle only: avoids self-correlation and duplicate pairs.
        for i, var1 in enumerate(numeric_columns):
            for var2 in numeric_columns[i + 1:]:
                correlation = correlation_matrix.loc[var1, var2]
                
                # NaN correlation (e.g. constant column) fails this comparison
                # and is skipped.
                if not (abs(correlation) >= config.correlation_threshold):
                    continue
                
                # Pairwise-complete sample size; the t-test needs df = n-2 > 0.
                n = len(data[[var1, var2]].dropna())
                if n < max(config.min_sample_size, 3):
                    continue
                
                # Two-sided t-test for Pearson's r. Bug fix: a perfect
                # correlation (|r| == 1, e.g. duplicated columns) made
                # 1 - r**2 zero and raised ZeroDivisionError; it is maximally
                # significant, so use p = 0 directly.
                if abs(correlation) >= 1.0:
                    p_value = 0.0
                else:
                    t_stat = correlation * math.sqrt((n - 2) / (1 - correlation**2))
                    p_value = 2 * (1 - stats.t.cdf(abs(t_stat), n - 2))
                
                if p_value >= config.significance_level:
                    continue
                
                # Label the correlation strength by |r|.
                if abs(correlation) >= 0.8:
                    strength = "强"
                elif abs(correlation) >= 0.6:
                    strength = "中等"
                else:
                    strength = "弱"
                
                insight = Insight(
                    insight_id=f"corr_{var1}_{var2}",
                    title=f"{var1} 与 {var2} 的相关性",
                    description=self.insight_templates[InsightType.CORRELATION].format(
                        var1=var1, var2=var2, strength=strength, correlation=correlation
                    ),
                    insight_type=InsightType.CORRELATION,
                    confidence_score=1 - p_value,
                    statistical_significance=p_value,
                    effect_size=abs(correlation),
                    sample_size=n,
                    involved_variables=[var1, var2],
                    extraction_method="pearson_correlation"
                )
                
                evidence = InsightEvidence(
                    evidence_type="correlation_analysis",
                    description=f"皮尔逊相关系数分析",
                    statistical_value=correlation,
                    p_value=p_value,
                    sample_size=n,
                    effect_size=abs(correlation)
                )
                insight.add_evidence(evidence)
                
                # Significance tier scales with |r|.
                if abs(correlation) >= 0.8:
                    insight.significance = InsightSignificance.HIGH
                elif abs(correlation) >= 0.6:
                    insight.significance = InsightSignificance.MEDIUM
                else:
                    insight.significance = InsightSignificance.LOW
                
                insight.update_confidence_level()
                insights.append(insight)
        
        return insights
    
    def _extract_trend_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract trend insights via linear regression on time-ordered series.

        Finds the first time-like column, orders each numeric column by it and
        fits a least-squares line; significant, non-trivial slopes become
        insights.

        Args:
            data: input DataFrame
            config: extraction configuration (trend window, significance level)

        Returns:
            List of trend insights.
        """
        insights = []
        
        # Locate candidate time columns: native datetime columns, plus object
        # columns whose first non-null value parses as a timestamp.
        time_columns = []
        for col in data.columns:
            if pd.api.types.is_datetime64_any_dtype(data[col]):
                time_columns.append(col)
            elif data[col].dtype == 'object':
                sample = data[col].dropna()
                if sample.empty:
                    continue  # bug fix: .iloc[0] on an all-null column raised
                try:
                    pd.to_datetime(sample.iloc[0])
                    time_columns.append(col)
                except (ValueError, TypeError):
                    pass  # not parseable as a date; narrowed from bare except
        
        if not time_columns:
            return insights
        
        time_col = time_columns[0]
        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        
        for var in numeric_columns:
            sorted_data = data[[time_col, var]].dropna()
            
            # Bug fix: an object-dtype time column used to be sorted as raw
            # strings (possibly non-chronological) and then crashed with
            # AttributeError on `.date()` below. Parse it to datetimes first.
            if not pd.api.types.is_datetime64_any_dtype(sorted_data[time_col]):
                try:
                    sorted_data = sorted_data.assign(
                        **{time_col: pd.to_datetime(sorted_data[time_col])}
                    )
                except (ValueError, TypeError):
                    continue  # mixed/unparseable timestamps: skip this variable
            sorted_data = sorted_data.sort_values(time_col)
            
            if len(sorted_data) >= config.trend_window_size:
                # Regress the value on its ordinal position in time order.
                x = np.arange(len(sorted_data))
                y = sorted_data[var].values
                
                slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
                
                # Require both statistical significance and a non-trivial fit.
                if p_value < config.significance_level and abs(r_value) > 0.3:
                    # Overall percentage change, first -> last observation.
                    if len(y) > 1:
                        change_rate = ((y[-1] - y[0]) / y[0]) * 100 if y[0] != 0 else 0
                    else:
                        change_rate = 0
                    
                    if slope > 0:
                        direction = "上升"
                    elif slope < 0:
                        direction = "下降"
                    else:
                        direction = "平稳"
                    
                    time_range = (
                        sorted_data[time_col].iloc[0],
                        sorted_data[time_col].iloc[-1]
                    )
                    
                    insight = Insight(
                        insight_id=f"trend_{var}",
                        title=f"{var} 的趋势分析",
                        description=self.insight_templates[InsightType.TREND].format(
                            variable=var, period=f"{time_range[0].date()} 至 {time_range[1].date()}",
                            direction=direction, rate=change_rate
                        ),
                        insight_type=InsightType.TREND,
                        confidence_score=abs(r_value),
                        statistical_significance=p_value,
                        effect_size=abs(slope),
                        sample_size=len(sorted_data),
                        involved_variables=[var],
                        time_range=time_range,
                        extraction_method="linear_regression"
                    )
                    
                    evidence = InsightEvidence(
                        evidence_type="trend_analysis",
                        description="线性回归趋势分析",
                        statistical_value=slope,
                        p_value=p_value,
                        sample_size=len(sorted_data),
                        effect_size=abs(r_value),
                        supporting_data={
                            'r_squared': r_value**2,
                            'change_rate': change_rate,
                            'direction': direction
                        }
                    )
                    insight.add_evidence(evidence)
                    
                    # Tier by magnitude of the overall change.
                    if abs(change_rate) >= 50:
                        insight.significance = InsightSignificance.HIGH
                    elif abs(change_rate) >= 20:
                        insight.significance = InsightSignificance.MEDIUM
                    else:
                        insight.significance = InsightSignificance.LOW
                    
                    insight.update_confidence_level()
                    insights.append(insight)
        
        return insights
    
    def _extract_pattern_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract pattern insights.

        Two passes:
          1. numeric columns — an autocorrelation scan over lags to detect
             periodic patterns;
          2. categorical columns — a frequency scan for a dominant category
             (>= 70% of non-null rows).

        Args:
            data: input DataFrame
            config: extraction configuration (min_sample_size is honoured)

        Returns:
            List of pattern insights.
        """
        insights = []
        
        # Pass 1: periodic patterns in numeric columns.
        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        
        for col in numeric_columns:
            values = data[col].dropna()
            if len(values) >= config.min_sample_size:
                # Only probe for cycles when the series is reasonably long.
                if len(values) >= 20:
                    # Simple autocorrelation scan over lags 1..min(len/4, 50):
                    # correlate the series with itself shifted by `lag`
                    # (positional slices on the dropna'd Series).
                    autocorr_values = []
                    for lag in range(1, min(len(values)//4, 50)):
                        if len(values) > lag:
                            corr = np.corrcoef(values[:-lag], values[lag:])[0, 1]
                            if not np.isnan(corr):
                                autocorr_values.append((lag, corr))
                    
                    # Keep lags with non-trivial autocorrelation (|r| > 0.3).
                    significant_lags = [(lag, corr) for lag, corr in autocorr_values if abs(corr) > 0.3]
                    
                    if significant_lags:
                        # Report the single strongest lag.
                        best_lag, best_corr = max(significant_lags, key=lambda x: abs(x[1]))
                        
                        insight = Insight(
                            insight_id=f"pattern_{col}_periodic",
                            title=f"{col} 的周期性模式",
                            description=f"{col} 存在 {best_lag} 期的周期性模式，自相关系数为 {best_corr:.3f}",
                            insight_type=InsightType.PATTERN,
                            confidence_score=abs(best_corr),
                            effect_size=abs(best_corr),
                            sample_size=len(values),
                            involved_variables=[col],
                            extraction_method="autocorrelation_analysis"
                        )
                        
                        evidence = InsightEvidence(
                            evidence_type="pattern_analysis",
                            description="自相关周期性分析",
                            statistical_value=best_corr,
                            sample_size=len(values),
                            supporting_data={
                                'lag': best_lag,
                                'pattern_type': 'periodic'
                            }
                        )
                        insight.add_evidence(evidence)
                        
                        insight.significance = InsightSignificance.MEDIUM if abs(best_corr) > 0.5 else InsightSignificance.LOW
                        insight.update_confidence_level()
                        insights.append(insight)
        
        # Pass 2: dominant-category patterns in categorical columns.
        categorical_columns = data.select_dtypes(include=['object', 'category']).columns.tolist()
        
        for col in categorical_columns:
            value_counts = data[col].value_counts()
            if len(value_counts) >= 2:
                # Share of the most frequent category among non-null rows.
                total_count = value_counts.sum()
                dominant_value = value_counts.index[0]
                dominant_ratio = value_counts.iloc[0] / total_count
                
                if dominant_ratio >= 0.7:  # dominant-category pattern
                    insight = Insight(
                        insight_id=f"pattern_{col}_dominant",
                        title=f"{col} 的主导模式",
                        description=f"{col} 中 '{dominant_value}' 占主导地位，比例为 {dominant_ratio:.1%}",
                        insight_type=InsightType.PATTERN,
                        confidence_score=dominant_ratio,
                        sample_size=total_count,
                        involved_variables=[col],
                        extraction_method="frequency_analysis"
                    )
                    
                    evidence = InsightEvidence(
                        evidence_type="distribution_pattern",
                        description="频率分布分析",
                        statistical_value=dominant_ratio,
                        sample_size=total_count,
                        supporting_data={
                            'dominant_value': dominant_value,
                            'pattern_type': 'dominant'
                        }
                    )
                    insight.add_evidence(evidence)
                    
                    insight.significance = InsightSignificance.MEDIUM
                    insight.update_confidence_level()
                    insights.append(insight)
        
        return insights
    
    def _extract_anomaly_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract anomaly insights using Tukey's IQR fences on each numeric column.

        Args:
            data: input DataFrame
            config: extraction configuration (min_sample_size is honoured)

        Returns:
            List of anomaly insights, one per column with outliers.
        """
        insights = []
        
        for col in data.select_dtypes(include=[np.number]).columns:
            series = data[col].dropna()
            if len(series) < config.min_sample_size:
                continue
            
            # 1.5 * IQR fences around the interquartile range.
            q1 = series.quantile(0.25)
            q3 = series.quantile(0.75)
            iqr = q3 - q1
            if not iqr > 0:
                continue
            
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr
            outliers = series[(series < lower_bound) | (series > upper_bound)]
            if len(outliers) == 0:
                continue
            
            outlier_ratio = len(outliers) / len(series)
            
            insight = Insight(
                insight_id=f"anomaly_{col}",
                title=f"{col} 的异常值分析",
                description=self.insight_templates[InsightType.ANOMALY].format(
                    count=len(outliers), variable=col
                ),
                insight_type=InsightType.ANOMALY,
                # Higher outlier share -> higher confidence, capped at 1.0.
                confidence_score=min(outlier_ratio * 10, 1.0),
                sample_size=len(series),
                involved_variables=[col],
                affected_records=outliers.index.tolist(),
                extraction_method="iqr_outlier_detection"
            )
            
            evidence = InsightEvidence(
                evidence_type="outlier_analysis",
                description="IQR异常值检测",
                statistical_value=outlier_ratio,
                sample_size=len(series),
                supporting_data={
                    'outlier_count': len(outliers),
                    'lower_bound': lower_bound,
                    'upper_bound': upper_bound,
                    'outlier_values': outliers.tolist()[:10]  # keep first 10 only
                }
            )
            insight.add_evidence(evidence)
            
            # Tier by the share of outlying observations.
            insight.significance = (
                InsightSignificance.HIGH if outlier_ratio >= 0.1
                else InsightSignificance.MEDIUM if outlier_ratio >= 0.05
                else InsightSignificance.LOW
            )
            
            insight.update_confidence_level()
            insights.append(insight)
        
        return insights
    
    def _extract_distribution_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract distribution-shape insights for every numeric column.

        Computes skewness and kurtosis, classifies the shape, and runs a
        normality test when the sample is large enough.

        Args:
            data: input DataFrame
            config: extraction configuration (min_sample_size, significance)

        Returns:
            List of distribution insights, one per qualifying column.
        """
        insights = []
        
        for col in data.select_dtypes(include=[np.number]).columns:
            values = data[col].dropna()
            if len(values) < config.min_sample_size:
                continue
            
            skewness = stats.skew(values)
            kurtosis = stats.kurtosis(values)
            
            # Classify the shape from skewness.
            distribution_type = (
                "对称" if abs(skewness) < 0.5
                else "右偏" if skewness > 0.5
                else "左偏"
            )
            
            # D'Agostino-Pearson normality test needs at least 8 samples.
            p_value = None
            is_normal = False
            if len(values) >= 8:
                _, p_value = stats.normaltest(values)
                is_normal = p_value > config.significance_level
            
            insight = Insight(
                insight_id=f"dist_{col}",
                title=f"{col} 的分布特征",
                description=self.insight_templates[InsightType.DISTRIBUTION].format(
                    variable=col, distribution_type=distribution_type, skewness=skewness
                ),
                insight_type=InsightType.DISTRIBUTION,
                # Fixed score: distribution summaries are considered reliable.
                confidence_score=0.8,
                statistical_significance=p_value,
                sample_size=len(values),
                involved_variables=[col],
                extraction_method="distribution_analysis"
            )
            
            evidence = InsightEvidence(
                evidence_type="distribution_analysis",
                description="分布特征分析",
                statistical_value=skewness,
                p_value=p_value,
                sample_size=len(values),
                supporting_data={
                    'kurtosis': kurtosis,
                    'is_normal': is_normal,
                    'distribution_type': distribution_type,
                    'mean': float(values.mean()),
                    'std': float(values.std()),
                    'median': float(values.median())
                }
            )
            insight.add_evidence(evidence)
            
            # Tier by the magnitude of the skew.
            insight.significance = (
                InsightSignificance.HIGH if abs(skewness) >= 2
                else InsightSignificance.MEDIUM if abs(skewness) >= 1
                else InsightSignificance.LOW
            )
            
            insight.update_confidence_level()
            insights.append(insight)
        
        return insights

    def _extract_clustering_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract clustering insights with K-means.

        Standardizes the numeric columns, scans candidate cluster counts for
        the best silhouette score, and emits a single clustering insight when
        the best clustering is of acceptable quality (silhouette > 0.3).

        Args:
            data: input dataset.
            config: extraction settings (min_cluster_size, max_clusters).

        Returns:
            A list with at most one clustering insight.
        """
        insights = []

        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        if len(numeric_columns) < 2:
            return insights

        # Rows with any missing numeric value cannot be clustered.
        cluster_data = data[numeric_columns].dropna()
        if len(cluster_data) < config.min_cluster_size * 2:
            return insights

        # K-means is scale-sensitive, so standardize the features first.
        scaler = StandardScaler()
        scaled_data = scaler.fit_transform(cluster_data)

        # Hoisted: this used to be re-imported on every loop iteration.
        from sklearn.metrics import silhouette_score

        # Scan candidate cluster counts, keeping the best silhouette score.
        best_k = 2
        best_score = -1

        for k in range(2, min(config.max_clusters + 1, len(cluster_data) // config.min_cluster_size)):
            try:
                kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
                labels = kmeans.fit_predict(scaled_data)
                score = silhouette_score(scaled_data, labels)

                if score > best_score:
                    best_score = score
                    best_k = k
            except Exception:
                # BUGFIX: was a bare `except:` that also swallowed
                # SystemExit/KeyboardInterrupt; skip only the failing k.
                continue

        if best_score > 0.3:  # only report when cluster quality is decent
            # Re-fit with the winning cluster count.
            kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=10)
            labels = kmeans.fit_predict(scaled_data)

            # Summarize the clustering result.
            cluster_sizes = np.bincount(labels)
            max_cluster_size = max(cluster_sizes)

            insight = Insight(
                insight_id=f"clustering_kmeans",
                title="数据聚类分析",
                description=self.insight_templates[InsightType.CLUSTERING].format(
                    cluster_count=best_k, max_cluster_size=max_cluster_size
                ),
                insight_type=InsightType.CLUSTERING,
                confidence_score=best_score,
                sample_size=len(cluster_data),
                involved_variables=numeric_columns,
                extraction_method="kmeans_clustering"
            )

            evidence = InsightEvidence(
                evidence_type="clustering_analysis",
                description="K-means聚类分析",
                statistical_value=best_score,
                sample_size=len(cluster_data),
                supporting_data={
                    'cluster_count': best_k,
                    'silhouette_score': best_score,
                    'cluster_sizes': cluster_sizes.tolist(),
                    'cluster_centers': kmeans.cluster_centers_.tolist()
                }
            )
            insight.add_evidence(evidence)

            # Grade significance by clustering quality.
            if best_score >= 0.7:
                insight.significance = InsightSignificance.HIGH
            elif best_score >= 0.5:
                insight.significance = InsightSignificance.MEDIUM
            else:
                insight.significance = InsightSignificance.LOW

            insight.update_confidence_level()
            insights.append(insight)

        return insights
    
    def _extract_seasonality_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract seasonality insights from time-ordered numeric series.

        Uses the first datetime column as the time axis, orders every numeric
        variable by it, and probes each candidate period in
        ``config.seasonal_periods`` via ``_calculate_seasonal_strength``.

        Args:
            data: input dataset (a datetime column is required to proceed).
            config: extraction settings (seasonal_periods).

        Returns:
            List of seasonality insights (possibly empty).
        """
        insights = []
        
        # Find datetime-typed columns; the first one becomes the time axis.
        time_columns = []
        for col in data.columns:
            if pd.api.types.is_datetime64_any_dtype(data[col]):
                time_columns.append(col)
        
        if not time_columns:
            return insights
        
        time_col = time_columns[0]
        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        
        for var in numeric_columns:
            # Order chronologically before looking for periodic structure.
            sorted_data = data[[time_col, var]].dropna().sort_values(time_col)
            
            if len(sorted_data) >= 30:  # need at least 30 data points
                values = sorted_data[var].values
                
                # Test every configured candidate period.
                for period in config.seasonal_periods:
                    if len(values) >= period * 2:
                        # Seasonal strength: share of variance explained by this period.
                        seasonal_strength = self._calculate_seasonal_strength(values, period)
                        
                        if seasonal_strength > 0.3:  # seasonality threshold
                            insight = Insight(
                                insight_id=f"seasonality_{var}_{period}",
                                title=f"{var} 的季节性模式",
                                description=self.insight_templates[InsightType.SEASONALITY].format(
                                    variable=var, period=period, strength=seasonal_strength
                                ),
                                insight_type=InsightType.SEASONALITY,
                                confidence_score=seasonal_strength,
                                sample_size=len(values),
                                involved_variables=[var],
                                extraction_method="seasonal_decomposition"
                            )
                            
                            evidence = InsightEvidence(
                                evidence_type="seasonality_analysis",
                                description=f"{period}期季节性分析",
                                statistical_value=seasonal_strength,
                                sample_size=len(values),
                                supporting_data={
                                    'period': period,
                                    'seasonal_strength': seasonal_strength
                                }
                            )
                            insight.add_evidence(evidence)
                            
                            # Grade significance by seasonal strength.
                            if seasonal_strength >= 0.7:
                                insight.significance = InsightSignificance.HIGH
                            elif seasonal_strength >= 0.5:
                                insight.significance = InsightSignificance.MEDIUM
                            else:
                                insight.significance = InsightSignificance.LOW
                            
                            insight.update_confidence_level()
                            insights.append(insight)
        
        return insights
    
    def _extract_comparison_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract group-comparison insights.

        For every (categorical, numeric) column pair, compares the numeric
        variable across the categorical groups with a t-test (2 groups) or a
        one-way ANOVA (3-5 groups) and reports significant differences with
        an effect size (Cohen's d or eta squared).

        Args:
            data: input dataset.
            config: extraction settings (min_sample_size, significance_level).

        Returns:
            List of comparison insights (possibly empty).
        """
        insights = []

        categorical_columns = data.select_dtypes(include=['object', 'category']).columns.tolist()
        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()

        for cat_col in categorical_columns:
            groups = data[cat_col].value_counts()
            if len(groups) < 2:  # need at least two groups to compare
                continue
            for num_col in numeric_columns:
                # Collect per-group samples (largest 5 groups only).
                group_data = []
                group_names = []

                for group_name in groups.index[:5]:
                    group_values = data[data[cat_col] == group_name][num_col].dropna()
                    if len(group_values) >= config.min_sample_size:
                        group_data.append(group_values)
                        group_names.append(group_name)

                if len(group_data) < 2:
                    continue

                # Two groups -> t-test; three or more -> one-way ANOVA.
                if len(group_data) == 2:
                    stat, p_value = stats.ttest_ind(group_data[0], group_data[1])
                    test_name = "t检验"
                else:
                    stat, p_value = stats.f_oneway(*group_data)
                    test_name = "方差分析"

                if p_value >= config.significance_level:
                    continue

                # Effect size.
                if len(group_data) == 2:
                    # Cohen's d with the pooled standard deviation.
                    pooled_std = np.sqrt(((len(group_data[0])-1)*group_data[0].var() +
                                          (len(group_data[1])-1)*group_data[1].var()) /
                                         (len(group_data[0])+len(group_data[1])-2))
                    # BUGFIX: guard against zero pooled variance (both groups
                    # constant); a significant difference with no spread is a
                    # maximal effect, not a ZeroDivisionError.
                    if pooled_std > 0:
                        effect_size = abs(group_data[0].mean() - group_data[1].mean()) / pooled_std
                    else:
                        effect_size = float('inf')
                else:
                    # Eta squared from the F statistic:
                    #   eta^2 = F*df_between / (F*df_between + df_within).
                    # BUGFIX: the old formula F/(F+df_within) dropped
                    # df_between, which is >= 2 on this branch (3+ groups),
                    # so it systematically understated the effect.
                    df_between = len(group_data) - 1
                    df_within = sum(len(g) - 1 for g in group_data)
                    effect_size = (stat * df_between) / (stat * df_between + df_within)

                insight = Insight(
                    insight_id=f"comparison_{cat_col}_{num_col}",
                    title=f"{cat_col} 组间 {num_col} 的差异",
                    description=self.insight_templates[InsightType.COMPARISON].format(
                        group1=group_names[0], group2=group_names[1] if len(group_names) > 1 else "其他组",
                        variable=num_col, p_value=p_value
                    ),
                    insight_type=InsightType.COMPARISON,
                    confidence_score=1 - p_value,
                    statistical_significance=p_value,
                    effect_size=effect_size,
                    sample_size=sum(len(g) for g in group_data),
                    involved_variables=[cat_col, num_col],
                    extraction_method=test_name.lower().replace(" ", "_")
                )

                evidence = InsightEvidence(
                    evidence_type="group_comparison",
                    description=test_name,
                    statistical_value=stat,
                    p_value=p_value,
                    sample_size=sum(len(g) for g in group_data),
                    effect_size=effect_size,
                    supporting_data={
                        'group_means': [float(g.mean()) for g in group_data],
                        'group_names': group_names,
                        'test_statistic': float(stat)
                    }
                )
                insight.add_evidence(evidence)

                # Cohen's-d conventional cutoffs. NOTE(review): the same
                # cutoffs are also applied to eta squared, whose scale
                # differs -- confirm this grading is intended.
                if effect_size >= 0.8:
                    insight.significance = InsightSignificance.HIGH
                elif effect_size >= 0.5:
                    insight.significance = InsightSignificance.MEDIUM
                else:
                    insight.significance = InsightSignificance.LOW

                insight.update_confidence_level()
                insights.append(insight)

        return insights
    
    def _extract_association_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract association insights between categorical variables.

        Runs a chi-square independence test on every pair of categorical
        columns and reports the significant pairs, using Cramer's V as the
        association strength (stored as the effect size).

        Args:
            data: input dataset.
            config: extraction settings (significance_level).

        Returns:
            List of association insights (possibly empty).
        """
        insights = []
        
        categorical_columns = data.select_dtypes(include=['object', 'category']).columns.tolist()
        
        # Pairwise association analysis between categorical variables.
        for i, col1 in enumerate(categorical_columns):
            for col2 in categorical_columns[i+1:]:
                # Build the contingency table for the pair.
                crosstab = pd.crosstab(data[col1], data[col2])
                
                # A meaningful test needs at least a 2x2 table.
                if crosstab.shape[0] >= 2 and crosstab.shape[1] >= 2:
                    # Chi-square test of independence.
                    chi2, p_value, dof, expected = stats.chi2_contingency(crosstab)
                    
                    if p_value < config.significance_level:
                        # Cramer's V (0..1) as association strength.
                        n = crosstab.sum().sum()
                        cramers_v = np.sqrt(chi2 / (n * (min(crosstab.shape) - 1)))
                        
                        insight = Insight(
                            insight_id=f"association_{col1}_{col2}",
                            title=f"{col1} 与 {col2} 的关联性",
                            description=f"{col1} 与 {col2} 之间存在显著关联 (Cramer's V = {cramers_v:.3f})",
                            insight_type=InsightType.ASSOCIATION,
                            confidence_score=1 - p_value,
                            statistical_significance=p_value,
                            effect_size=cramers_v,
                            sample_size=int(n),
                            involved_variables=[col1, col2],
                            extraction_method="chi_square_test"
                        )
                        
                        evidence = InsightEvidence(
                            evidence_type="association_analysis",
                            description="卡方独立性检验",
                            statistical_value=chi2,
                            p_value=p_value,
                            sample_size=int(n),
                            effect_size=cramers_v,
                            supporting_data={
                                'cramers_v': cramers_v,
                                'degrees_of_freedom': dof,
                                'crosstab': crosstab.to_dict()
                            }
                        )
                        insight.add_evidence(evidence)
                        
                        # Grade significance by association strength.
                        if cramers_v >= 0.5:
                            insight.significance = InsightSignificance.HIGH
                        elif cramers_v >= 0.3:
                            insight.significance = InsightSignificance.MEDIUM
                        else:
                            insight.significance = InsightSignificance.LOW
                        
                        insight.update_confidence_level()
                        insights.append(insight)
        
        return insights
    
    def _extract_outlier_insights(self, data: pd.DataFrame, config: InsightExtractionConfig) -> List[Insight]:
        """Extract multivariate outlier insights.

        Runs an Isolation Forest over all numeric columns to find records
        that are anomalous in the joint feature space (as opposed to
        per-column univariate outliers).

        Args:
            data: input dataset.
            config: extraction settings (min_sample_size).

        Returns:
            A list with at most one multivariate-outlier insight.
        """
        insights = []
        
        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        
        if len(numeric_columns) >= 2:
            # Multivariate outlier detection.
            from sklearn.ensemble import IsolationForest
            
            clean_data = data[numeric_columns].dropna()
            if len(clean_data) >= config.min_sample_size:
                # Isolation Forest; contamination=0.1 assumes up to ~10% of
                # records are anomalous -- TODO confirm this prior fits the data.
                iso_forest = IsolationForest(contamination=0.1, random_state=42)
                outlier_labels = iso_forest.fit_predict(clean_data)
                
                # fit_predict marks outliers with -1.
                outliers = clean_data[outlier_labels == -1]
                
                if len(outliers) > 0:
                    # Locate the single most extreme outlier (lowest score).
                    anomaly_scores = iso_forest.decision_function(clean_data)
                    most_extreme_idx = np.argmin(anomaly_scores)
                    most_extreme_outlier = clean_data.iloc[most_extreme_idx]
                    
                    insight = Insight(
                        insight_id=f"outlier_multivariate",
                        title="多维离群值分析",
                        description=self.insight_templates[InsightType.OUTLIER].format(
                            count=len(outliers), 
                            max_outlier=f"索引 {most_extreme_outlier.name}"
                        ),
                        insight_type=InsightType.OUTLIER,
                        # Confidence scales with the outlier ratio, capped at 1.
                        confidence_score=min(len(outliers) / len(clean_data) * 10, 1.0),
                        sample_size=len(clean_data),
                        involved_variables=numeric_columns,
                        affected_records=outliers.index.tolist(),
                        extraction_method="isolation_forest"
                    )
                    
                    evidence = InsightEvidence(
                        evidence_type="outlier_detection",
                        description="孤立森林多维离群值检测",
                        statistical_value=len(outliers) / len(clean_data),
                        sample_size=len(clean_data),
                        supporting_data={
                            'outlier_count': len(outliers),
                            'outlier_ratio': len(outliers) / len(clean_data),
                            'most_extreme_score': float(anomaly_scores[most_extreme_idx]),
                            'outlier_indices': outliers.index.tolist()[:10]  # keep only the first 10
                        }
                    )
                    insight.add_evidence(evidence)
                    
                    # Grade significance by the share of outlying records.
                    outlier_ratio = len(outliers) / len(clean_data)
                    if outlier_ratio >= 0.1:
                        insight.significance = InsightSignificance.HIGH
                    elif outlier_ratio >= 0.05:
                        insight.significance = InsightSignificance.MEDIUM
                    else:
                        insight.significance = InsightSignificance.LOW
                    
                    insight.update_confidence_level()
                    insights.append(insight)
        
        return insights
    
    # 目标变量相关的特殊提取方法
    def _extract_target_correlations(self, data: pd.DataFrame, target_variable: str, config: InsightExtractionConfig) -> List[Insight]:
        """Extract correlation insights between features and the target.

        Computes the Pearson correlation of every other numeric column with
        the target over their shared non-null index, and reports the pairs
        that clear both the correlation threshold and the significance level.

        Args:
            data: input dataset.
            target_variable: name of the (numeric) target column.
            config: extraction settings (min_sample_size,
                correlation_threshold, significance_level).

        Returns:
            List of target-correlation insights (possibly empty).
        """
        insights = []

        if target_variable not in data.columns:
            return insights

        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        if target_variable not in numeric_columns:
            return insights

        target_data = data[target_variable].dropna()

        for col in numeric_columns:
            if col == target_variable:
                continue

            col_data = data[col].dropna()

            # Pair up only the rows where both series have values.
            common_idx = target_data.index.intersection(col_data.index)
            if len(common_idx) < config.min_sample_size:
                continue

            x = data.loc[common_idx, col]
            y = data.loc[common_idx, target_variable]

            correlation = np.corrcoef(x, y)[0, 1]

            # BUGFIX: a constant column yields NaN; skip it rather than
            # letting NaN flow through the threshold comparisons.
            if np.isnan(correlation):
                continue

            if abs(correlation) >= config.correlation_threshold:
                # Significance of r via the t distribution.
                n = len(common_idx)
                denom = 1 - correlation ** 2
                if denom <= 0:
                    # BUGFIX: |r| == 1 used to raise ZeroDivisionError; a
                    # perfect correlation is maximally significant.
                    p_value = 0.0
                else:
                    t_stat = correlation * math.sqrt((n - 2) / denom)
                    p_value = 2 * (1 - stats.t.cdf(abs(t_stat), n - 2))

                if p_value < config.significance_level:
                    insight = Insight(
                        insight_id=f"target_corr_{col}",
                        title=f"{col} 与目标变量 {target_variable} 的相关性",
                        description=f"{col} 与目标变量 {target_variable} 相关性为 {correlation:.3f}",
                        insight_type=InsightType.CORRELATION,
                        confidence_score=1 - p_value,
                        statistical_significance=p_value,
                        effect_size=abs(correlation),
                        sample_size=n,
                        involved_variables=[col, target_variable],
                        extraction_method="target_correlation"
                    )

                    evidence = InsightEvidence(
                        evidence_type="target_correlation",
                        description=f"与目标变量的相关性分析",
                        statistical_value=correlation,
                        p_value=p_value,
                        sample_size=n,
                        effect_size=abs(correlation)
                    )
                    insight.add_evidence(evidence)

                    # Correlations with the target rank above generic ones.
                    if abs(correlation) >= 0.7:
                        insight.significance = InsightSignificance.CRITICAL
                    elif abs(correlation) >= 0.5:
                        insight.significance = InsightSignificance.HIGH
                    else:
                        insight.significance = InsightSignificance.MEDIUM

                    insight.update_confidence_level()
                    insights.append(insight)

        return insights
    
    def _extract_feature_importance(self, data: pd.DataFrame, target_variable: str, config: InsightExtractionConfig) -> List[Insight]:
        """Extract feature-importance insights for a target variable.

        Scores every numeric feature against the target with SelectKBest
        (ANOVA F-test) and emits one insight per feature whose score is
        statistically significant.

        NOTE(review): f_classif assumes a categorical/discrete target --
        confirm callers only pass classification targets here.

        Args:
            data: input dataset.
            target_variable: name of the target column.
            config: extraction settings (min_sample_size, significance_level).

        Returns:
            List of feature-importance insights (possibly empty).
        """
        insights = []

        if target_variable not in data.columns:
            return insights

        numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
        feature_columns = [col for col in numeric_columns if col != target_variable]

        if len(feature_columns) < 2:
            return insights

        # Drop rows with any missing feature/target value.
        clean_data = data[feature_columns + [target_variable]].dropna()
        if len(clean_data) < config.min_sample_size:
            return insights

        X = clean_data[feature_columns]
        y = clean_data[target_variable]

        # Score all features with the ANOVA F-test.
        selector = SelectKBest(score_func=f_classif, k='all')
        selector.fit(X, y)

        feature_scores = selector.scores_
        feature_pvalues = selector.pvalues_

        # Hoisted out of the loop and made NaN-safe. BUGFIX: the old code
        # called max()/argsort() once per feature, max() was poisoned by NaN
        # scores, and NaN scores sorted first in the reversed order, shifting
        # every valid feature's rank.
        finite_mask = ~np.isnan(feature_scores)
        max_score = float(feature_scores[finite_mask].max()) if finite_mask.any() else 0.0
        order = np.argsort(np.where(finite_mask, feature_scores, -np.inf))[::-1]
        rank_of = {int(idx): pos + 1 for pos, idx in enumerate(order)}

        for i, (feature, score, p_value) in enumerate(zip(feature_columns, feature_scores, feature_pvalues)):
            if p_value < config.significance_level and not np.isnan(score):
                normalized_score = score / max_score if max_score > 0 else 0

                insight = Insight(
                    insight_id=f"feature_importance_{feature}",
                    title=f"{feature} 的特征重要性",
                    description=f"{feature} 对目标变量 {target_variable} 的重要性评分为 {score:.2f}",
                    insight_type=InsightType.CORRELATION,
                    confidence_score=1 - p_value,
                    statistical_significance=p_value,
                    effect_size=normalized_score,
                    sample_size=len(clean_data),
                    involved_variables=[feature, target_variable],
                    extraction_method="feature_importance_analysis"
                )

                evidence = InsightEvidence(
                    evidence_type="feature_importance",
                    description="特征重要性分析",
                    statistical_value=score,
                    p_value=p_value,
                    sample_size=len(clean_data),
                    supporting_data={
                        'importance_score': float(score),
                        'rank': rank_of[i]
                    }
                )
                insight.add_evidence(evidence)

                # Grade significance by the normalized importance score.
                if normalized_score >= 0.8:
                    insight.significance = InsightSignificance.CRITICAL
                elif normalized_score >= 0.6:
                    insight.significance = InsightSignificance.HIGH
                elif normalized_score >= 0.4:
                    insight.significance = InsightSignificance.MEDIUM
                else:
                    insight.significance = InsightSignificance.LOW

                insight.update_confidence_level()
                insights.append(insight)

        return insights
    
    def _extract_target_distribution(self, data: pd.DataFrame, target_variable: str, config: InsightExtractionConfig) -> List[Insight]:
        """Extract a distribution insight for the target variable.

        Summarizes the target's central tendency, spread, skewness and
        kurtosis, and runs a normality test when enough samples exist.

        Args:
            data: input dataset.
            target_variable: name of the target column.
            config: extraction settings (min_sample_size).

        Returns:
            A list with at most one distribution insight.
        """
        insights = []
        
        if target_variable not in data.columns:
            return insights
        
        target_data = data[target_variable].dropna()
        if len(target_data) < config.min_sample_size:
            return insights
        
        # Distribution summary statistics.
        mean_val = target_data.mean()
        std_val = target_data.std()
        skewness = stats.skew(target_data)
        kurtosis = stats.kurtosis(target_data)
        
        # Normality test (D'Agostino) requires at least 8 samples.
        if len(target_data) >= 8:
            _, p_value = stats.normaltest(target_data)
        else:
            p_value = None
        
        insight = Insight(
            insight_id=f"target_distribution_{target_variable}",
            title=f"目标变量 {target_variable} 的分布特征",
            description=f"目标变量 {target_variable} 的均值为 {mean_val:.2f}，标准差为 {std_val:.2f}，偏度为 {skewness:.2f}",
            insight_type=InsightType.DISTRIBUTION,
            confidence_score=0.9,  # distribution analysis is highly reliable
            statistical_significance=p_value,
            sample_size=len(target_data),
            involved_variables=[target_variable],
            extraction_method="target_distribution_analysis"
        )
        
        evidence = InsightEvidence(
            evidence_type="target_distribution",
            description="目标变量分布分析",
            statistical_value=skewness,
            p_value=p_value,
            sample_size=len(target_data),
            supporting_data={
                'mean': float(mean_val),
                'std': float(std_val),
                'skewness': float(skewness),
                'kurtosis': float(kurtosis),
                'min': float(target_data.min()),
                'max': float(target_data.max()),
                'median': float(target_data.median())
            }
        )
        insight.add_evidence(evidence)
        
        # The target's distribution is always treated as high-significance.
        insight.significance = InsightSignificance.HIGH
        insight.update_confidence_level()
        insights.append(insight)
        
        return insights
 
    # 辅助方法
    def _calculate_seasonal_strength(self, values: np.ndarray, period: int) -> float:
        """计算季节性强度"""
        if len(values) < period * 2:
            return 0.0
        
        # 简单的季节性强度计算
        seasonal_means = []
        for i in range(period):
            seasonal_values = values[i::period]
            if len(seasonal_values) > 0:
                seasonal_means.append(np.mean(seasonal_values))
        
        if len(seasonal_means) < 2:
            return 0.0
        
        # 计算季节性变异与总变异的比值
        seasonal_var = np.var(seasonal_means)
        total_var = np.var(values)
        
        if total_var == 0:
            return 0.0
        
        return min(seasonal_var / total_var, 1.0)
    
    def _filter_insights(self, insights: List[Insight], config: InsightExtractionConfig) -> List[Insight]:
        """过滤洞察"""
        filtered = []
        
        for insight in insights:
            # 置信度过滤
            if insight.confidence_score < config.min_confidence_score:
                continue
            
            # 重要性过滤
            if insight.significance.value < config.min_significance.value:
                continue
            
            filtered.append(insight)
        
        # 按类型限制数量
        type_counts = {}
        final_insights = []
        
        for insight in filtered:
            insight_type = insight.insight_type.value
            if type_counts.get(insight_type, 0) < config.max_insights_per_type:
                final_insights.append(insight)
                type_counts[insight_type] = type_counts.get(insight_type, 0) + 1
        
        return final_insights
    
    def _rank_insights(self, insights: List[Insight]) -> List[Insight]:
        """对洞察进行排序"""
        return sorted(insights, key=lambda x: x.get_overall_quality_score(), reverse=True)
    
    def _count_insights_by_type(self, insights: List[Insight]) -> Dict[str, int]:
        """按类型统计洞察数量"""
        counts = {}
        for insight in insights:
            insight_type = insight.insight_type.value
            counts[insight_type] = counts.get(insight_type, 0) + 1
        return counts
    
    def _count_insights_by_significance(self, insights: List[Insight]) -> Dict[str, int]:
        """按重要性统计洞察数量"""
        counts = {}
        for insight in insights:
            significance = insight.significance.value
            counts[significance] = counts.get(significance, 0) + 1
        return counts
    
    def validate_insight(self, insight: Insight, validation_data: pd.DataFrame = None) -> bool:
        """验证洞察的有效性"""
        try:
            # 基本验证
            if not insight.title or not insight.description:
                return False
            
            if insight.confidence_score < 0 or insight.confidence_score > 1:
                return False
            
            if insight.sample_size < 1:
                return False
            
            # 如果有验证数据，进行更深入的验证
            if validation_data is not None:
                # 检查涉及的变量是否存在
                for var in insight.involved_variables:
                    if var not in validation_data.columns:
                        return False
                
                # 根据洞察类型进行特定验证
                if insight.insight_type == InsightType.CORRELATION:
                    return self._validate_correlation_insight(insight, validation_data)
                elif insight.insight_type == InsightType.TREND:
                    return self._validate_trend_insight(insight, validation_data)
                # 可以添加更多类型的验证
            
            insight.validation_status = "validated"
            return True
            
        except Exception as e:
            self.logger.warning(f"洞察验证失败: {str(e)}")
            insight.validation_status = "rejected"
            return False
    
    def _validate_correlation_insight(self, insight: Insight, data: pd.DataFrame) -> bool:
        """验证相关性洞察"""
        if len(insight.involved_variables) != 2:
            return False
        
        var1, var2 = insight.involved_variables
        if var1 not in data.columns or var2 not in data.columns:
            return False
        
        # 重新计算相关性
        correlation = data[[var1, var2]].corr().iloc[0, 1]
        
        # 检查是否与原洞察一致（允许一定误差）
        original_correlation = None
        for evidence in insight.evidence:
            if evidence.statistical_value is not None:
                original_correlation = evidence.statistical_value
                break
        
        if original_correlation is not None:
            return abs(correlation - original_correlation) < 0.1
        
        return True
    
    def _validate_trend_insight(self, insight: Insight, data: pd.DataFrame) -> bool:
        """验证趋势洞察"""
        if len(insight.involved_variables) != 1:
            return False
        
        var = insight.involved_variables[0]
        if var not in data.columns:
            return False
        
        # 简单验证：检查数据是否足够
        return len(data[var].dropna()) >= 10
    
    def generate_insight_report(self, result: ExtractionResult) -> str:
        """Render an extraction result as a human-readable text report.

        The report lists summary statistics, insight counts by type and by
        significance, and the top-10 ranked insights with their suggested
        actions. Returns a single error line when the extraction failed.
        """
        if not result.success:
            return f"洞察提取失败: {result.error_message}"

        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        lines: List[str] = []
        lines.append("=== 数据洞察报告 ===")
        lines.append(f"提取时间: {timestamp}")
        lines.append(f"分析数据点: {result.data_points_analyzed}")
        lines.append(f"提取洞察总数: {result.total_insights}")
        lines.append(f"平均置信度: {result.average_confidence:.2f}")
        lines.append(f"平均质量评分: {result.average_quality_score:.2f}")
        lines.append(f"提取耗时: {result.extraction_time_ms:.2f} ms")

        # Per-type counts.
        lines.append("")
        lines.append("=== 洞察类型分布 ===")
        lines.extend(
            f"{insight_type}: {count} 个"
            for insight_type, count in result.insights_by_type.items()
        )

        # Per-significance counts.
        lines.append("")
        lines.append("=== 重要性分布 ===")
        lines.extend(
            f"{significance}: {count} 个"
            for significance, count in result.insights_by_significance.items()
        )

        # Detail the ten highest-ranked insights, if any.
        top_insights = result.get_top_insights(10)
        if top_insights:
            lines.append("")
            lines.append("=== 重要洞察 ===")
            for rank, item in enumerate(top_insights, 1):
                lines.append(f"{rank}. {item.title}")
                lines.append(f"   {item.description}")
                lines.append(f"   置信度: {item.confidence_score:.2f}, 重要性: {item.significance.value}")
                if item.recommended_actions:
                    lines.append(f"   建议: {'; '.join(item.recommended_actions)}")
                lines.append("")

        return "\n".join(lines)
    
    def export_insights_to_json(self, result: ExtractionResult, file_path: str) -> bool:
        """Serialize an extraction result (insights and their evidence) to JSON.

        Args:
            result: The extraction result to export.
            file_path: Destination path for the UTF-8 JSON document.

        Returns:
            True on success, False if serialization or writing failed.
        """
        try:
            # Flatten each insight (plus its evidence) into plain dicts.
            insights_payload = []
            for item in result.insights:
                evidence_payload = [
                    {
                        'evidence_type': ev.evidence_type,
                        'description': ev.description,
                        'statistical_value': ev.statistical_value,
                        'p_value': ev.p_value,
                        'confidence_interval': ev.confidence_interval,
                        'sample_size': ev.sample_size,
                        'effect_size': ev.effect_size,
                        'supporting_data': ev.supporting_data,
                    }
                    for ev in item.evidence
                ]
                insights_payload.append({
                    'insight_id': item.insight_id,
                    'title': item.title,
                    'description': item.description,
                    'type': item.insight_type.value,
                    'significance': item.significance.value,
                    'confidence_score': item.confidence_score,
                    'confidence_level': item.confidence_level.value,
                    'novelty_score': item.novelty_score,
                    'actionability_score': item.actionability_score,
                    'overall_quality_score': item.get_overall_quality_score(),
                    'statistical_significance': item.statistical_significance,
                    'effect_size': item.effect_size,
                    'sample_size': item.sample_size,
                    'involved_variables': item.involved_variables,
                    'affected_records': item.affected_records,
                    'business_impact': item.business_impact,
                    'recommended_actions': item.recommended_actions,
                    'extraction_method': item.extraction_method,
                    'extraction_time': item.extraction_time.isoformat(),
                    'validation_status': item.validation_status,
                    'evidence': evidence_payload,
                })

            export_data = {
                'extraction_info': {
                    'success': result.success,
                    'total_insights': result.total_insights,
                    'data_points_analyzed': result.data_points_analyzed,
                    'extraction_time_ms': result.extraction_time_ms,
                    'average_confidence': result.average_confidence,
                    'average_quality_score': result.average_quality_score,
                    'extraction_timestamp': datetime.datetime.now().isoformat()
                },
                'insights': insights_payload,
            }

            # UTF-8 with unescaped non-ASCII so the Chinese text stays readable.
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(export_data, f, ensure_ascii=False, indent=2)

            self.logger.info(f"洞察数据已导出到: {file_path}")
            return True

        except Exception as e:
            self.logger.error(f"导出洞察数据失败: {str(e)}")
            return False
    
    def get_actionable_insights(self, result: ExtractionResult, min_actionability: float = 0.5) -> List[Insight]:
        """Return insights meeting the actionability threshold, most actionable first.

        Insights that lack recommendations get them generated on the fly
        (the insight objects are mutated in place).

        Args:
            result: The extraction result to filter.
            min_actionability: Minimum ``actionability_score`` to qualify.

        Returns:
            Qualifying insights sorted by actionability, descending.
        """
        qualified = [
            insight for insight in result.insights
            if insight.actionability_score >= min_actionability
        ]

        # Make sure every returned insight carries at least one suggestion.
        for insight in qualified:
            if not insight.recommended_actions:
                insight.recommended_actions = self._generate_action_recommendations(insight)

        qualified.sort(key=lambda ins: ins.actionability_score, reverse=True)
        return qualified
    
    def _generate_action_recommendations(self, insight: Insight) -> List[str]:
        """Build a list of suggested follow-up actions for an insight.

        Suggestions depend on the insight type, with extra urgency notes
        appended for high/critical significance.
        """
        actions: List[str] = []
        itype = insight.insight_type

        if itype == InsightType.CORRELATION and len(insight.involved_variables) == 2:
            var1, var2 = insight.involved_variables
            actions.append(f"进一步调查 {var1} 和 {var2} 之间的因果关系")
            actions.append(f"考虑在预测模型中使用 {var1} 来预测 {var2}")
        elif itype == InsightType.TREND and insight.involved_variables:
            var = insight.involved_variables[0]
            actions.append(f"监控 {var} 的趋势变化")
            actions.append(f"基于 {var} 的趋势制定相应的业务策略")
        elif itype == InsightType.ANOMALY:
            actions.extend([
                "调查异常值的产生原因",
                "建立异常值监控机制",
                "考虑在数据预处理中处理异常值",
            ])
        elif itype == InsightType.CLUSTERING:
            actions.extend([
                "基于聚类结果进行客户分群",
                "为不同聚类制定差异化策略",
            ])
        elif itype == InsightType.COMPARISON:
            actions.extend([
                "深入分析组间差异的原因",
                "考虑针对不同组别采取不同措施",
            ])

        # Type-independent urgency hints.
        if insight.significance == InsightSignificance.CRITICAL:
            actions.append("此洞察具有关键重要性，建议立即采取行动")
        elif insight.significance == InsightSignificance.HIGH:
            actions.append("此洞察重要性较高，建议优先处理")

        return actions
    
    def update_insight_business_context(self, insight: "Insight", business_context: Dict[str, Any]):
        """Enrich an insight with business context, mutating it in place.

        Boosts ``actionability_score`` when the insight relates to stated
        business goals, appends domain meanings of involved variables to
        ``business_impact``, and fills in recommended actions if missing.

        Args:
            insight: The insight to enrich (mutated in place).
            business_context: May contain 'business_goals' (list of goal
                strings) and/or 'domain_knowledge' (dict mapping variable
                names to info dicts with a 'business_meaning' key).
        """
        if 'business_goals' in business_context:
            goals = business_context['business_goals']

            # Score how relevant the insight is to the stated goals.
            # Comparison is case-insensitive on BOTH sides: the original
            # compared raw variable names against lower-cased goal text, so
            # e.g. "Revenue" never matched a goal mentioning "revenue".
            relevance_score = 0.0
            description_lower = insight.description.lower()
            for goal in goals:
                goal_lower = goal.lower()
                if any(var.lower() in goal_lower for var in insight.involved_variables):
                    relevance_score += 0.3
                if any(keyword in description_lower for keyword in goal_lower.split()):
                    relevance_score += 0.2

            # Boost actionability, capped at 1.0.
            insight.actionability_score = min(insight.actionability_score + relevance_score, 1.0)

        if 'domain_knowledge' in business_context:
            domain_info = business_context['domain_knowledge']

            # Append the domain meaning of each involved variable to the
            # business-impact narrative.
            for var in insight.involved_variables:
                if var in domain_info:
                    var_info = domain_info[var]
                    if 'business_meaning' in var_info:
                        insight.business_impact += f" {var} 在业务中表示 {var_info['business_meaning']}。"

        # Fill in recommendations if none exist yet.
        if not insight.recommended_actions:
            insight.recommended_actions = self._generate_action_recommendations(insight)