"""
异常检测系统

提供多维度异常识别算法、异常标记和处理建议功能
"""

import logging
import json
import numpy as np
import pandas as pd
from typing import Dict, List, Optional, Any, Tuple, Union, Callable
from dataclasses import dataclass, field
from enum import Enum
import datetime
import math
import statistics
from pathlib import Path
from scipy import stats
from sklearn.ensemble import IsolationForest
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.covariance import EllipticEnvelope

from ..models.base_models import BaseModel
from .base_component import BaseComponent

logger = logging.getLogger(__name__)


class AnomalyType(Enum):
    """Categories used to label detected anomalies."""
    STATISTICAL = "statistical"         # statistical anomaly (deviates from distribution)
    CONTEXTUAL = "contextual"          # contextual anomaly (abnormal in its context)
    COLLECTIVE = "collective"          # collective anomaly (abnormal as a group)
    POINT = "point"                    # point anomaly (single abnormal observation)
    SEASONAL = "seasonal"              # seasonal anomaly (breaks seasonal pattern)
    TREND = "trend"                    # trend anomaly (breaks trend)
    DISTRIBUTION = "distribution"       # distribution anomaly (shape of data deviates)
    CORRELATION = "correlation"        # correlation anomaly (variable relations change)
    PATTERN = "pattern"                # pattern anomaly (unexpected pattern)
    OUTLIER = "outlier"                # outlier value


class DetectionMethod(Enum):
    """Detection algorithms selectable via DetectionConfig.methods."""
    Z_SCORE = "z_score"                # z-score method
    IQR = "iqr"                        # interquartile-range method
    ISOLATION_FOREST = "isolation_forest"  # isolation forest
    DBSCAN = "dbscan"                  # density-based clustering
    ELLIPTIC_ENVELOPE = "elliptic_envelope"  # elliptic envelope (robust covariance)
    LOCAL_OUTLIER_FACTOR = "lof"       # local outlier factor
    ONE_CLASS_SVM = "one_class_svm"    # one-class SVM
    AUTOENCODER = "autoencoder"        # autoencoder
    LSTM = "lstm"                      # long short-term memory network
    STATISTICAL_TEST = "statistical_test"  # statistical hypothesis tests

class AnomalySeverity(Enum):
    """Severity levels assigned to detected anomalies (ascending order)."""
    LOW = "low"                        # low severity
    MEDIUM = "medium"                  # medium severity
    HIGH = "high"                      # high severity
    CRITICAL = "critical"              # critical severity


@dataclass
class AnomalyPoint(BaseModel):
    """A single data point flagged as anomalous, plus detection metadata."""
    # Location of the point in the source data.
    index: int = 0
    value: Any = None
    column: str = ""

    # Characterisation of the anomaly itself.
    anomaly_type: AnomalyType = AnomalyType.POINT
    severity: AnomalySeverity = AnomalySeverity.MEDIUM
    confidence: float = 0.0

    # How and when the point was detected.
    detection_method: DetectionMethod = DetectionMethod.Z_SCORE
    detection_time: datetime.datetime = field(default_factory=datetime.datetime.now)

    # Optional statistics computed by the detector that flagged it.
    z_score: Optional[float] = None
    percentile: Optional[float] = None
    distance_to_center: Optional[float] = None

    # Context: what was expected instead, and surrounding feature values.
    expected_value: Optional[Any] = None
    expected_range: Optional[Tuple[float, float]] = None
    context_features: Dict[str, Any] = field(default_factory=dict)

    # Handling guidance (filled in by the recommendation step).
    recommended_action: str = ""
    processing_priority: int = 1

    def get_anomaly_score(self) -> float:
        """Return confidence weighted by severity (LOW 0.5x ... CRITICAL 2.0x)."""
        weights = {
            AnomalySeverity.LOW: 0.5,
            AnomalySeverity.MEDIUM: 1.0,
            AnomalySeverity.HIGH: 1.5,
            AnomalySeverity.CRITICAL: 2.0,
        }
        # Unknown severities fall back to a neutral weight of 1.0.
        return self.confidence * weights.get(self.severity, 1.0)


@dataclass
class AnomalyCluster(BaseModel):
    """A group of related anomaly points treated as one collective anomaly.

    Call update_statistics() after mutating `anomaly_points` to keep the
    derived fields (size, avg_severity, max_severity) in sync.
    """
    cluster_id: str = ""
    anomaly_points: List[AnomalyPoint] = field(default_factory=list)
    cluster_type: AnomalyType = AnomalyType.COLLECTIVE
    
    # Spatial features of the cluster (in detector feature space).
    center: Optional[List[float]] = None
    radius: float = 0.0
    density: float = 0.0
    
    # Temporal extent, when the data carries timestamps.
    start_time: Optional[datetime.datetime] = None
    end_time: Optional[datetime.datetime] = None
    duration: Optional[float] = None
    
    # Derived statistics (maintained by update_statistics).
    size: int = 0
    avg_severity: float = 0.0
    max_severity: AnomalySeverity = AnomalySeverity.LOW
    
    # Ordinal encoding of severities, shared by the avg/max computations.
    # (Unannotated, so the dataclass machinery ignores it — not a field.)
    _SEVERITY_ORDER = {
        AnomalySeverity.LOW: 1,
        AnomalySeverity.MEDIUM: 2,
        AnomalySeverity.HIGH: 3,
        AnomalySeverity.CRITICAL: 4,
    }
    
    def update_statistics(self):
        """Recompute size, average severity and maximum severity.

        No-op when the cluster is empty (previous values are left as-is).
        """
        if not self.anomaly_points:
            return
        
        self.size = len(self.anomaly_points)
        
        # Map each point's severity to its ordinal score once; the original
        # rebuilt the mapping dict on every loop iteration. Unknown severities
        # default to MEDIUM's score (2), as before.
        scores = [self._SEVERITY_ORDER.get(p.severity, 2) for p in self.anomaly_points]
        self.avg_severity = sum(scores) / len(scores)
        
        # Invert the ordinal encoding to recover the highest severity level.
        reverse = {v: k for k, v in self._SEVERITY_ORDER.items()}
        self.max_severity = reverse[max(scores)]


@dataclass
class DetectionConfig(BaseModel):
    """Tunable parameters for an anomaly-detection run."""
    # Which detection methods to run, in order.
    methods: List[DetectionMethod] = field(default_factory=lambda: [DetectionMethod.Z_SCORE, DetectionMethod.IQR])
    
    # Thresholds for the statistical detectors.
    z_score_threshold: float = 3.0
    iqr_multiplier: float = 1.5
    contamination: float = 0.1  # expected anomaly fraction (model-based detectors)
    
    # DBSCAN parameters.
    eps: float = 0.5  # DBSCAN neighborhood radius
    min_samples: int = 5  # DBSCAN minimum neighbors for a core point
    
    # Time-series settings.
    window_size: int = 10
    seasonal_period: Optional[int] = None
    
    # Multivariate settings.
    enable_multivariate: bool = True
    correlation_threshold: float = 0.8
    
    # Result filtering.
    min_confidence: float = 0.5
    severity_filter: List[AnomalySeverity] = field(default_factory=list)
    
    # Post-processing.
    auto_clustering: bool = True
    generate_recommendations: bool = True


@dataclass
class DetectionResult(BaseModel):
    """Outcome of one anomaly-detection run."""
    success: bool = True
    error_message: str = ""
    
    # Detection output.
    total_points: int = 0
    anomaly_points: List[AnomalyPoint] = field(default_factory=list)
    anomaly_clusters: List[AnomalyCluster] = field(default_factory=list)
    
    # Performance bookkeeping.
    detection_time_ms: float = 0.0
    methods_used: List[DetectionMethod] = field(default_factory=list)
    
    # Quality estimates.
    detection_accuracy: float = 0.0
    false_positive_rate: float = 0.0
    coverage: float = 0.0
    
    # Free-form summary produced after detection.
    anomaly_summary: Dict[str, Any] = field(default_factory=dict)
    
    def get_anomaly_count(self) -> int:
        """Number of anomalous points found."""
        return len(self.anomaly_points)
    
    def get_anomaly_rate(self) -> float:
        """Fraction of all points flagged as anomalous (0.0 when no points)."""
        if self.total_points <= 0:
            return 0.0
        return self.get_anomaly_count() / self.total_points
    
    def get_severity_distribution(self) -> Dict[str, int]:
        """Histogram of anomaly counts keyed by severity name."""
        counts: Dict[str, int] = {}
        for point in self.anomaly_points:
            key = point.severity.value
            counts[key] = counts.get(key, 0) + 1
        return counts
    
    def get_method_performance(self) -> Dict[str, Dict[str, float]]:
        """Per-method stats: hit count, mean confidence, share of all anomalies."""
        stats_by_method: Dict[str, Dict[str, float]] = {}
        
        for method in self.methods_used:
            hits = [p for p in self.anomaly_points if p.detection_method == method]
            if not hits:
                continue
            stats_by_method[method.value] = {
                'count': len(hits),
                'avg_confidence': sum(p.confidence for p in hits) / len(hits),
                'coverage': len(hits) / len(self.anomaly_points),
            }
        
        return stats_by_method


class AnomalyDetectionSystem(BaseComponent):
    """异常检测系统"""
    
    def get_required_configs(self) -> List[str]:
        """Return the configuration keys this component requires (none)."""
        return list()
    
    def _setup_component(self):
        """Component-specific setup: register detectors, recommendation templates and the result cache.

        Called by the BaseComponent lifecycle; assumes self.logger is available.
        """
        self.logger.info("异常检测系统初始化")
        
        # Dispatch table: DetectionMethod -> bound detector implementation.
        self.detection_methods = {
            DetectionMethod.Z_SCORE: self._detect_z_score_anomalies,
            DetectionMethod.IQR: self._detect_iqr_anomalies,
            DetectionMethod.ISOLATION_FOREST: self._detect_isolation_forest_anomalies,
            DetectionMethod.DBSCAN: self._detect_dbscan_anomalies,
            DetectionMethod.ELLIPTIC_ENVELOPE: self._detect_elliptic_envelope_anomalies,
            DetectionMethod.STATISTICAL_TEST: self._detect_statistical_test_anomalies
        }
        
        # Per-anomaly-type handling advice (user-facing text, kept in Chinese).
        self.recommendation_templates = {
            AnomalyType.STATISTICAL: "建议检查数据收集过程，可能存在测量误差或录入错误",
            AnomalyType.CONTEXTUAL: "建议结合业务背景分析，可能是正常的特殊情况",
            AnomalyType.COLLECTIVE: "建议分析群体行为模式，可能存在系统性问题",
            AnomalyType.POINT: "建议单独处理该数据点，可能是偶发事件",
            AnomalyType.SEASONAL: "建议考虑季节性因素，可能需要季节性调整",
            AnomalyType.TREND: "建议分析趋势变化原因，可能需要模型更新",
            AnomalyType.DISTRIBUTION: "建议检查数据分布假设，可能需要变换处理",
            AnomalyType.CORRELATION: "建议分析变量关系变化，可能存在结构性改变",
            AnomalyType.PATTERN: "建议识别新的数据模式，可能需要规则更新",
            AnomalyType.OUTLIER: "建议评估是否为真实异常值，考虑删除或修正"
        }
        
        # Cache of DetectionResult keyed by a data/config fingerprint.
        self.detection_cache: Dict[str, DetectionResult] = {}
        
        self.logger.info("异常检测系统初始化完成")
   
 def detect_anomalies(self, 
                        data: pd.DataFrame, 
                        config: DetectionConfig = None) -> DetectionResult:
        """
        检测数据异常
        
        Args:
            data: 输入数据DataFrame
            config: 检测配置
            
        Returns:
            检测结果
        """
        try:
            start_time = datetime.datetime.now()
            
            if config is None:
                config = DetectionConfig()
            
            # 检查缓存
            cache_key = self._generate_cache_key(data, config)
            if cache_key in self.detection_cache:
                cached_result = self.detection_cache[cache_key]
                self.logger.info("使用缓存的异常检测结果")
                return cached_result
            
            # 初始化结果
            result = DetectionResult(
                total_points=len(data),
                methods_used=config.methods
            )
            
            all_anomalies = []
            
            # 使用多种方法检测异常
            for method in config.methods:
                if method in self.detection_methods:
                    method_anomalies = self.detection_methods[method](data, config)
                    all_anomalies.extend(method_anomalies)
                    self.logger.info(f"使用 {method.value} 方法检测到 {len(method_anomalies)} 个异常点")
            
            # 合并和去重异常点
            result.anomaly_points = self._merge_anomalies(all_anomalies, config)
            
            # 异常聚类
            if config.auto_clustering and result.anomaly_points:
                result.anomaly_clusters = self._cluster_anomalies(result.anomaly_points, config)
            
            # 生成处理建议
            if config.generate_recommendations:
                self._generate_recommendations(result.anomaly_points)
            
            # 计算执行时间
            execution_time = (datetime.datetime.now() - start_time).total_seconds() * 1000
            result.detection_time_ms = execution_time
            
            # 生成汇总信息
            result.anomaly_summary = self._generate_summary(result)
            
            # 缓存结果
            self.detection_cache[cache_key] = result
            
            self.logger.info(f"异常检测完成: 发现 {len(result.anomaly_points)} 个异常点 (耗时: {execution_time:.2f}ms)")
            return result
            
        except Exception as e:
            self.logger.error(f"异常检测失败: {str(e)}")
            return DetectionResult(
                success=False,
                error_message=str(e),
                total_points=len(data) if data is not None else 0
            )
    
    def detect_time_series_anomalies(self, 
                                   data: pd.Series, 
                                   timestamp_column: str = None,
                                   config: DetectionConfig = None) -> DetectionResult:
        """Detect anomalies specific to time-series data.

        Applies seasonal, trend and sliding-window detectors in turn, then
        merges the findings into a single DetectionResult.

        Args:
            data: the series to analyse (a DataFrame is also accepted)
            timestamp_column: index level to surface as a column, if present
            config: detection configuration; defaults to DetectionConfig()

        Returns:
            DetectionResult; on failure, success=False with error_message set.
        """
        try:
            cfg = config if config is not None else DetectionConfig()
            
            # Normalise input to a DataFrame with a 'value' column.
            if isinstance(data, pd.Series):
                frame = data.to_frame('value')
                if timestamp_column and timestamp_column in data.index.names:
                    frame.reset_index(inplace=True)
            else:
                frame = data.copy()
            
            found = []
            
            # Seasonal deviations (only when a period is configured).
            if cfg.seasonal_period:
                found.extend(self._detect_seasonal_anomalies(frame, cfg))
            
            # Trend breaks.
            found.extend(self._detect_trend_anomalies(frame, cfg))
            
            # Local deviations within a sliding window.
            found.extend(self._detect_window_anomalies(frame, cfg))
            
            result = DetectionResult(
                total_points=len(frame),
                anomaly_points=self._merge_anomalies(found, cfg),
                methods_used=[DetectionMethod.STATISTICAL_TEST]
            )
            
            result.anomaly_summary = self._generate_summary(result)
            
            self.logger.info(f"时间序列异常检测完成: 发现 {len(result.anomaly_points)} 个异常点")
            return result
            
        except Exception as e:
            self.logger.error(f"时间序列异常检测失败: {str(e)}")
            return DetectionResult(
                success=False,
                error_message=str(e),
                total_points=len(data) if data is not None else 0
            )
    
    def detect_multivariate_anomalies(self, 
                                    data: pd.DataFrame, 
                                    config: DetectionConfig = None) -> DetectionResult:
        """Detect anomalies across multiple numeric columns jointly.

        Falls back to plain detect_anomalies() when multivariate analysis is
        disabled in the config or fewer than two numeric columns exist.

        Args:
            data: multi-column input data
            config: detection configuration; defaults to DetectionConfig()

        Returns:
            DetectionResult; on failure, success=False with error_message set.
        """
        try:
            cfg = config if config is not None else DetectionConfig()
            
            if not cfg.enable_multivariate:
                return self.detect_anomalies(data, cfg)
            
            # Multivariate analysis needs at least two numeric columns.
            numeric_columns = data.select_dtypes(include=[np.number]).columns
            if len(numeric_columns) < 2:
                self.logger.warning("多变量异常检测需要至少2个数值列")
                return self.detect_anomalies(data, cfg)
            
            numeric_data = data[numeric_columns].dropna()
            
            found = []
            
            # Changes in inter-variable correlation structure.
            found.extend(self._detect_correlation_anomalies(numeric_data, cfg))
            
            # Mahalanobis-distance outliers.
            found.extend(self._detect_mahalanobis_anomalies(numeric_data, cfg))
            
            # Outliers in principal-component space.
            found.extend(self._detect_pca_anomalies(numeric_data, cfg))
            
            result = DetectionResult(
                total_points=len(data),
                anomaly_points=self._merge_anomalies(found, cfg),
                methods_used=[DetectionMethod.ELLIPTIC_ENVELOPE, DetectionMethod.ISOLATION_FOREST]
            )
            
            # Optionally group related anomalies into clusters.
            if cfg.auto_clustering and result.anomaly_points:
                result.anomaly_clusters = self._cluster_anomalies(result.anomaly_points, cfg)
            
            result.anomaly_summary = self._generate_summary(result)
            
            self.logger.info(f"多变量异常检测完成: 发现 {len(result.anomaly_points)} 个异常点")
            return result
            
        except Exception as e:
            self.logger.error(f"多变量异常检测失败: {str(e)}")
            return DetectionResult(
                success=False,
                error_message=str(e),
                total_points=len(data) if data is not None else 0
            )
    
    # 具体检测方法实现
    def _detect_z_score_anomalies(self, 
                                 data: pd.DataFrame, 
                                 config: DetectionConfig) -> List[AnomalyPoint]:
        """Flag values whose |z-score| exceeds config.z_score_threshold.

        Each numeric column is screened independently against its own
        mean/std; constant or near-empty columns are skipped.
        """
        found = []
        
        for col in data.select_dtypes(include=[np.number]).columns:
            series = data[col].dropna()
            # Need at least three points for a meaningful mean/std.
            if len(series) < 3:
                continue
            
            mu = series.mean()
            sigma = series.std()
            if sigma == 0:  # constant column: z-scores undefined
                continue
            
            z = ((series - mu) / sigma).abs()
            for idx in series[z > config.z_score_threshold].index:
                score = z.loc[idx]
                
                # Severity grows with z-score magnitude.
                if score > 4:
                    level = AnomalySeverity.CRITICAL
                elif score > 3.5:
                    level = AnomalySeverity.HIGH
                elif score > 3:
                    level = AnomalySeverity.MEDIUM
                else:
                    level = AnomalySeverity.LOW
                
                found.append(AnomalyPoint(
                    index=idx,
                    value=series.loc[idx],
                    column=col,
                    anomaly_type=AnomalyType.STATISTICAL,
                    severity=level,
                    confidence=min(1.0, score / 5.0),
                    detection_method=DetectionMethod.Z_SCORE,
                    z_score=score,
                    expected_value=mu,
                    expected_range=(mu - 2 * sigma, mu + 2 * sigma)
                ))
        
        return found
    
    def _detect_iqr_anomalies(self, 
                            data: pd.DataFrame, 
                            config: DetectionConfig) -> List[AnomalyPoint]:
        """Flag values outside the Tukey fences Q1/Q3 -/+ iqr_multiplier * IQR.

        Each numeric column is screened independently; columns with zero IQR
        or fewer than four points are skipped.
        """
        found = []
        
        for col in data.select_dtypes(include=[np.number]).columns:
            series = data[col].dropna()
            if len(series) < 4:  # quartiles need a few points
                continue
            
            q1 = series.quantile(0.25)
            q3 = series.quantile(0.75)
            iqr = q3 - q1
            if iqr == 0:  # degenerate spread
                continue
            
            lo = q1 - config.iqr_multiplier * iqr
            hi = q3 + config.iqr_multiplier * iqr
            
            for idx, val in series[(series < lo) | (series > hi)].items():
                # Distance past the nearer fence, in IQR units.
                gap = (lo - val) / iqr if val < lo else (val - hi) / iqr
                
                # Severity grows with the fence distance.
                if gap > 3:
                    level = AnomalySeverity.CRITICAL
                elif gap > 2:
                    level = AnomalySeverity.HIGH
                elif gap > 1:
                    level = AnomalySeverity.MEDIUM
                else:
                    level = AnomalySeverity.LOW
                
                found.append(AnomalyPoint(
                    index=idx,
                    value=val,
                    column=col,
                    anomaly_type=AnomalyType.OUTLIER,
                    severity=level,
                    confidence=min(1.0, gap / 3.0),
                    detection_method=DetectionMethod.IQR,
                    percentile=stats.percentileofscore(series, val) / 100.0,
                    expected_range=(lo, hi),
                    distance_to_center=gap
                ))
        
        return found
    
    def _detect_isolation_forest_anomalies(self, 
                                         data: pd.DataFrame, 
                                         config: DetectionConfig) -> List[AnomalyPoint]:
        """Isolation Forest detection over all numeric columns jointly.

        Fits on standardized numeric data with config.contamination as the
        expected anomaly fraction; each flagged row yields one AnomalyPoint
        attributed to the column with the largest absolute raw value.
        """
        anomalies = []
        
        # Numeric columns only; rows with any NaN are dropped.
        numeric_data = data.select_dtypes(include=[np.number]).dropna()
        if numeric_data.empty or len(numeric_data.columns) == 0:
            return anomalies
        
        try:
            # Standardize so all columns contribute on a comparable scale.
            scaler = StandardScaler()
            scaled_data = scaler.fit_transform(numeric_data)
            
            # Fixed random_state keeps results reproducible across runs.
            iso_forest = IsolationForest(
                contamination=config.contamination,
                random_state=42,
                n_estimators=100
            )
            
            # fit_predict labels anomalies -1; decision_function gives a
            # score where more negative means more anomalous.
            predictions = iso_forest.fit_predict(scaled_data)
            anomaly_scores = iso_forest.decision_function(scaled_data)
            
            # Positional indices of rows predicted anomalous.
            anomaly_indices = np.where(predictions == -1)[0]
            
            for idx in anomaly_indices:
                original_idx = numeric_data.index[idx]
                score = abs(anomaly_scores[idx])
                
                # Severity buckets on the absolute decision score.
                if score > 0.6:
                    severity = AnomalySeverity.CRITICAL
                elif score > 0.4:
                    severity = AnomalySeverity.HIGH
                elif score > 0.2:
                    severity = AnomalySeverity.MEDIUM
                else:
                    severity = AnomalySeverity.LOW
                
                # Attribute the anomaly to the column with the largest
                # absolute raw value in this row (heuristic).
                row_data = numeric_data.iloc[idx]
                most_anomalous_col = row_data.abs().idxmax()
                
                anomaly = AnomalyPoint(
                    index=original_idx,
                    value=row_data[most_anomalous_col],
                    column=most_anomalous_col,
                    anomaly_type=AnomalyType.POINT,
                    severity=severity,
                    confidence=min(1.0, score),
                    detection_method=DetectionMethod.ISOLATION_FOREST,
                    distance_to_center=score,
                    context_features=row_data.to_dict()
                )
                
                anomalies.append(anomaly)
        
        except Exception as e:
            self.logger.error(f"孤立森林异常检测失败: {str(e)}")
        
        return anomalies
    
    def _detect_dbscan_anomalies(self, 
                               data: pd.DataFrame, 
                               config: DetectionConfig) -> List[AnomalyPoint]:
        """Density-based detection: DBSCAN noise points become anomalies.

        Rows labelled -1 (noise) are scored by their Euclidean distance to
        the nearest cluster centroid in standardized feature space.
        """
        anomalies = []
        
        # Numeric columns only; rows with any NaN are dropped.
        numeric_data = data.select_dtypes(include=[np.number]).dropna()
        if numeric_data.empty or len(numeric_data.columns) == 0:
            return anomalies
        
        try:
            # Standardize so config.eps is comparable across columns.
            scaler = StandardScaler()
            scaled_data = scaler.fit_transform(numeric_data)
            
            # DBSCAN: points not assigned to any cluster get label -1.
            dbscan = DBSCAN(eps=config.eps, min_samples=config.min_samples)
            cluster_labels = dbscan.fit_predict(scaled_data)
            
            # Positional indices of noise points (label -1).
            noise_indices = np.where(cluster_labels == -1)[0]
            
            for idx in noise_indices:
                original_idx = numeric_data.index[idx]
                row_data = numeric_data.iloc[idx]
                
                # Distance to the nearest cluster centroid; defaults to 1.0
                # when no clusters were formed at all.
                non_noise_indices = np.where(cluster_labels != -1)[0]
                if len(non_noise_indices) > 0:
                    distances = []
                    for cluster_id in set(cluster_labels[non_noise_indices]):
                        cluster_points = scaled_data[cluster_labels == cluster_id]
                        cluster_center = np.mean(cluster_points, axis=0)
                        distance = np.linalg.norm(scaled_data[idx] - cluster_center)
                        distances.append(distance)
                    
                    min_distance = min(distances) if distances else 1.0
                else:
                    min_distance = 1.0
                
                # Severity buckets on distance (in standardized units).
                if min_distance > 3:
                    severity = AnomalySeverity.CRITICAL
                elif min_distance > 2:
                    severity = AnomalySeverity.HIGH
                elif min_distance > 1:
                    severity = AnomalySeverity.MEDIUM
                else:
                    severity = AnomalySeverity.LOW
                
                # Attribute the anomaly to the column with the largest
                # absolute raw value in this row (heuristic).
                most_anomalous_col = row_data.abs().idxmax()
                
                anomaly = AnomalyPoint(
                    index=original_idx,
                    value=row_data[most_anomalous_col],
                    column=most_anomalous_col,
                    anomaly_type=AnomalyType.COLLECTIVE,
                    severity=severity,
                    confidence=min(1.0, min_distance / 3.0),
                    detection_method=DetectionMethod.DBSCAN,
                    distance_to_center=min_distance,
                    context_features=row_data.to_dict()
                )
                
                anomalies.append(anomaly)
        
        except Exception as e:
            self.logger.error(f"DBSCAN异常检测失败: {str(e)}")
        
        return anomalies
    
    def _detect_elliptic_envelope_anomalies(self, 
                                          data: pd.DataFrame, 
                                          config: DetectionConfig) -> List[AnomalyPoint]:
        """Robust-covariance (elliptic envelope) detection on numeric columns.

        Rows outside the fitted Gaussian envelope are flagged; severity is
        derived from the Mahalanobis distance to the robust center.

        Fix: the inner per-row `except:` was a bare except, which also
        swallows KeyboardInterrupt/SystemExit; narrowed to `except Exception`.
        """
        anomalies = []
        
        # Needs at least two numeric columns to estimate a covariance.
        numeric_data = data.select_dtypes(include=[np.number]).dropna()
        if numeric_data.empty or len(numeric_data.columns) < 2:
            return anomalies
        
        try:
            # Robust covariance fit; fixed random_state for reproducibility.
            envelope = EllipticEnvelope(contamination=config.contamination, random_state=42)
            predictions = envelope.fit_predict(numeric_data)
            
            # Positional indices of rows flagged as outliers (-1).
            anomaly_indices = np.where(predictions == -1)[0]
            
            for idx in anomaly_indices:
                original_idx = numeric_data.index[idx]
                row_data = numeric_data.iloc[idx]
                
                # Mahalanobis distance to the robust center; fall back to a
                # neutral 1.0 if the computation fails for this row.
                try:
                    mahal_distance = envelope.mahalanobis(row_data.values.reshape(1, -1))[0]
                except Exception:
                    mahal_distance = 1.0
                
                # Severity buckets on the Mahalanobis distance.
                if mahal_distance > 10:
                    severity = AnomalySeverity.CRITICAL
                elif mahal_distance > 7:
                    severity = AnomalySeverity.HIGH
                elif mahal_distance > 5:
                    severity = AnomalySeverity.MEDIUM
                else:
                    severity = AnomalySeverity.LOW
                
                # Attribute the anomaly to the column with the largest
                # absolute raw value in this row (heuristic).
                most_anomalous_col = row_data.abs().idxmax()
                
                anomaly = AnomalyPoint(
                    index=original_idx,
                    value=row_data[most_anomalous_col],
                    column=most_anomalous_col,
                    anomaly_type=AnomalyType.DISTRIBUTION,
                    severity=severity,
                    confidence=min(1.0, mahal_distance / 10.0),
                    detection_method=DetectionMethod.ELLIPTIC_ENVELOPE,
                    distance_to_center=mahal_distance,
                    context_features=row_data.to_dict()
                )
                
                anomalies.append(anomaly)
        
        except Exception as e:
            self.logger.error(f"椭圆包络异常检测失败: {str(e)}")
        
        return anomalies
    
    def _detect_statistical_test_anomalies(self, 
                                         data: pd.DataFrame, 
                                         config: DetectionConfig) -> List[AnomalyPoint]:
        """Run statistical hypothesis tests (Grubbs, normality) per numeric column.

        Columns with fewer than 10 non-null values are skipped; per-column
        failures are logged and do not abort the other columns.
        """
        found = []
        
        for col in data.select_dtypes(include=[np.number]).columns:
            series = data[col].dropna()
            if len(series) < 10:  # tests are unreliable on tiny samples
                continue
            
            try:
                # Grubbs test: flags the single most extreme value.
                found.extend(self._grubbs_test(series, col))
                
                # Normality test: flags distribution-level deviations.
                found.extend(self._normality_test(series, col))
                
            except Exception as e:
                self.logger.error(f"统计检验异常检测失败 ({col}): {str(e)}")
        
        return found

    def _grubbs_test(self, values: pd.Series, column: str) -> List[AnomalyPoint]:
        """One-pass Grubbs test: flag the single most extreme value if significant.

        Uses the two-sided critical value at alpha=0.05 (approximation via the
        Student t-distribution). Returns at most one AnomalyPoint.
        """
        if len(values) < 3:
            return []
        
        center = values.mean()
        spread = values.std()
        if spread == 0:  # constant series: statistic undefined
            return []
        
        # Grubbs statistic for every point; only the maximum is tested.
        g = ((values - center) / spread).abs()
        g_max = g.max()
        g_argmax = g.idxmax()
        
        # Approximate critical value at alpha = 0.05 (two-sided).
        n = len(values)
        alpha = 0.05
        t_crit = stats.t.ppf(1 - alpha / (2 * n), n - 2)
        g_crit = ((n - 1) / np.sqrt(n)) * np.sqrt(t_crit ** 2 / (n - 2 + t_crit ** 2))
        
        if g_max <= g_crit:
            return []
        
        # Severity: HIGH when well past the critical value, else MEDIUM.
        return [AnomalyPoint(
            index=g_argmax,
            value=values.loc[g_argmax],
            column=column,
            anomaly_type=AnomalyType.STATISTICAL,
            severity=AnomalySeverity.HIGH if g_max > 1.5 * g_crit else AnomalySeverity.MEDIUM,
            confidence=min(1.0, g_max / g_crit),
            detection_method=DetectionMethod.STATISTICAL_TEST,
            z_score=g_max,
            expected_value=center
        )]
    
    def _normality_test(self, values: pd.Series, column: str) -> List[AnomalyPoint]:
        """Shapiro-Wilk normality check; flag the most deviant point when rejected.

        Only acts on a strong rejection (p < 0.01) and only when the most
        deviant point has |z| > 2. Failures are logged, not raised.
        """
        found = []
        
        try:
            # Shapiro-Wilk test for normality.
            stat, p_value = stats.shapiro(values)
            
            if p_value < 0.01:  # strong rejection of normality
                # Locate the point deviating most from the fitted normal.
                z = np.abs(stats.zscore(values))
                peak = z.argmax()
                # z may be an ndarray or a Series depending on library version.
                peak_z = z.iloc[peak] if hasattr(z, 'iloc') else z[peak]
                
                if peak_z > 2:
                    found.append(AnomalyPoint(
                        index=values.index[peak],
                        value=values.iloc[peak],
                        column=column,
                        anomaly_type=AnomalyType.DISTRIBUTION,
                        severity=AnomalySeverity.MEDIUM,
                        confidence=1 - p_value,
                        detection_method=DetectionMethod.STATISTICAL_TEST,
                        z_score=peak_z,
                        context_features={'shapiro_stat': stat, 'p_value': p_value}
                    ))
        
        except Exception as e:
            self.logger.error(f"正态性检验失败: {str(e)}")
        
        return found
    
    # 时间序列异常检测方法
    def _detect_seasonal_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Detect points deviating from the per-season positional baseline.

        Builds a baseline mean for each phase 0..period-1 via positional
        slicing and flags points deviating more than 50% from their phase mean.

        Fix: the season of a point is now derived from its POSITION in the
        series, not from its index label (`idx % period`). The baseline is
        built positionally (values[i::period]), so using the label — which may
        have gaps after dropna or be non-integer entirely — misaligned points
        with the wrong season and could raise TypeError.
        """
        anomalies = []
        
        if not config.seasonal_period or len(data) < config.seasonal_period * 2:
            return anomalies
        
        try:
            period = config.seasonal_period
            for column in data.select_dtypes(include=[np.number]).columns:
                values = data[column].dropna()
                
                # Baseline: mean of every value at positional phase i.
                seasonal_means = []
                for i in range(period):
                    phase_values = values[i::period]
                    seasonal_means.append(phase_values.mean() if len(phase_values) > 0 else 0)
                
                # Compare each point against its phase's baseline.
                for pos, (idx, value) in enumerate(values.items()):
                    season_idx = pos % period
                    expected_value = seasonal_means[season_idx]
                    
                    # Relative deviation from the seasonal expectation;
                    # absolute value when the expectation is zero.
                    if expected_value != 0:
                        deviation = abs(value - expected_value) / abs(expected_value)
                    else:
                        deviation = abs(value)
                    
                    if deviation > 0.5:  # more than 50% off the baseline
                        severity = AnomalySeverity.HIGH if deviation > 1.0 else AnomalySeverity.MEDIUM
                        
                        anomalies.append(AnomalyPoint(
                            index=idx,
                            value=value,
                            column=column,
                            anomaly_type=AnomalyType.SEASONAL,
                            severity=severity,
                            confidence=min(1.0, deviation),
                            detection_method=DetectionMethod.STATISTICAL_TEST,
                            expected_value=expected_value,
                            context_features={'seasonal_period': period, 'season_index': season_idx}
                        ))
        
        except Exception as e:
            self.logger.error(f"季节性异常检测失败: {str(e)}")
        
        return anomalies
    
    def _detect_trend_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Flag points that stray from their rolling-window trend (DataFrame form).

        Each point is z-scored against the trailing rolling mean/std (the
        window includes the point itself) and reported when |z| > 2.5.

        NOTE(review): a later method with the same name but an ndarray
        signature is defined further down this class and replaces this one at
        class creation — confirm which is intended.
        """
        anomalies = []

        try:
            window = config.window_size

            for column in data.select_dtypes(include=[np.number]).columns:
                series = data[column].dropna()
                if len(series) < window:
                    continue

                rolling_mean = series.rolling(window=window).mean()
                rolling_std = series.rolling(window=window).std()

                for pos in range(window, len(series)):
                    std_dev = rolling_std.iloc[pos]
                    # `not (x > 0)` also skips NaN std values, like the original guard.
                    if not std_dev > 0:
                        continue

                    observed = series.iloc[pos]
                    expected = rolling_mean.iloc[pos]
                    z = abs(observed - expected) / std_dev
                    if z <= 2.5:
                        continue

                    point = AnomalyPoint(
                        index=series.index[pos],
                        value=observed,
                        column=column,
                        anomaly_type=AnomalyType.TREND,
                        severity=AnomalySeverity.HIGH if z > 3.5 else AnomalySeverity.MEDIUM,
                        confidence=min(1.0, z / 4.0),
                        detection_method=DetectionMethod.STATISTICAL_TEST,
                        z_score=z,
                        expected_value=expected,
                        context_features={'window_size': window}
                    )
                    anomalies.append(point)

        except Exception as e:
            self.logger.error(f"趋势异常检测失败: {str(e)}")

        return anomalies
    
    def _detect_window_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Sliding-window contextual anomaly detection.

        Each point is z-scored against the mean/std of the `window_size`
        points immediately preceding it; |z| > 3 is reported.
        """
        anomalies = []

        try:
            win = config.window_size

            for column in data.select_dtypes(include=[np.number]).columns:
                series = data[column].dropna()
                if len(series) < win:
                    continue

                for pos in range(win, len(series)):
                    history = series.iloc[pos - win:pos]
                    mu = history.mean()
                    sigma = history.std()
                    # `not (x > 0)` also skips NaN std, matching the original guard.
                    if not sigma > 0:
                        continue

                    observed = series.iloc[pos]
                    z = abs(observed - mu) / sigma
                    if z <= 3.0:
                        continue

                    point = AnomalyPoint(
                        index=series.index[pos],
                        value=observed,
                        column=column,
                        anomaly_type=AnomalyType.CONTEXTUAL,
                        severity=AnomalySeverity.HIGH if z > 4.0 else AnomalySeverity.MEDIUM,
                        confidence=min(1.0, z / 5.0),
                        detection_method=DetectionMethod.STATISTICAL_TEST,
                        z_score=z,
                        expected_value=mu,
                        expected_range=(mu - 2 * sigma, mu + 2 * sigma),
                        context_features={'window_size': win, 'window_mean': mu, 'window_std': sigma}
                    )
                    anomalies.append(point)

        except Exception as e:
            self.logger.error(f"滑动窗口异常检测失败: {str(e)}")

        return anomalies
    
    # 多变量异常检测方法
    def _detect_correlation_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Detect points that break an otherwise strong pairwise correlation.

        For every pair of numeric columns whose |correlation| exceeds
        config.correlation_threshold, a least-squares line is fit and points
        whose residual z-score exceeds config.z_score_threshold are flagged.

        Bug fixed: the original body was truncated — it selected the column
        pair's values and then ended without building any anomalies or
        returning, so the method implicitly returned None and callers that
        extend() the result would crash. Completed with residual-based
        detection.
        """
        anomalies = []

        try:
            numeric = data.select_dtypes(include=[np.number])
            correlation_matrix = numeric.corr()
            columns = list(numeric.columns)

            for i, col1 in enumerate(columns):
                for col2 in columns[i + 1:]:
                    correlation = correlation_matrix.loc[col1, col2]

                    # Only pairs with an unusually strong (anti-)correlation.
                    if pd.isna(correlation) or abs(correlation) <= config.correlation_threshold:
                        continue

                    pair = numeric[[col1, col2]].dropna()
                    if len(pair) < 3:
                        continue

                    x_values = pair[col1]
                    y_values = pair[col2]

                    # Fit the linear relation and z-score the residuals.
                    slope, intercept = stats.linregress(x_values, y_values)[:2]
                    residuals = y_values - (slope * x_values + intercept)
                    resid_std = residuals.std()
                    if not resid_std > 0:
                        continue

                    resid_z = (residuals - residuals.mean()).abs() / resid_std
                    for idx, z in resid_z.items():
                        if z <= config.z_score_threshold:
                            continue

                        anomaly = AnomalyPoint(
                            index=idx,
                            value=y_values.loc[idx],
                            column=col2,
                            anomaly_type=AnomalyType.CORRELATION,
                            detection_method=DetectionMethod.STATISTICAL_TEST,
                            confidence=min(z / config.z_score_threshold, 1.0),
                            z_score=z,
                            expected_value=slope * x_values.loc[idx] + intercept,
                            context_features={'correlated_with': col1, 'correlation': correlation}
                        )
                        anomaly.severity = self._determine_severity(z, config.z_score_threshold, config.z_score_threshold * 2)
                        anomalies.append(anomaly)

        except Exception as e:
            self.logger.error(f"相关性异常检测失败: {str(e)}")

        return anomalies
    def detect_anomalies(self, 
                        data: pd.DataFrame, 
                        config: DetectionConfig = None) -> DetectionResult:
        """
        Detect anomalies in *data* using every configured method.

        Args:
            data: Input DataFrame to scan.
            config: Detection configuration; a default one is created when omitted.

        Returns:
            DetectionResult aggregating merged anomaly points, clusters,
            recommendations and quality metrics; an error result on failure.
        """
        try:
            started = datetime.datetime.now()

            cfg = DetectionConfig() if config is None else config

            result = DetectionResult(
                total_points=len(data),
                methods_used=cfg.methods
            )

            # Run every registered detector and pool the raw hits.
            collected = []
            for method in cfg.methods:
                detector = self.detection_methods.get(method)
                if detector is None:
                    continue
                found = detector(data, cfg)
                collected.extend(found)
                self.logger.info(f"使用 {method.value} 方法检测到 {len(found)} 个异常点")

            # Deduplicate / merge hits reported by multiple methods.
            result.anomaly_points = self._merge_anomalies(collected, cfg)

            # Optional clustering of the surviving anomalies.
            if cfg.auto_clustering and result.anomaly_points:
                result.anomaly_clusters = self._cluster_anomalies(result.anomaly_points, cfg)

            # Optional handling recommendations (mutates the points in place).
            if cfg.generate_recommendations:
                self._generate_recommendations(result.anomaly_points)

            elapsed_ms = (datetime.datetime.now() - started).total_seconds() * 1000
            result.detection_time_ms = elapsed_ms

            # Summary and quality metrics.
            result.anomaly_summary = self._generate_summary(result)
            result.detection_accuracy = self._evaluate_detection_accuracy(result, data)
            result.coverage = self._calculate_coverage(result, data)

            self.logger.info(f"异常检测完成: 检测到 {len(result.anomaly_points)} 个异常点 (耗时: {elapsed_ms:.2f}ms)")
            return result

        except Exception as e:
            self.logger.error(f"异常检测失败: {str(e)}")
            return DetectionResult(
                success=False,
                error_message=str(e),
                total_points=len(data) if data is not None else 0
            )
    
    def detect_multivariate_anomalies(self, 
                                    data: pd.DataFrame, 
                                    config: DetectionConfig = None) -> DetectionResult:
        """
        Multivariate anomaly detection over the numeric columns.

        Runs Isolation Forest and/or Elliptic Envelope (whichever appear in
        config.methods) on standardized numeric data.

        Args:
            data: Input DataFrame; must contain at least two numeric columns.
            config: Detection configuration; defaults are used when omitted.

        Returns:
            DetectionResult with the discovered anomaly points, or an error
            result when detection fails.
        """
        try:
            cfg = DetectionConfig() if config is None else config

            numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
            if len(numeric_columns) < 2:
                raise ValueError("多变量异常检测需要至少2个数值列")

            numeric_data = data[numeric_columns].dropna()

            # Standardize so every feature contributes comparably.
            scaled = StandardScaler().fit_transform(numeric_data)

            found = []

            # Isolation Forest: rows labelled -1 are outliers.
            if DetectionMethod.ISOLATION_FOREST in cfg.methods:
                forest = IsolationForest(
                    contamination=cfg.contamination,
                    random_state=42
                )
                labels = forest.fit_predict(scaled)
                scores = forest.decision_function(scaled)

                for row, (label, score) in enumerate(zip(labels, scores)):
                    if label != -1:
                        continue
                    point = AnomalyPoint(
                        index=numeric_data.index[row],
                        value=numeric_data.iloc[row].to_dict(),
                        column="multivariate",
                        anomaly_type=AnomalyType.POINT,
                        detection_method=DetectionMethod.ISOLATION_FOREST,
                        confidence=abs(score),
                        distance_to_center=abs(score)
                    )
                    point.severity = self._determine_severity(abs(score), 0.0, 1.0)
                    found.append(point)

            # Elliptic Envelope: robust-covariance outliers.
            # NOTE(review): the severity range here (0..10) differs from the
            # single-method helper's (3..6) — confirm which is intended.
            if DetectionMethod.ELLIPTIC_ENVELOPE in cfg.methods:
                envelope = EllipticEnvelope(contamination=cfg.contamination)
                labels = envelope.fit_predict(scaled)

                for row, label in enumerate(labels):
                    if label != -1:
                        continue
                    # Mahalanobis distance of this row to the fitted center.
                    mahal_dist = envelope.mahalanobis(scaled[row].reshape(1, -1))[0]

                    point = AnomalyPoint(
                        index=numeric_data.index[row],
                        value=numeric_data.iloc[row].to_dict(),
                        column="multivariate",
                        anomaly_type=AnomalyType.STATISTICAL,
                        detection_method=DetectionMethod.ELLIPTIC_ENVELOPE,
                        confidence=min(mahal_dist / 10, 1.0),
                        distance_to_center=mahal_dist
                    )
                    point.severity = self._determine_severity(mahal_dist, 0.0, 10.0)
                    found.append(point)

            result = DetectionResult(
                total_points=len(data),
                anomaly_points=found,
                methods_used=[m for m in cfg.methods
                              if m in [DetectionMethod.ISOLATION_FOREST, DetectionMethod.ELLIPTIC_ENVELOPE]]
            )

            if cfg.auto_clustering and found:
                result.anomaly_clusters = self._cluster_anomalies(found, cfg)

            return result

        except Exception as e:
            self.logger.error(f"多变量异常检测失败: {str(e)}")
            return DetectionResult(
                success=False,
                error_message=str(e),
                total_points=len(data) if data is not None else 0
            )
    
    def detect_time_series_anomalies(self, 
                                   data: pd.DataFrame, 
                                   time_column: str,
                                   value_column: str,
                                   config: DetectionConfig = None) -> DetectionResult:
        """
        Time-series anomaly detection: sliding-window z-score plus optional
        seasonal and trend passes.

        Args:
            data: Input DataFrame. Not modified — a working copy is taken.
            time_column: Name of the timestamp column.
            value_column: Name of the value column.
            config: Detection configuration; defaults are used when omitted.

        Returns:
            DetectionResult; anomaly indexes refer to the chronologically
            sorted row positions.

        Bug fixed: the original converted the time column in place
        (data[time_column] = pd.to_datetime(...)), mutating the caller's
        DataFrame as a side effect; conversion now happens on a copy.
        """
        try:
            if config is None:
                config = DetectionConfig()
            
            # Work on a copy so the caller's DataFrame is never mutated.
            work = data[[time_column, value_column]].copy()
            if not pd.api.types.is_datetime64_any_dtype(work[time_column]):
                work[time_column] = pd.to_datetime(work[time_column])
            
            # Chronological order; positions below refer to sorted order.
            data_sorted = work.sort_values(time_column).reset_index(drop=True)
            values = data_sorted[value_column].values
            
            anomalies = []
            
            # Sliding-window z-score against the trailing window.
            window_size = config.window_size
            for i in range(window_size, len(values)):
                window = values[i-window_size:i]
                current_value = values[i]
                
                window_mean = np.mean(window)
                window_std = np.std(window)
                
                if window_std > 0:
                    z_score = abs(current_value - window_mean) / window_std
                    
                    if z_score > config.z_score_threshold:
                        anomaly = AnomalyPoint(
                            index=data_sorted.index[i],
                            value=current_value,
                            column=value_column,
                            anomaly_type=AnomalyType.POINT,
                            detection_method=DetectionMethod.Z_SCORE,
                            confidence=min(z_score / config.z_score_threshold, 1.0),
                            z_score=z_score,
                            expected_value=window_mean,
                            expected_range=(window_mean - 2*window_std, window_mean + 2*window_std)
                        )
                        anomaly.severity = self._determine_severity(z_score, config.z_score_threshold, config.z_score_threshold * 2)
                        anomalies.append(anomaly)
            
            # Optional seasonal pass (ndarray helper).
            if config.seasonal_period:
                seasonal_anomalies = self._detect_seasonal_anomalies(
                    values, config.seasonal_period, config
                )
                anomalies.extend(seasonal_anomalies)
            
            # Trend pass (ndarray helper).
            trend_anomalies = self._detect_trend_anomalies(values, config)
            anomalies.extend(trend_anomalies)
            
            result = DetectionResult(
                total_points=len(data),
                anomaly_points=anomalies,
                methods_used=[DetectionMethod.Z_SCORE]
            )
            
            return result
            
        except Exception as e:
            self.logger.error(f"时间序列异常检测失败: {str(e)}")
            return DetectionResult(
                success=False,
                error_message=str(e),
                total_points=len(data) if data is not None else 0
            )
    
    # 具体检测方法实现
    def _detect_z_score_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Per-column z-score outlier detection.

        Flags values whose |z| against the column mean/std exceeds
        config.z_score_threshold.

        Bug fixed: each hit re-fetched its value with data.loc[idx, column],
        an O(n) lookup that returns a Series under duplicate index labels;
        the dropna'd series value is now used directly.
        """
        anomalies = []

        for column in data.select_dtypes(include=[np.number]).columns:
            values = data[column].dropna()
            if len(values) < 3:
                continue

            mean_val = values.mean()
            std_val = values.std()
            if std_val == 0:
                continue

            threshold = config.z_score_threshold
            for idx, value in values.items():
                z_score = abs(value - mean_val) / std_val
                if z_score > threshold:
                    anomaly = AnomalyPoint(
                        index=idx,
                        value=value,
                        column=column,
                        anomaly_type=AnomalyType.STATISTICAL,
                        detection_method=DetectionMethod.Z_SCORE,
                        confidence=min(z_score / threshold, 1.0),
                        z_score=z_score,
                        expected_value=mean_val,
                        expected_range=(mean_val - 2*std_val, mean_val + 2*std_val)
                    )
                    anomaly.severity = self._determine_severity(z_score, threshold, threshold * 2)
                    anomalies.append(anomaly)

        return anomalies
    
    def _detect_iqr_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """IQR (Tukey fence) outlier detection per numeric column.

        Points outside [Q1 - k*IQR, Q3 + k*IQR] with k = config.iqr_multiplier
        are reported; confidence scales with the distance past the fence.

        Bug fixed: values were re-fetched with data.loc[idx, column] — an
        O(n) lookup per hit that returns a Series under duplicate index
        labels; the dropna'd series value is now used directly.
        """
        anomalies = []

        for column in data.select_dtypes(include=[np.number]).columns:
            values = data[column].dropna()
            if len(values) < 4:
                continue

            Q1 = values.quantile(0.25)
            Q3 = values.quantile(0.75)
            IQR = Q3 - Q1
            if IQR == 0:
                continue

            lower_bound = Q1 - config.iqr_multiplier * IQR
            upper_bound = Q3 + config.iqr_multiplier * IQR

            for idx, value in values.items():
                if lower_bound <= value <= upper_bound:
                    continue

                # How many IQRs past the violated fence the point lies.
                if value < lower_bound:
                    distance = (lower_bound - value) / IQR
                else:
                    distance = (value - upper_bound) / IQR

                anomaly = AnomalyPoint(
                    index=idx,
                    value=value,
                    column=column,
                    anomaly_type=AnomalyType.OUTLIER,
                    detection_method=DetectionMethod.IQR,
                    confidence=min(distance / config.iqr_multiplier, 1.0),
                    percentile=stats.percentileofscore(values, value),
                    expected_range=(lower_bound, upper_bound)
                )
                anomaly.severity = self._determine_severity(distance, config.iqr_multiplier, config.iqr_multiplier * 2)
                anomalies.append(anomaly)

        return anomalies
    
    def _detect_isolation_forest_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Isolation-forest detection over standardized numeric columns.

        Rows labelled -1 by the forest are reported; |decision score| is used
        as both confidence and distance-to-center.
        """
        numeric_data = data.select_dtypes(include=[np.number]).dropna()
        if len(numeric_data) < 10:
            return []

        # Standardize so all features are on a comparable scale.
        scaled = StandardScaler().fit_transform(numeric_data)

        forest = IsolationForest(
            contamination=config.contamination,
            random_state=42
        )
        labels = forest.fit_predict(scaled)
        scores = forest.decision_function(scaled)

        anomalies = []
        for row, (label, score) in enumerate(zip(labels, scores)):
            if label != -1:
                continue
            point = AnomalyPoint(
                index=numeric_data.index[row],
                value=numeric_data.iloc[row].to_dict(),
                column="multivariate",
                anomaly_type=AnomalyType.POINT,
                detection_method=DetectionMethod.ISOLATION_FOREST,
                confidence=abs(score),
                distance_to_center=abs(score)
            )
            point.severity = self._determine_severity(abs(score), 0.0, 1.0)
            anomalies.append(point)

        return anomalies
    
    def _detect_dbscan_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """DBSCAN-based detection: noise points (label -1) are anomalies.

        Each noise point's confidence scales with its distance to the nearest
        cluster centroid.

        Perf fix: cluster centroids were recomputed from scratch for every
        noise point (O(noise * clusters * points)); they are now computed
        once before the loop.
        """
        anomalies = []

        numeric_data = data.select_dtypes(include=[np.number]).dropna()
        if len(numeric_data) < config.min_samples:
            return anomalies

        # Standardize so euclidean distances are comparable across features.
        scaler = StandardScaler()
        scaled_data = scaler.fit_transform(numeric_data)

        dbscan = DBSCAN(eps=config.eps, min_samples=config.min_samples)
        cluster_labels = dbscan.fit_predict(scaled_data)

        # Precompute each cluster's centroid (loop-invariant).
        centers = []
        for cluster_id in set(cluster_labels):
            if cluster_id != -1:
                cluster_points = scaled_data[cluster_labels == cluster_id]
                if len(cluster_points) > 0:
                    centers.append(np.mean(cluster_points, axis=0))

        for i, label in enumerate(cluster_labels):
            if label == -1:
                original_idx = numeric_data.index[i]

                # Distance to the nearest cluster centroid (0 when no clusters exist).
                distances = [np.linalg.norm(scaled_data[i] - center) for center in centers]
                min_distance = min(distances) if distances else 0

                anomaly = AnomalyPoint(
                    index=original_idx,
                    value=numeric_data.iloc[i].to_dict(),
                    column="multivariate",
                    anomaly_type=AnomalyType.COLLECTIVE,
                    detection_method=DetectionMethod.DBSCAN,
                    confidence=min(min_distance / 5, 1.0),
                    distance_to_center=min_distance
                )
                anomaly.severity = self._determine_severity(min_distance, 1.0, 3.0)
                anomalies.append(anomaly)

        return anomalies
    
    def _detect_elliptic_envelope_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Elliptic-envelope (robust covariance) anomaly detection.

        Rows labelled -1 are reported, scored by their Mahalanobis distance
        to the fitted center.

        Perf fix: the Mahalanobis distance was computed with one estimator
        call per outlier row; it is now computed for all rows in a single
        vectorized call and indexed.
        """
        anomalies = []

        numeric_data = data.select_dtypes(include=[np.number]).dropna()
        if len(numeric_data) < 10:
            return anomalies

        # Standardize before fitting the robust covariance model.
        scaler = StandardScaler()
        scaled_data = scaler.fit_transform(numeric_data)

        envelope = EllipticEnvelope(contamination=config.contamination)
        outlier_labels = envelope.fit_predict(scaled_data)

        # One vectorized call; mahalanobis() yields one distance per row.
        mahal_distances = envelope.mahalanobis(scaled_data)

        for i, label in enumerate(outlier_labels):
            if label == -1:
                mahal_dist = mahal_distances[i]

                anomaly = AnomalyPoint(
                    index=numeric_data.index[i],
                    value=numeric_data.iloc[i].to_dict(),
                    column="multivariate",
                    anomaly_type=AnomalyType.STATISTICAL,
                    detection_method=DetectionMethod.ELLIPTIC_ENVELOPE,
                    confidence=min(mahal_dist / 10, 1.0),
                    distance_to_center=mahal_dist
                )
                anomaly.severity = self._determine_severity(mahal_dist, 3.0, 6.0)
                anomalies.append(anomaly)

        return anomalies
    
    def _detect_statistical_test_anomalies(self, data: pd.DataFrame, config: DetectionConfig) -> List[AnomalyPoint]:
        """Grubbs' test for a single outlier per numeric column (alpha=0.05).

        Fixes: the original did a function-local `from scipy.stats import t`
        guarded only by `except ImportError`, but scipy is already imported at
        module level (`from scipy import stats`), so the guard was dead and
        any *other* exception propagated uncaught. Label lookups also used
        `values[label]`, which is ambiguous for integer labels; `.loc` is
        used instead, and failures are now logged per column.
        """
        anomalies = []

        for column in data.select_dtypes(include=[np.number]).columns:
            values = data[column].dropna()
            if len(values) < 8:
                continue

            try:
                n = len(values)
                mean_val = values.mean()
                std_val = values.std()
                if std_val == 0:
                    continue

                # Grubbs statistic: largest absolute deviation in std units.
                max_deviation_idx = (np.abs(values - mean_val)).idxmax()
                max_deviation = abs(values.loc[max_deviation_idx] - mean_val)
                grubbs_stat = max_deviation / std_val

                # Two-sided critical value at alpha = 0.05.
                t_critical = stats.t.ppf(1 - 0.05 / (2 * n), n - 2)
                grubbs_critical = ((n - 1) / np.sqrt(n)) * np.sqrt(t_critical**2 / (n - 2 + t_critical**2))

                if grubbs_stat > grubbs_critical:
                    anomaly = AnomalyPoint(
                        index=max_deviation_idx,
                        value=values.loc[max_deviation_idx],
                        column=column,
                        anomaly_type=AnomalyType.STATISTICAL,
                        detection_method=DetectionMethod.STATISTICAL_TEST,
                        confidence=min(grubbs_stat / grubbs_critical, 1.0),
                        z_score=grubbs_stat,
                        expected_value=mean_val
                    )
                    anomaly.severity = self._determine_severity(grubbs_stat, grubbs_critical, grubbs_critical * 1.5)
                    anomalies.append(anomaly)

            except Exception as e:
                self.logger.error(f"统计检验异常检测失败 ({column}): {str(e)}")

        return anomalies    

    def _detect_seasonal_anomalies(self, values: np.ndarray, period: int, config: DetectionConfig) -> List[AnomalyPoint]:
        """Seasonal anomaly detection over a raw value array.

        Splits the series into `period` phase groups (values[i::period]),
        computes each phase's mean/std, and z-scores every point against its
        own phase baseline.

        NOTE(review): a method with the same name and signature is defined
        again later in this class; the later definition replaces this one at
        class creation, so this copy is effectively dead. The later copy also
        differs slightly (it substitutes std=1.0 for singleton phases and
        omits expected_range) — confirm which variant is intended.
        """
        anomalies = []
        
        # Need at least two full cycles to form a baseline.
        if len(values) < period * 2:
            return anomalies
        
        # Per-phase baseline statistics; phases with <2 samples are skipped.
        seasonal_means = {}
        seasonal_stds = {}
        
        for i in range(period):
            seasonal_values = values[i::period]
            if len(seasonal_values) > 1:
                seasonal_means[i] = np.mean(seasonal_values)
                seasonal_stds[i] = np.std(seasonal_values)
        
        # Z-score each point against its phase baseline.
        for i, value in enumerate(values):
            season_idx = i % period
            
            # Only phases with a usable (non-zero) spread are checked.
            if season_idx in seasonal_means and seasonal_stds[season_idx] > 0:
                expected_mean = seasonal_means[season_idx]
                expected_std = seasonal_stds[season_idx]
                
                z_score = abs(value - expected_mean) / expected_std
                
                if z_score > config.z_score_threshold:
                    anomaly = AnomalyPoint(
                        index=i,
                        value=value,
                        column="time_series",
                        anomaly_type=AnomalyType.SEASONAL,
                        detection_method=DetectionMethod.Z_SCORE,
                        confidence=min(z_score / config.z_score_threshold, 1.0),
                        z_score=z_score,
                        expected_value=expected_mean,
                        expected_range=(expected_mean - 2*expected_std, expected_mean + 2*expected_std),
                        context_features={'season': season_idx, 'period': period}
                    )
                    anomaly.severity = self._determine_severity(z_score, config.z_score_threshold, config.z_score_threshold * 2)
                    anomalies.append(anomaly)
        
        return anomalies
    
    def _detect_trend_anomalies(self, values: np.ndarray, config: DetectionConfig) -> List[AnomalyPoint]:
        """Detect abrupt slope changes between adjacent sliding windows.

        For each position, a line is fit to the preceding and the following
        `window_size` points; a large jump in slope marks a trend anomaly.

        NOTE(review): this method is defined again later in the class body and
        that later definition wins at class creation. This copy was left
        truncated in the original (it computed `trend_change`, started
        `all_trends = []`, never appended anything, and fell off without a
        return). Completed here to mirror the live version's behavior.
        """
        anomalies = []

        if len(values) < config.window_size * 2:
            return anomalies

        window_size = config.window_size
        baseline = np.std(values)           # loop-invariant, hoisted
        change_threshold = baseline * 0.5   # tunable sensitivity

        for i in range(window_size, len(values) - window_size):
            prev_window = values[i - window_size:i]
            next_window = values[i:i + window_size]

            # Least-squares slope of each window.
            prev_trend = np.polyfit(range(window_size), prev_window, 1)[0]
            next_trend = np.polyfit(range(window_size), next_window, 1)[0]
            trend_change = abs(next_trend - prev_trend)

            if change_threshold > 0 and trend_change > change_threshold:
                anomaly = AnomalyPoint(
                    index=i,
                    value=values[i],
                    column="time_series",
                    anomaly_type=AnomalyType.TREND,
                    detection_method=DetectionMethod.STATISTICAL_TEST,
                    confidence=min(trend_change / change_threshold, 1.0),
                    context_features={
                        'prev_trend': prev_trend,
                        'next_trend': next_trend,
                        'trend_change': trend_change
                    }
                )
                anomaly.severity = self._determine_severity(trend_change, change_threshold, baseline)
                anomalies.append(anomaly)

        return anomalies

    # Helper methods
    def _detect_seasonal_anomalies(self, values: np.ndarray, period: int, config: DetectionConfig) -> List[AnomalyPoint]:
        """Flag points that deviate from their per-season baseline.

        The series is split into `period` phase groups (values[i::period]);
        every point is z-scored against its own phase's mean/std. Singleton
        phases fall back to std=1.0 so they can still be compared.
        """
        anomalies = []

        # Need at least two full cycles to form a baseline.
        if len(values) < period * 2:
            return anomalies

        # Per-phase baseline statistics.
        phase_means = []
        phase_stds = []
        for phase in range(period):
            members = values[phase::period]
            if len(members) > 1:
                phase_means.append(np.mean(members))
                phase_stds.append(np.std(members))
            else:
                phase_means.append(members[0] if len(members) > 0 else 0)
                phase_stds.append(1.0)

        threshold = config.z_score_threshold

        # Z-score each observation against its phase baseline.
        for pos, observed in enumerate(values):
            phase = pos % period
            baseline_mean = phase_means[phase]
            baseline_std = phase_stds[phase]

            if not baseline_std > 0:
                continue

            z = abs(observed - baseline_mean) / baseline_std
            if z <= threshold:
                continue

            point = AnomalyPoint(
                index=pos,
                value=observed,
                column="time_series",
                anomaly_type=AnomalyType.SEASONAL,
                detection_method=DetectionMethod.Z_SCORE,
                confidence=min(z / threshold, 1.0),
                z_score=z,
                expected_value=baseline_mean,
                context_features={'season': phase, 'period': period}
            )
            point.severity = self._determine_severity(z, threshold, threshold * 2)
            anomalies.append(point)

        return anomalies
    
    def _detect_trend_anomalies(self, values: np.ndarray, config: DetectionConfig) -> List[AnomalyPoint]:
        """Detect abrupt slope changes between adjacent sliding windows.

        A line is fit to the `window_size` points before and after each
        position; a slope jump larger than half the series' global std is
        flagged as a trend anomaly.

        Perf fix: np.std(values) was recomputed up to three times per loop
        iteration although it never changes; it is now computed once up front.
        """
        anomalies = []

        if len(values) < config.window_size * 2:
            return anomalies

        window_size = config.window_size
        global_std = np.std(values)            # loop-invariant, hoisted
        change_threshold = global_std * 0.5    # threshold is tunable

        for i in range(window_size, len(values) - window_size):
            prev_window = values[i - window_size:i]
            next_window = values[i:i + window_size]

            # Least-squares slope of each window.
            prev_trend = np.polyfit(range(window_size), prev_window, 1)[0]
            next_trend = np.polyfit(range(window_size), next_window, 1)[0]

            trend_change = abs(next_trend - prev_trend)

            # A slope jump beyond the threshold marks a trend break.
            if trend_change > change_threshold:
                anomaly = AnomalyPoint(
                    index=i,
                    value=values[i],
                    column="time_series",
                    anomaly_type=AnomalyType.TREND,
                    detection_method=DetectionMethod.STATISTICAL_TEST,
                    confidence=min(trend_change / change_threshold, 1.0),
                    context_features={
                        'prev_trend': prev_trend,
                        'next_trend': next_trend,
                        'trend_change': trend_change
                    }
                )
                anomaly.severity = self._determine_severity(trend_change, change_threshold, global_std)
                anomalies.append(anomaly)

        return anomalies
    
    def _merge_anomalies(self, anomalies: List[AnomalyPoint], config: DetectionConfig) -> List[AnomalyPoint]:
        """Merge duplicate anomalies and filter by confidence and severity.

        Anomalies reported at the same (index, column) position by several
        detectors are collapsed into the single most confident one, whose
        confidence is boosted when multiple methods agree.
        """
        if not anomalies:
            return []

        # Group anomalies by their (row index, column) position.
        groups = {}
        for point in anomalies:
            groups.setdefault((point.index, point.column), []).append(point)

        merged = []
        for group in groups.values():
            if len(group) == 1:
                merged.append(group[0])
                continue

            # Keep the most confident detection at this position.
            best = max(group, key=lambda p: p.confidence)

            # Record which detection methods agreed on this anomaly.
            methods = [p.detection_method.value for p in group]
            unique_methods = len(set(methods))
            best.context_features['detection_methods'] = methods
            best.context_features['method_count'] = unique_methods

            # Cross-method agreement raises confidence (capped at 1.0).
            best.confidence = min(best.confidence * (1 + 0.1 * unique_methods), 1.0)

            merged.append(best)

        # Drop detections below the configured confidence floor.
        kept = [p for p in merged if p.confidence >= config.min_confidence]

        # Optionally restrict to the configured severity levels.
        if config.severity_filter:
            kept = [p for p in kept if p.severity in config.severity_filter]

        return kept
    
    def _cluster_anomalies(self, anomalies: List[AnomalyPoint], config: DetectionConfig) -> List[AnomalyCluster]:
        """Cluster related anomaly points with DBSCAN.

        Each anomaly is encoded as a small numeric feature vector
        (position, confidence, column code, type code), standardized, and
        clustered; DBSCAN noise points are dropped and only groups of at
        least 2 members form a cluster.

        Returns:
            List of AnomalyCluster objects (empty on failure or <2 inputs).
        """
        if len(anomalies) < 2:
            return []

        clusters = []

        try:
            # Build the feature matrix. Use a deterministic digest for the
            # string fields: builtin hash() is randomized per process
            # (PYTHONHASHSEED), which previously made the clustering
            # non-reproducible between runs.
            features = []
            for anomaly in anomalies:
                features.append([
                    anomaly.index,
                    anomaly.confidence,
                    sum(anomaly.column.encode('utf-8')) % 1000,              # stable column code
                    sum(anomaly.anomaly_type.value.encode('utf-8')) % 1000,  # stable type code
                ])

            features = np.array(features)

            # Standardize so no single feature dominates the distance metric.
            scaled_features = StandardScaler().fit_transform(features)

            # DBSCAN with a relaxed min_samples (at least 2).
            dbscan = DBSCAN(eps=config.eps, min_samples=max(2, config.min_samples // 2))
            cluster_labels = dbscan.fit_predict(scaled_features)

            # Group anomalies by cluster label, skipping noise (label -1).
            cluster_dict = {}
            for i, label in enumerate(cluster_labels):
                if label != -1:
                    cluster_dict.setdefault(label, []).append(anomalies[i])

            # Materialize cluster objects (require >= 2 members).
            for cluster_id, cluster_anomalies in cluster_dict.items():
                if len(cluster_anomalies) >= 2:
                    cluster = AnomalyCluster(
                        cluster_id=f"cluster_{cluster_id}",
                        anomaly_points=cluster_anomalies,
                        cluster_type=self._determine_cluster_type(cluster_anomalies)
                    )
                    cluster.update_statistics()
                    clusters.append(cluster)

        except Exception as e:
            # Clustering is best-effort; a failure must not abort detection.
            self.logger.warning(f"异常聚类失败: {str(e)}")

        return clusters
    
    def _determine_cluster_type(self, anomalies: List[AnomalyPoint]) -> AnomalyType:
        """Return the dominant anomaly type among the given points.

        Falls back to COLLECTIVE when the list is empty.
        """
        # Tally how often each type occurs within the cluster.
        counts = {}
        for point in anomalies:
            counts[point.anomaly_type] = counts.get(point.anomaly_type, 0) + 1

        if not counts:
            return AnomalyType.COLLECTIVE

        # The most frequent type wins (first one on ties, as before).
        return max(counts.items(), key=lambda kv: kv[1])[0]
    
    def _determine_severity(self, score: float, threshold: float, high_threshold: float) -> AnomalySeverity:
        """Map a detection score to a severity level.

        Bands, checked from most to least severe:
            score >= high_threshold   -> CRITICAL
            score >= threshold * 1.5  -> HIGH
            score >= threshold        -> MEDIUM
            otherwise                 -> LOW
        """
        bands = (
            (high_threshold, AnomalySeverity.CRITICAL),
            (threshold * 1.5, AnomalySeverity.HIGH),
            (threshold, AnomalySeverity.MEDIUM),
        )
        for cutoff, severity in bands:
            if score >= cutoff:
                return severity
        return AnomalySeverity.LOW
    
    def _generate_recommendations(self, anomalies: List[AnomalyPoint]):
        """Attach a recommended action and a processing priority to each anomaly.

        The recommendation joins a per-type base template, a per-severity
        action and a per-method hint; the priority maps severity to an
        integer where 1 is the most urgent.
        """
        # The lookup tables are loop-invariant; build them once instead of
        # re-creating the dicts for every anomaly in the loop.
        severity_actions = {
            AnomalySeverity.LOW: "可以暂时忽略，但建议持续监控",
            AnomalySeverity.MEDIUM: "建议进一步调查原因",
            AnomalySeverity.HIGH: "需要立即关注和处理",
            AnomalySeverity.CRITICAL: "紧急处理，可能影响数据质量"
        }
        method_suggestions = {
            DetectionMethod.Z_SCORE: "建议检查数据分布是否正常",
            DetectionMethod.IQR: "建议确认是否为真实的极值",
            DetectionMethod.ISOLATION_FOREST: "建议分析该点与其他数据的差异",
            DetectionMethod.DBSCAN: "建议检查是否存在数据录入错误",
            DetectionMethod.ELLIPTIC_ENVELOPE: "建议验证数据的多维度一致性"
        }
        priority_map = {
            AnomalySeverity.LOW: 4,
            AnomalySeverity.MEDIUM: 3,
            AnomalySeverity.HIGH: 2,
            AnomalySeverity.CRITICAL: 1
        }

        for anomaly in anomalies:
            # Per-type base recommendation with a generic fallback.
            base_recommendation = self.recommendation_templates.get(
                anomaly.anomaly_type,
                "建议进一步分析该异常点的成因"
            )

            severity_action = severity_actions.get(anomaly.severity, "")
            method_suggestion = method_suggestions.get(anomaly.detection_method, "")

            # Join the non-empty parts into a single action string.
            recommendations = [base_recommendation]
            if severity_action:
                recommendations.append(severity_action)
            if method_suggestion:
                recommendations.append(method_suggestion)

            anomaly.recommended_action = "；".join(recommendations)

            # Priority defaults to 3 (medium) for unknown severities.
            anomaly.processing_priority = priority_map.get(anomaly.severity, 3)
    
    def _generate_summary(self, result: DetectionResult) -> Dict[str, Any]:
        """Build a summary dict for a detection run.

        Includes overall counts and rates, per-column statistics
        (count, average confidence, maximum severity) and the
        distribution of anomaly types.
        """
        summary = {
            'total_anomalies': len(result.anomaly_points),
            'anomaly_rate': result.get_anomaly_rate(),
            'severity_distribution': result.get_severity_distribution(),
            'method_performance': result.get_method_performance(),
            'cluster_count': len(result.anomaly_clusters)
        }

        # Numeric rank for severity comparison. The previous code compared
        # the enums' string values, which ordered severities alphabetically
        # ("critical" < "high" < "low" < "medium") and picked wrong maxima.
        severity_rank = {
            AnomalySeverity.LOW: 0,
            AnomalySeverity.MEDIUM: 1,
            AnomalySeverity.HIGH: 2,
            AnomalySeverity.CRITICAL: 3
        }

        # Per-column anomaly statistics.
        column_stats = {}
        for anomaly in result.anomaly_points:
            col_entry = column_stats.setdefault(anomaly.column, {
                'count': 0,
                'avg_confidence': 0.0,
                'max_severity': AnomalySeverity.LOW
            })

            col_entry['count'] += 1
            col_entry['avg_confidence'] += anomaly.confidence

            if severity_rank[anomaly.severity] > severity_rank[col_entry['max_severity']]:
                col_entry['max_severity'] = anomaly.severity

        # Turn the accumulated confidence sums into averages.
        # (Loop variable renamed: the old name `stats` shadowed the
        # module-level `from scipy import stats`.)
        for col_entry in column_stats.values():
            if col_entry['count'] > 0:
                col_entry['avg_confidence'] /= col_entry['count']

        summary['column_statistics'] = column_stats

        # Distribution of anomaly types across all points.
        type_distribution = {}
        for anomaly in result.anomaly_points:
            key = anomaly.anomaly_type.value
            type_distribution[key] = type_distribution.get(key, 0) + 1

        summary['type_distribution'] = type_distribution

        return summary
    
    def _evaluate_detection_accuracy(self, result: DetectionResult, data: pd.DataFrame) -> float:
        """Heuristically estimate detection accuracy in [0, 1].

        True accuracy requires labeled data; this combines a base score
        with a bonus for a plausible anomaly rate and a bonus for
        cross-method agreement. ``data`` is currently unused but kept for
        interface stability.
        """
        base_accuracy = 0.8  # prior belief in the detector

        # Reward anomaly rates in a plausible range; penalize likely
        # over-reporting above 20%.
        anomaly_rate = result.get_anomaly_rate()
        if 0.01 <= anomaly_rate <= 0.1:
            accuracy_bonus = 0.1
        elif anomaly_rate > 0.2:
            accuracy_bonus = -0.2
        else:
            accuracy_bonus = 0.0

        # Bonus for cross-method agreement. Guard against division by zero
        # when multiple methods ran but no anomalies were found (the old
        # code raised ZeroDivisionError in that case).
        method_consistency = 0.0
        if len(result.methods_used) > 1 and result.anomaly_points:
            multi_method_count = sum(
                1 for anomaly in result.anomaly_points
                if anomaly.context_features.get('method_count', 1) > 1
            )
            method_consistency = multi_method_count / len(result.anomaly_points) * 0.1

        return min(1.0, base_accuracy + accuracy_bonus + method_consistency)
    
    def _calculate_coverage(self, result: DetectionResult, data: pd.DataFrame) -> float:
        """Return the fraction of the dataset covered by detection, in [0, 1].

        Returns 0.0 when nothing was examined or when the numeric columns
        contain no valid (non-null) values at all.
        """
        if result.total_points == 0:
            return 0.0

        # Count non-null cells across numeric columns; used only as a guard.
        # NOTE(review): the final ratio divides by len(data), not by this
        # count — confirm whether per-cell coverage was intended here.
        numeric_columns = data.select_dtypes(include=[np.number]).columns
        valid_data_points = sum(int(data[col].notna().sum()) for col in numeric_columns)

        if valid_data_points == 0:
            return 0.0

        return min(1.0, result.total_points / len(data))
    
    def get_anomaly_report(self, result: DetectionResult) -> str:
        """Render a human-readable anomaly-detection report.

        Sections: overview, severity distribution, clusters (if any), and
        up to 10 HIGH/CRITICAL anomalies with their recommended actions.
        Returns an error message string when the detection itself failed.
        """
        if not result.success:
            return f"异常检测失败: {result.error_message}"

        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        lines = [
            "=== 异常检测报告 ===",
            f"检测时间: {timestamp}",
            f"总数据点: {result.total_points}",
            f"检测到异常: {result.get_anomaly_count()} 个",
            f"异常率: {result.get_anomaly_rate():.2%}",
            f"检测耗时: {result.detection_time_ms:.2f} ms",
            "",
            "=== 严重程度分布 ===",
        ]

        # One line per severity level present in the result.
        for severity, count in result.get_severity_distribution().items():
            lines.append(f"{severity}: {count} 个")

        # Cluster section, only when clustering produced results.
        if result.anomaly_clusters:
            lines.append("")
            lines.append("=== 异常聚类 ===")
            lines.append(f"发现 {len(result.anomaly_clusters)} 个异常聚类")
            for cluster in result.anomaly_clusters:
                lines.append(
                    f"聚类 {cluster.cluster_id}: {cluster.size} 个异常点, "
                    f"最高严重程度: {cluster.max_severity.value}"
                )

        # High-priority section: HIGH/CRITICAL anomalies, capped at 10.
        urgent = [
            a for a in result.anomaly_points
            if a.severity in (AnomalySeverity.HIGH, AnomalySeverity.CRITICAL)
        ]
        if urgent:
            lines.append("")
            lines.append("=== 高优先级异常 ===")
            for anomaly in urgent[:10]:
                lines.append(
                    f"索引 {anomaly.index}, 列 {anomaly.column}: "
                    f"{anomaly.severity.value} (置信度: {anomaly.confidence:.2f})"
                )
                if anomaly.recommended_action:
                    lines.append(f"  建议: {anomaly.recommended_action}")

        return "\n".join(lines)
    
    def export_anomalies_to_csv(self, result: DetectionResult, file_path: str) -> bool:
        """Export all anomaly points to a CSV file.

        Returns True on success; False when there is nothing to export or
        the write fails (the error is logged, never raised).
        """
        try:
            if not result.anomaly_points:
                self.logger.warning("没有异常点可导出")
                return False

            # One flat row per anomaly point.
            rows = [
                {
                    'index': a.index,
                    'column': a.column,
                    'value': a.value,
                    'anomaly_type': a.anomaly_type.value,
                    'severity': a.severity.value,
                    'confidence': a.confidence,
                    'detection_method': a.detection_method.value,
                    'z_score': a.z_score,
                    'percentile': a.percentile,
                    'expected_value': a.expected_value,
                    'recommended_action': a.recommended_action,
                    'processing_priority': a.processing_priority,
                    'detection_time': a.detection_time.isoformat(),
                }
                for a in result.anomaly_points
            ]

            # Write via pandas; keep UTF-8 encoding and drop the index.
            pd.DataFrame(rows).to_csv(file_path, index=False, encoding='utf-8')

            self.logger.info(f"异常数据已导出到: {file_path}")
            return True

        except Exception as e:
            self.logger.error(f"导出异常数据失败: {str(e)}")
            return False