"""
异常检测模块，用于检测训练过程中的异常情况。
"""
import torch
import numpy as np
from typing import Dict, List, Optional, Union, Tuple
from dataclasses import dataclass
from enum import Enum

class AnomalyType(Enum):
    """Categories of training anomalies reported by AnomalyDetector."""
    GRADIENT_EXPLOSION = "gradient_explosion"  # gradient norm above threshold
    GRADIENT_VANISHING = "gradient_vanishing"  # gradient norm below threshold
    NAN_VALUE = "nan_value"  # tensor contains NaN entries
    INF_VALUE = "inf_value"  # tensor contains Inf entries
    OUTLIER = "outlier"  # gradient norm far from its rolling mean

@dataclass
class AnomalyResult:
    """A single anomaly detected by AnomalyDetector.

    Instances are created by ``check_gradient`` / ``check_parameter``.
    """
    anomaly_type: AnomalyType  # category of the detected anomaly
    parameter_name: str  # name of the parameter the anomaly belongs to
    value: float  # observed value (gradient norm, or nan/inf marker)
    threshold: float  # threshold that was violated (0.0 for NaN/Inf results)
    step: int  # training step at which the anomaly was detected
    details: Optional[Dict] = None  # extra info, e.g. counts or rolling statistics

class AnomalyDetector:
    """Detects training anomalies in gradients and parameters.

    Reports NaN/Inf values, gradient explosion/vanishing (norm thresholds),
    and statistical outliers relative to a per-parameter rolling window of
    recent gradient norms.
    """

    def __init__(self,
                 grad_norm_threshold: float = 10.0,
                 grad_min_threshold: float = 1e-6,
                 outlier_std_threshold: float = 3.0,
                 history_size: int = 100):
        """
        Initialize the anomaly detector.

        Args:
            grad_norm_threshold: gradient norm above this value is reported
                as a gradient explosion.
            grad_min_threshold: gradient norm below this value is reported
                as gradient vanishing.
            outlier_std_threshold: number of standard deviations from the
                rolling mean beyond which a norm counts as an outlier.
            history_size: number of recent gradient norms kept per parameter
                for outlier detection.
        """
        self.grad_norm_threshold = grad_norm_threshold
        self.grad_min_threshold = grad_min_threshold
        self.outlier_std_threshold = outlier_std_threshold
        self.history_size = history_size
        # Rolling window of recent (finite) gradient norms, keyed by parameter name.
        self.history: Dict[str, List[float]] = {}

    def _check_finite(self, name: str, tensor: torch.Tensor, step: int) -> List[AnomalyResult]:
        """Report NaN/Inf anomalies found in ``tensor``.

        Shared by ``check_gradient`` and ``check_parameter``; computes each
        mask once instead of twice.
        """
        results: List[AnomalyResult] = []

        nan_mask = torch.isnan(tensor)
        if nan_mask.any():
            results.append(AnomalyResult(
                anomaly_type=AnomalyType.NAN_VALUE,
                parameter_name=name,
                value=float('nan'),
                threshold=0.0,
                step=step,
                details={"nan_count": nan_mask.sum().item()}
            ))

        inf_mask = torch.isinf(tensor)
        if inf_mask.any():
            results.append(AnomalyResult(
                anomaly_type=AnomalyType.INF_VALUE,
                parameter_name=name,
                value=float('inf'),
                threshold=0.0,
                step=step,
                details={"inf_count": inf_mask.sum().item()}
            ))

        return results

    def check_gradient(self, name: str, grad: torch.Tensor, step: int) -> List[AnomalyResult]:
        """
        Check a gradient tensor for anomalies.

        Args:
            name: parameter name the gradient belongs to.
            grad: gradient tensor.
            step: current training step.

        Returns:
            List of detected anomalies (empty if none).
        """
        results = self._check_finite(name, grad, step)

        # A non-finite gradient makes the norm NaN/Inf too; recording it would
        # permanently poison the rolling mean/std used for outlier detection
        # (NaN propagates through np.mean/np.std), so skip every norm-based
        # check and return the NaN/Inf findings immediately.
        if results:
            return results

        grad_norm = torch.norm(grad).item()

        # Gradient explosion: norm above the configured ceiling.
        if grad_norm > self.grad_norm_threshold:
            results.append(AnomalyResult(
                anomaly_type=AnomalyType.GRADIENT_EXPLOSION,
                parameter_name=name,
                value=grad_norm,
                threshold=self.grad_norm_threshold,
                step=step
            ))

        # Gradient vanishing: norm below the configured floor.
        if grad_norm < self.grad_min_threshold:
            results.append(AnomalyResult(
                anomaly_type=AnomalyType.GRADIENT_VANISHING,
                parameter_name=name,
                value=grad_norm,
                threshold=self.grad_min_threshold,
                step=step
            ))

        # Maintain a bounded rolling window of recent norms for this parameter.
        history = self.history.setdefault(name, [])
        history.append(grad_norm)
        if len(history) > self.history_size:
            history.pop(0)

        # Outlier detection needs a minimally meaningful sample size.
        if len(history) >= 10:
            history_array = np.array(history)
            mean = float(np.mean(history_array))
            std = float(np.std(history_array))
            deviation = abs(grad_norm - mean)

            # Guard std == 0: otherwise every non-zero deviation would be
            # flagged and the z-score would divide by zero.
            if std > 0 and deviation > self.outlier_std_threshold * std:
                results.append(AnomalyResult(
                    anomaly_type=AnomalyType.OUTLIER,
                    parameter_name=name,
                    value=grad_norm,
                    threshold=mean + self.outlier_std_threshold * std,
                    step=step,
                    details={
                        "mean": mean,
                        "std": std,
                        "z_score": deviation / std
                    }
                ))

        return results

    def check_parameter(self, name: str, param: torch.Tensor, step: int) -> List[AnomalyResult]:
        """
        Check a parameter tensor for NaN/Inf anomalies.

        Args:
            name: parameter name.
            param: parameter tensor.
            step: current training step.

        Returns:
            List of detected anomalies (empty if none).
        """
        return self._check_finite(name, param, step)

    def clear_history(self) -> None:
        """Discard all recorded gradient-norm history."""
        self.history.clear()