import re
from typing import Dict, Any, List
import math

class ConfidenceAnalyzer:
    """Heuristic confidence scorer for anomaly-diagnosis records.

    Six weighted signals -- issue type, severity, evidence quality,
    root-cause specificity, contributing factors, and free-text pattern
    matches -- are averaged into a single score, which is then boosted by
    a non-linear adjustment and clamped to [0, 1].
    """

    def __init__(self, threshold: float = 0.6):
        """
        Initialize the confidence analyzer.

        Args:
            threshold: Confidence threshold, default 0.6. Stored for
                callers to compare scores against; it is not read by any
                method of this class.
        """
        self.threshold = threshold

        # Base weight per issue type; unknown types fall back to 0.5 in
        # analyze_confidence(). The Chinese keys are labels produced by
        # the upstream system -- they are runtime data, do not rename.
        self.issue_type_weights = {
            "performance_degradation": 0.9,
            "memory_leak": 0.85,
            "cpu_usage": 0.8,
            "disk_io": 0.75,
            "network_issue": 0.7,
            "kernel_issue": 0.8,
            "process_issue": 0.75,
            "system_metric": 0.7,
            "监控警报错误": 0.75,  # monitoring-alert errors
            "监控系统配置": 0.7,   # monitoring-system configuration
            "设备识别问题": 0.7    # device-identification issues
        }

        # Weights for string-valued severities ("high"/"medium"/"low");
        # unrecognized strings fall back to 0.7 in analyze_confidence().
        self.severity_weights = {
            "high": 0.9,
            "medium": 0.8,
            "low": 0.7
        }

        # Regex patterns grouped by the confidence level they suggest
        # when found in the anomaly's combined text.
        self.confidence_patterns = {
            "high_confidence": [
                r"cpu.*usage.*spike",
                r"memory.*leak",
                r"database.*connection.*pool.*exhaustion",
                r"slow.*query",
                r"connection.*leak",
                r"oom.*kill",
                r"disk.*full",
                r"network.*timeout"
            ],
            "medium_confidence": [
                r"performance.*degradation",
                r"high.*load",
                r"slow.*response",
                r"timeout",
                r"error.*rate"
            ],
            "low_confidence": [
                r"potential.*issue",
                r"might.*be",
                r"could.*be",
                r"possibly"
            ]
        }

    def analyze_confidence(self, anomaly: Dict[str, Any]) -> float:
        """
        Score how confident we are in an anomaly diagnosis.

        Args:
            anomaly: Anomaly record. Recognized keys: "issue_type" (str),
                "severity" (number, or a "high"/"medium"/"low" string),
                "evidence" (list of str), "root_cause" (str), and
                "contributing_factors" (list of str). Missing keys
                default to empty/zero values.

        Returns:
            float: Confidence score clamped to [0, 1].
        """
        confidence_factors: List[float] = []

        # 1. Issue-type weight from the predefined table (0.5 default).
        issue_type = anomaly.get("issue_type", "")
        confidence_factors.append(self.issue_type_weights.get(issue_type, 0.5))

        # 2. Severity weight: numeric severities are bucketed; string
        #    severities are looked up in severity_weights. (Bug fix: the
        #    original defined severity_weights but never used it, so
        #    "high"/"medium" strings always scored a flat 0.7.)
        severity = anomaly.get("severity", 0)
        if isinstance(severity, (int, float)):
            if severity >= 8:
                severity_weight = 0.9
            elif severity >= 6:
                severity_weight = 0.8
            elif severity >= 4:
                severity_weight = 0.75
            else:
                severity_weight = 0.7
        else:
            severity_weight = self.severity_weights.get(severity, 0.7)
        confidence_factors.append(severity_weight)

        # 3. Quantity and quality of the supplied evidence.
        confidence_factors.append(self._analyze_evidence(anomaly.get("evidence", [])))

        # 4. Specificity of the stated root cause.
        confidence_factors.append(self._analyze_root_cause(anomaly.get("root_cause", "")))

        # 5. Quality of the contributing factors.
        confidence_factors.append(
            self._analyze_contributing_factors(anomaly.get("contributing_factors", []))
        )

        # 6. Regex pattern matches over all textual fields combined.
        confidence_factors.append(self._analyze_text_patterns(anomaly))

        # Average the six factors, then apply the non-linear boost.
        mean_confidence = sum(confidence_factors) / len(confidence_factors)
        final_confidence = self._apply_nonlinear_adjustment(mean_confidence)

        return min(max(final_confidence, 0.0), 1.0)

    def _analyze_evidence(self, evidence: List[str]) -> float:
        """Score evidence quantity and quality; result in [0, 1]."""
        if not evidence:
            return 0.7  # generous floor so missing evidence is not fatal

        score = 0.0
        total_evidence = len(evidence)

        # Quantity component.
        if total_evidence >= 5:
            score += 0.5
        elif total_evidence >= 3:
            score += 0.4
        elif total_evidence >= 1:
            score += 0.3

        # Quality component: count keyword hits per evidence item.
        # (Bug fix: the original listed "设备" twice, double-counting a
        # single occurrence toward the >= 2 match bonus.)
        quality_keywords = [
            "log", "metric", "monitoring", "alert", "error",
            "spike", "increase", "decrease", "timeout", "failure",
            "usage", "cpu", "memory", "disk", "network", "process",
            "threshold", "配置", "警报", "监控", "设备", "unknown",
            "active", "持续时间", "使用率", "负载"
        ]

        for evidence_item in evidence:
            evidence_lower = evidence_item.lower()
            keyword_matches = sum(1 for keyword in quality_keywords
                                  if keyword in evidence_lower)
            if keyword_matches >= 2:
                score += 0.4
            elif keyword_matches >= 1:
                score += 0.3

        # Bonus for monitoring-related evidence items.
        for evidence_item in evidence:
            if any(keyword in evidence_item.lower()
                   for keyword in ["usage", "监控", "alert", "threshold", "active", "设备"]):
                score += 0.2

        return min(score, 1.0)

    def _analyze_root_cause(self, root_cause: str) -> float:
        """Score how specific the stated root cause is; result in [0, 1]."""
        if not root_cause:
            return 0.7  # generous floor for an empty root cause

        root_cause_lower = root_cause.lower()

        # Specific, actionable root-cause phrases (English and Chinese).
        specific_keywords = [
            "connection pool exhaustion",
            "memory leak",
            "cpu overload",
            "disk full",
            "network timeout",
            "database slow",
            "process hanging",
            "监控系统误报",
            "阈值设置过低",
            "设备未正确识别",
            "监控警报错误",
            "cpu使用率",
            "监控系统配置",
            "cpu使用率低于阈值",
            "设备未正确识别导致数据不准确"
        ]

        # Vague phrases that slightly reduce confidence.
        vague_keywords = [
            "performance issue",
            "system problem",
            "resource issue",
            "configuration problem"
        ]

        score = 0.75  # base score

        # One-time bonus for the first specific phrase found.
        for keyword in specific_keywords:
            if keyword in root_cause_lower:
                score += 0.4
                break

        # One-time small penalty for the first vague phrase found.
        for keyword in vague_keywords:
            if keyword in root_cause_lower:
                score -= 0.05
                break

        # Bonus if the root cause mentions monitoring-related terms.
        if any(keyword in root_cause_lower
               for keyword in ["监控", "警报", "阈值", "配置", "设备", "使用率", "识别"]):
            score += 0.2

        return min(max(score, 0.0), 1.0)

    def _analyze_contributing_factors(self, factors: List[str]) -> float:
        """Score the quality of contributing factors; result in [0, 1]."""
        if not factors:
            return 0.7  # generous floor for an empty factor list

        score = 0.0

        # Technical terms (English and Chinese) weigh a factor higher.
        technical_keywords = [
            "query", "index", "connection", "memory", "cpu",
            "disk", "network", "process", "thread", "cache",
            "监控", "配置", "阈值", "警报", "设备", "系统",
            "threshold", "monitoring", "alert", "device", "system",
            "设置", "标识", "信息", "数据", "准确"
        ]

        for factor in factors:
            factor_lower = factor.lower()
            tech_matches = sum(1 for keyword in technical_keywords
                               if keyword in factor_lower)
            if tech_matches >= 2:
                score += 0.4
            elif tech_matches >= 1:
                score += 0.3
            else:
                score += 0.2  # every factor contributes at least a little

        # Bonus for monitoring-related factors.
        for factor in factors:
            if any(keyword in factor.lower()
                   for keyword in ["监控", "配置", "阈值", "警报", "设置", "标识"]):
                score += 0.2

        return min(score, 1.0)

    def _analyze_text_patterns(self, anomaly: Dict[str, Any]) -> float:
        """Score regex pattern matches over all text fields; result in [0, 1]."""
        score = 0.75  # base score

        # Concatenate every textual field for a single pass of matching.
        all_text = " ".join([
            anomaly.get("issue_type", ""),
            anomaly.get("root_cause", ""),
            " ".join(anomaly.get("evidence", [])),
            " ".join(anomaly.get("contributing_factors", []))
        ]).lower()

        # Only presence matters for each pattern group, so any() suffices.
        if any(re.search(pattern, all_text)
               for pattern in self.confidence_patterns["high_confidence"]):
            score += 0.4
        if any(re.search(pattern, all_text)
               for pattern in self.confidence_patterns["medium_confidence"]):
            score += 0.3
        if any(re.search(pattern, all_text)
               for pattern in self.confidence_patterns["low_confidence"]):
            score -= 0.05  # hedging language slightly lowers confidence

        # Bonus if any monitoring-related term appears anywhere.
        if any(keyword in all_text
               for keyword in ["监控", "警报", "阈值", "配置", "设备", "usage", "cpu", "active", "使用率"]):
            score += 0.2

        return min(max(score, 0.0), 1.0)

    def _apply_nonlinear_adjustment(self, confidence: float) -> float:
        """Boost a raw score toward 1.0; lower scores get a stronger pull.

        Each branch moves the score a fixed fraction of the remaining
        distance to 1.0, so the result never exceeds 1.0.
        """
        if confidence >= 0.8:
            # High-confidence region: moderate boost.
            return confidence + (1 - confidence) * 0.15
        elif confidence >= 0.6:
            # Medium-confidence region: moderate boost.
            return confidence + (1 - confidence) * 0.1
        elif confidence >= 0.4:
            # Low-confidence region: stronger boost.
            return confidence + (1 - confidence) * 0.2
        else:
            # Very-low-confidence region: strongest boost.
            return confidence + (1 - confidence) * 0.25

    def get_confidence_level(self, confidence_score: float) -> str:
        """Map a numeric confidence score to a descriptive level label."""
        if confidence_score >= 0.9:
            return "very_high"
        elif confidence_score >= 0.8:
            return "high"
        elif confidence_score >= 0.6:
            return "medium"
        elif confidence_score >= 0.4:
            return "low"
        else:
            return "very_low"
