"""
分析工具集模块
定义各种日志分析函数，供LLM通过工具调用使用
"""
import re
import json
from typing import Dict, List, Any, Optional
from datetime import datetime, timedelta
from collections import Counter, defaultdict
import logging

logger = logging.getLogger(__name__)


class AnalysisTools:
    """Collection of log-analysis helpers, exposed to an LLM as callable tools.

    Every public method accepts ``logs``: a list whose items are raw strings
    or dicts carrying a ``'content'`` string plus an optional numeric
    ``'score'`` (retrieval relevance).  All inputs are coerced to the dict
    form by :meth:`_normalize_logs` before processing, so callers may mix
    representations freely.
    """

    # Markers that flag a line as an error.  Content is lowercased before
    # matching, so the upper/lower-case duplicates of the original keyword
    # list collapse into this single tuple.
    _ERROR_KEYWORDS = ('error', '异常', '失败', 'exception', 'fatal', 'critical')

    @staticmethod
    def _normalize_logs(logs: List[Any]) -> List[Dict[str, Any]]:
        """Coerce every entry to ``{'content': str, 'score': number}``.

        Dicts pass through unchanged; strings and arbitrary objects are
        wrapped with a default score of 0.
        """
        normalized: List[Dict[str, Any]] = []
        for log in logs:
            if isinstance(log, dict):
                normalized.append(log)
            elif isinstance(log, str):
                normalized.append({'content': log, 'score': 0})
            else:
                normalized.append({'content': str(log), 'score': 0})
        return normalized

    @staticmethod
    def analyze_error_frequency(logs: List[Dict[str, Any]], time_window: Optional[int] = None) -> Dict[str, Any]:
        """Analyze error frequency and error-type distribution.

        Args:
            logs: Log list (strings or dicts with a 'content' field).
            time_window: Window in minutes.  Currently unused; kept for
                interface compatibility — TODO implement windowed analysis.

        Returns:
            Dict with total_errors, error_rate, error_distribution (top 10
            extracted error types) and up to 5 sample error lines.
        """
        normalized_logs = AnalysisTools._normalize_logs(logs)

        error_logs = []
        error_types: Counter = Counter()

        for log in normalized_logs:
            content = log.get('content', '').lower()
            if any(keyword in content for keyword in AnalysisTools._ERROR_KEYWORDS):
                error_logs.append(log)
                # Best-effort extraction of an error-type token, e.g.
                # "ERROR: TimeoutError ..." -> "timeouterror" (content is
                # already lowercased above).
                match = re.search(r'(error|exception|异常)[:\s]+(\w+)', content, re.IGNORECASE)
                if match:
                    error_types[match.group(2)] += 1

        total_errors = len(error_logs)
        return {
            "total_errors": total_errors,
            "error_rate": total_errors / len(normalized_logs) if normalized_logs else 0,
            "error_distribution": dict(error_types.most_common(10)),
            "sample_errors": [log.get('content', '')[:200] for log in error_logs[:5]]
        }

    @staticmethod
    def detect_anomalies(logs: List[Dict[str, Any]], threshold: float = 0.3) -> Dict[str, Any]:
        """Detect anomalous patterns (high error rate, low-relevance logs).

        Args:
            logs: Log list.
            threshold: Error-rate fraction above which the batch is flagged.

        Returns:
            Dict with the anomaly list, anomaly count and the average
            relevance score of the batch.
        """
        if not logs:
            # Keep the result shape consistent with the non-empty path.
            return {"anomalies": [], "anomaly_count": 0, "average_relevance_score": 0}

        normalized_logs = AnalysisTools._normalize_logs(logs)

        scores = [log.get('score', 0) for log in normalized_logs]
        avg_score = sum(scores) / len(scores) if scores else 0

        anomalies = []
        error_stats = AnalysisTools.analyze_error_frequency(normalized_logs)

        # Flag the batch when the error rate exceeds the threshold.
        if error_stats['error_rate'] > threshold:
            anomalies.append({
                "type": "high_error_rate",
                "value": error_stats['error_rate'],
                "description": f"错误率异常高：{error_stats['error_rate']:.2%}"
            })

        # Logs scoring well below the batch average are likely irrelevant.
        low_score_logs = [log for log in normalized_logs if log.get('score', 0) < avg_score * 0.5]
        if low_score_logs:
            anomalies.append({
                "type": "low_relevance",
                "count": len(low_score_logs),
                "description": f"发现 {len(low_score_logs)} 条相关性较低的日志"
            })

        return {
            "anomalies": anomalies,
            "anomaly_count": len(anomalies),
            "average_relevance_score": avg_score
        }

    @staticmethod
    def extract_key_metrics(logs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Extract common metrics: timestamps, IPs, HTTP status codes, URLs.

        Args:
            logs: Log list.

        Returns:
            Aggregate counts and top-N distributions; empty dict for empty
            input.
        """
        if not logs:
            return {}

        normalized_logs = AnalysisTools._normalize_logs(logs)

        # Compile once; these run against every log line.
        time_regexes = [
            re.compile(r'\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}'),
            re.compile(r'\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}'),
        ]
        ip_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
        status_regex = re.compile(r'\b(?:GET|POST|PUT|DELETE|PATCH)\s+.*?\s+(\d{3})\b')
        url_regex = re.compile(r'https?://[^\s]+|/[^\s]+')

        timestamps: List[str] = []
        ip_addresses: List[str] = []
        status_codes: List[str] = []
        urls: List[str] = []

        for log in normalized_logs:
            content = log.get('content', '')
            for regex in time_regexes:
                timestamps.extend(regex.findall(content))
            ip_addresses.extend(ip_regex.findall(content))
            status_codes.extend(status_regex.findall(content))
            urls.extend(url_regex.findall(content)[:3])  # at most 3 URLs per line

        return {
            "total_logs": len(normalized_logs),
            "unique_timestamps": len(set(timestamps)),
            "top_ip_addresses": dict(Counter(ip_addresses).most_common(10)),
            "status_code_distribution": dict(Counter(status_codes).most_common()),
            "top_urls": dict(Counter(urls).most_common(10)),
            "unique_ips": len(set(ip_addresses))
        }

    @staticmethod
    def analyze_trends(logs: List[Dict[str, Any]], metric: str = "error_count") -> Dict[str, Any]:
        """Analyze a trend for the given metric (simple heuristic, extensible).

        Args:
            logs: Log list.
            metric: Metric to analyze; only 'error_count' is supported.

        Returns:
            Trend summary, or an 'unknown' trend for unsupported metrics.
        """
        if metric == "error_count":
            error_analysis = AnalysisTools.analyze_error_frequency(logs)
            return {
                "trend_type": "error_trend",
                "current_error_count": error_analysis["total_errors"],
                "error_rate": error_analysis["error_rate"],
                # Coarse heuristic: >10% error rate is labelled 'increasing'.
                "trend": "increasing" if error_analysis["error_rate"] > 0.1 else "stable"
            }

        return {"trend": "unknown", "message": f"未支持的指标: {metric}"}

    @staticmethod
    def search_pattern(logs: List[Dict[str, Any]], pattern: str, case_sensitive: bool = False) -> Dict[str, Any]:
        """Search the logs with a regular-expression pattern.

        Args:
            logs: Log list.
            pattern: Regular-expression pattern.
            case_sensitive: Match case-sensitively when True (default False).

        Returns:
            Match count and up to 20 matching entries, or an 'error' key
            when the pattern fails to compile.
        """
        normalized_logs = AnalysisTools._normalize_logs(logs)

        flags = 0 if case_sensitive else re.IGNORECASE
        # Only compilation can raise re.error, so keep the try body minimal.
        try:
            regex = re.compile(pattern, flags)
        except re.error as e:
            # Return a structured error instead of raising: the caller is an
            # LLM tool invocation that expects a result dict.
            return {
                "error": f"正则表达式错误: {str(e)}",
                "matches": []
            }

        matches = [
            {"content": log.get('content', '')[:300], "score": log.get('score', 0)}
            for log in normalized_logs
            if regex.search(log.get('content', ''))
        ]

        return {
            "pattern": pattern,
            "match_count": len(matches),
            "matches": matches[:20]  # cap the returned sample
        }

    @staticmethod
    def compare_logs(logs1: List[Dict[str, Any]], logs2: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Compare two batches of logs by volume, error rate and unique IPs.

        Args:
            logs1: First batch.
            logs2: Second batch.

        Returns:
            Per-batch stats plus count/error-rate deltas (logs1 minus logs2).
        """
        metrics1 = AnalysisTools.extract_key_metrics(logs1)
        metrics2 = AnalysisTools.extract_key_metrics(logs2)
        error1 = AnalysisTools.analyze_error_frequency(logs1)
        error2 = AnalysisTools.analyze_error_frequency(logs2)

        return {
            "logs1": {
                "count": len(logs1),
                "error_rate": error1["error_rate"],
                "unique_ips": metrics1.get("unique_ips", 0)
            },
            "logs2": {
                "count": len(logs2),
                "error_rate": error2["error_rate"],
                "unique_ips": metrics2.get("unique_ips", 0)
            },
            "differences": {
                "count_diff": len(logs1) - len(logs2),
                "error_rate_diff": error1["error_rate"] - error2["error_rate"]
            }
        }

    @staticmethod
    def generate_summary(logs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Produce an overall summary of the logs plus action recommendations.

        Args:
            logs: Log list.

        Returns:
            Dict with a 'summary' section and a 'recommendations' list.
        """
        metrics = AnalysisTools.extract_key_metrics(logs)
        error_analysis = AnalysisTools.analyze_error_frequency(logs)
        anomalies = AnalysisTools.detect_anomalies(logs)

        return {
            "summary": {
                "total_logs": len(logs),
                "error_count": error_analysis["total_errors"],
                "error_rate": error_analysis["error_rate"],
                "anomalies_detected": anomalies["anomaly_count"],
                "unique_ip_count": metrics.get("unique_ips", 0),
                "top_error_types": list(error_analysis["error_distribution"].keys())[:5]
            },
            "recommendations": AnalysisTools._generate_recommendations(error_analysis, anomalies)
        }

    @staticmethod
    def _generate_recommendations(error_analysis: Dict, anomalies: Dict) -> List[str]:
        """Derive action recommendations from error and anomaly statistics."""
        recommendations = []

        if error_analysis["error_rate"] > 0.2:
            recommendations.append("错误率较高，建议立即检查系统状态")

        if anomalies["anomaly_count"] > 0:
            recommendations.append("检测到异常模式，建议深入分析异常日志")

        if error_analysis.get("error_distribution"):
            top_error = list(error_analysis["error_distribution"].keys())[0]
            recommendations.append(f"最常见的错误类型是 {top_error}，建议优先排查")

        return recommendations if recommendations else ["系统运行正常，无明显异常"]

    @staticmethod
    def analyze_time_patterns(logs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Analyze per-hour activity, identifying peak and low-traffic hours.

        Args:
            logs: Log list.

        Returns:
            Hour distribution, peak hour/count, average per-hour count, up to
            5 low-traffic hours, and a coarse 'high_traffic'/'normal' label.
        """
        normalized_logs = AnalysisTools._normalize_logs(logs)

        # Each pattern captures the HOUR directly in group 1.
        # BUG FIX: the previous patterns captured a prefix ending before the
        # minutes (e.g. "2024-01-15 14"), so the later `(\d{2}):\d{2}` hour
        # lookup never matched and hours from ISO and Apache-style
        # timestamps were silently dropped.
        hour_regexes = [
            re.compile(r'\d{4}-\d{2}-\d{2}\s+(\d{2}):\d{2}:\d{2}'),  # 2024-01-15 14:23:01
            re.compile(r'\d{2}/\w{3}/\d{4}:(\d{2}):\d{2}:\d{2}'),    # 02/Jan/2024:14:23:01
            re.compile(r'\[(\d{2}):\d{2}:\d{2}\]'),                  # [14:23:01]
        ]

        hour_counts: Counter = Counter()
        for log in normalized_logs:
            content = log.get('content', '')
            for regex in hour_regexes:
                for hour_str in regex.findall(content):
                    hour = int(hour_str)
                    if 0 <= hour <= 23:  # ignore corrupt timestamps
                        hour_counts[hour] += 1

        if hour_counts:
            peak_hour, peak_count = hour_counts.most_common(1)[0]
            avg_count = sum(hour_counts.values()) / len(hour_counts)
            # Hours with well-below-average activity are reported as lows.
            low_hours = [h for h, c in hour_counts.items() if c < avg_count * 0.5]
        else:
            peak_hour, peak_count, avg_count, low_hours = None, 0, 0, []

        return {
            "total_timestamps": sum(hour_counts.values()),
            "hour_distribution": dict(hour_counts.most_common(24)),
            "peak_hour": peak_hour,
            "peak_count": peak_count,
            "average_count": avg_count,
            "low_hours": low_hours[:5],
            "pattern": "high_traffic" if peak_count > avg_count * 1.5 else "normal"
        }

    @staticmethod
    def detect_slow_requests(logs: List[Dict[str, Any]], slow_threshold: int = 1000) -> Dict[str, Any]:
        """Detect slow requests from response times embedded in log text.

        Args:
            logs: Log list (lines are assumed to carry a response time in one
                of the notations below — TODO confirm against producers).
            slow_threshold: Slow-request threshold in milliseconds (default 1000).

        Returns:
            Counts, average/max response time, and up to 10 slow samples.
        """
        normalized_logs = AnalysisTools._normalize_logs(logs)

        # Recognized response-time notations; the first match per line wins.
        time_regexes = [
            re.compile(r'(\d+)ms', re.IGNORECASE),
            re.compile(r'time[=:](\d+)', re.IGNORECASE),
            re.compile(r'duration[=:](\d+)', re.IGNORECASE),
            re.compile(r'耗时[：:](\d+)', re.IGNORECASE),
        ]

        slow_requests = []
        response_times = []

        for log in normalized_logs:
            content = log.get('content', '')
            for regex in time_regexes:
                match = regex.search(content)
                if match:
                    try:
                        time_ms = int(match.group(1))
                    except ValueError:
                        continue  # malformed number; try the next notation
                    response_times.append(time_ms)
                    if time_ms > slow_threshold:
                        slow_requests.append({
                            "content": content[:200],
                            "response_time": time_ms
                        })
                    break

        return {
            "slow_requests_count": len(slow_requests),
            "total_requests": len(response_times) if response_times else len(normalized_logs),
            "average_response_time": sum(response_times) / len(response_times) if response_times else 0,
            "max_response_time": max(response_times) if response_times else 0,
            "slow_threshold": slow_threshold,
            "slow_requests": slow_requests[:10]  # cap the sample list
        }

    @staticmethod
    def detect_security_issues(logs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Detect suspicious patterns (SQL injection, XSS, path traversal, ...).

        Args:
            logs: Log list.

        Returns:
            Total issue count, per-category breakdown, up to 5 samples per
            category, and a coarse risk level.
        """
        normalized_logs = AnalysisTools._normalize_logs(logs)

        # Threat signatures grouped by category; a log line is counted at
        # most once per category (first matching signature wins).
        security_patterns = {
            "sql_injection": [
                r"union\s+select",
                r"';?\s*--",
                r"or\s+1\s*=\s*1",
                r"drop\s+table"
            ],
            "xss": [
                r"<script",
                r"javascript:",
                r"onerror\s*=",
                r"onclick\s*="
            ],
            "path_traversal": [
                r"\.\./",
                r"\.\.\\",
                r"%2e%2e",
                r"\.\.%2f"
            ],
            "suspicious_access": [
                r"/admin",
                r"/config",
                r"/\.env",
                r"wp-admin"
            ]
        }

        detected_issues: Dict[str, List[Dict[str, str]]] = {
            category: [] for category in security_patterns
        }

        for log in normalized_logs:
            content = log.get('content', '').lower()
            for category, patterns in security_patterns.items():
                for pattern in patterns:
                    if re.search(pattern, content, re.IGNORECASE):
                        detected_issues[category].append({
                            "content": log.get('content', '')[:200],
                            "matched_pattern": pattern
                        })
                        break

        total_issues = sum(len(issues) for issues in detected_issues.values())
        issue_summary = {k: len(v) for k, v in detected_issues.items()}

        return {
            "total_security_issues": total_issues,
            "issue_breakdown": issue_summary,
            "detected_issues": {k: v[:5] for k, v in detected_issues.items()},  # <=5 samples each
            "risk_level": "high" if total_issues > 10 else ("medium" if total_issues > 5 else "low")
        }

    @staticmethod
    def statistical_analysis(logs: List[Dict[str, Any]], metric: str = "count") -> Dict[str, Any]:
        """Compute basic statistics over the logs' relevance scores.

        Args:
            logs: Log list.
            metric: Metric selector.  Currently unused; kept for interface
                compatibility — TODO support additional metrics.

        Returns:
            Mean/median/std-dev/min/max/range of the 'score' field, or a
            message dict when no values can be extracted.
        """
        if not logs:
            return {}

        import statistics  # stdlib; local import matches the original style

        normalized_logs = AnalysisTools._normalize_logs(logs)
        scores = [log.get('score', 0) for log in normalized_logs]

        if scores:
            return {
                "total_samples": len(normalized_logs),
                "mean": statistics.mean(scores),
                # median() handles single-element lists; no special case needed.
                "median": statistics.median(scores),
                # stdev() requires at least two samples.
                "std_deviation": statistics.stdev(scores) if len(scores) > 1 else 0,
                "min": min(scores),
                "max": max(scores),
                "range": max(scores) - min(scores)
            }

        return {
            "total_samples": len(normalized_logs),
            "message": "无法提取数值进行分析"
        }


# Tool registry consumed by the workflow engine.
# Each entry maps a tool name to its implementation ("function"), a
# description shown to the LLM, and a "parameters" schema whose per-argument
# specs declare type, whether the argument is required, and a description.
# NOTE: the description strings are runtime data surfaced to the LLM/user and
# are intentionally kept in Chinese.
TOOLS_REGISTRY = {
    "analyze_error_frequency": {
        "function": AnalysisTools.analyze_error_frequency,
        "description": "分析日志中的错误频率和类型分布",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"},
            "time_window": {"type": "int", "required": False, "description": "时间窗口（分钟）"}
        }
    },
    "detect_anomalies": {
        "function": AnalysisTools.detect_anomalies,
        "description": "检测日志中的异常模式",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"},
            "threshold": {"type": "float", "required": False, "description": "异常阈值，默认0.3"}
        }
    },
    "extract_key_metrics": {
        "function": AnalysisTools.extract_key_metrics,
        "description": "从日志中提取关键指标（IP、状态码、URL等）",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"}
        }
    },
    "analyze_trends": {
        "function": AnalysisTools.analyze_trends,
        "description": "分析日志趋势",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"},
            "metric": {"type": "str", "required": False, "description": "要分析的指标，默认'error_count'"}
        }
    },
    "search_pattern": {
        "function": AnalysisTools.search_pattern,
        "description": "在日志中搜索特定正则表达式模式",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"},
            "pattern": {"type": "str", "required": True, "description": "正则表达式模式"},
            "case_sensitive": {"type": "bool", "required": False, "description": "是否区分大小写，默认False"}
        }
    },
    "compare_logs": {
        "function": AnalysisTools.compare_logs,
        "description": "比较两组日志的差异",
        "parameters": {
            "logs1": {"type": "list", "required": True, "description": "第一组日志"},
            "logs2": {"type": "list", "required": True, "description": "第二组日志"}
        }
    },
    "generate_summary": {
        "function": AnalysisTools.generate_summary,
        "description": "生成日志的全面摘要和建议",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"}
        }
    },
    "analyze_time_patterns": {
        "function": AnalysisTools.analyze_time_patterns,
        "description": "分析日志的时间模式，识别峰值时间和低谷时间",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"}
        }
    },
    "detect_slow_requests": {
        "function": AnalysisTools.detect_slow_requests,
        "description": "检测慢请求，分析响应时间分布",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"},
            "slow_threshold": {"type": "int", "required": False, "description": "慢请求阈值（毫秒），默认1000"}
        }
    },
    "detect_security_issues": {
        "function": AnalysisTools.detect_security_issues,
        "description": "检测安全问题，识别SQL注入、XSS等安全威胁",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"}
        }
    },
    "statistical_analysis": {
        "function": AnalysisTools.statistical_analysis,
        "description": "进行统计分析，计算均值、中位数、标准差等统计指标",
        "parameters": {
            "logs": {"type": "list", "required": True, "description": "日志列表"},
            "metric": {"type": "str", "required": False, "description": "分析指标类型，默认'count'"}
        }
    }
}
