#!/usr/bin/env python3
"""
分析工具集
"""

import re
import json
from collections import Counter
from datetime import datetime
from typing import Any, Dict, List, Optional

from ..core.tool import Tool, ToolResult

class LogParserTool(Tool):
    """Log parsing tool: turns raw log text into structured records."""

    def __init__(self):
        super().__init__(
            name="parse_logs",
            description="日志解析工具：解析和结构化日志数据"
        )

    def execute(self, log_data: str, log_format: str = "auto", **kwargs) -> ToolResult:
        """Parse multi-line log text into structured records plus summary stats.

        Args:
            log_data: Raw log text, one record per line.
            log_format: "auto" (detect per line), "json", "apache" or "generic".

        Returns:
            ToolResult whose data holds ``parsed_logs``, ``statistics`` and
            ``total_lines`` on success; a failed ToolResult with the error
            message on any unexpected exception.
        """
        try:
            parsed_logs = []
            # Enumerate from 1 so line_number matches the input's line count;
            # blank lines are skipped but still advance the counter.
            for line_num, line in enumerate(log_data.strip().split('\n'), 1):
                if not line.strip():
                    continue
                parsed_log = self._parse_log_line(line, log_format)
                parsed_log['line_number'] = line_num
                parsed_logs.append(parsed_log)

            stats = self._analyze_parsed_logs(parsed_logs)

            return ToolResult(
                success=True,
                data={
                    "parsed_logs": parsed_logs,
                    "statistics": stats,
                    # Note: counts non-blank (parsed) lines, not raw input lines.
                    "total_lines": len(parsed_logs)
                },
                metadata={
                    "log_format": log_format,
                    "parse_time": datetime.now().isoformat()
                }
            )

        except Exception as e:
            # Tool boundary: report failure via ToolResult instead of raising.
            return ToolResult(
                success=False,
                error=str(e)
            )

    def _parse_log_line(self, line: str, log_format: str) -> Dict[str, Any]:
        """Parse one log line; "auto" detects the format first."""
        if log_format == "auto":
            log_format = self._detect_log_format(line)

        # Base record; format-specific parsers overwrite what they can extract.
        parsed = {
            "raw_line": line,
            "format": log_format,
            "timestamp": None,
            "level": None,
            "message": None
        }

        if log_format == "json":
            parsed.update(self._parse_json_log(line))
        elif log_format == "apache":
            parsed.update(self._parse_apache_log(line))
        else:
            parsed.update(self._parse_generic_log(line))

        return parsed

    def _detect_log_format(self, line: str) -> str:
        """Heuristically classify a line as "json", "apache" or "generic"."""
        stripped = line.strip()
        if stripped.startswith('{') and stripped.endswith('}'):
            return "json"
        if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', line):
            # A leading IPv4 address suggests an Apache access-log line.
            return "apache"
        return "generic"

    def _parse_json_log(self, line: str) -> Dict[str, Any]:
        """Parse a JSON log line; fall back to raw text on invalid input.

        Was a bare ``except:`` before; now catches only the decode failure
        and explicitly handles valid-but-non-object JSON (e.g. a bare number
        when the caller forces log_format="json").
        """
        try:
            data = json.loads(line)
        except ValueError:  # json.JSONDecodeError is a ValueError subclass
            return {"message": line}
        if not isinstance(data, dict):
            return {"message": line}
        return {
            "timestamp": data.get("timestamp"),
            "level": data.get("level"),
            "message": data.get("message"),
            "service": data.get("service")
        }

    def _parse_apache_log(self, line: str) -> Dict[str, Any]:
        """Parse an Apache common-format access-log line."""
        pattern = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - \[(.*?)\] "(.*?)" (\d{3}) (\d+)'
        match = re.match(pattern, line)
        if not match:
            return {"message": line}
        ip, timestamp, request, status, _size = match.groups()
        status_code = int(status)
        return {
            "timestamp": timestamp,
            "ip_address": ip,
            "request": request,
            "status_code": status_code,
            # 4xx/5xx responses are treated as errors for level statistics.
            "level": "ERROR" if status_code >= 400 else "INFO"
        }

    def _parse_generic_log(self, line: str) -> Dict[str, Any]:
        """Best-effort extraction of timestamp and level from free-form logs."""
        result = {"message": line}

        # Timestamp: ISO-like ("2024-01-02 03:04:05") or Apache-style date.
        timestamp_patterns = [
            r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})',
            r'(\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2})'
        ]
        for pattern in timestamp_patterns:
            match = re.search(pattern, line)
            if match:
                result["timestamp"] = match.group(1)
                break

        # Level: bracketed form first, then a bare word.  (A third
        # "timestamp + [LEVEL]" pattern was removed as dead code: any line
        # it matched was already caught by the bare-word pattern above.)
        level_patterns = [
            r'\[(ERROR|WARN|INFO|DEBUG|FATAL)\]',
            r'\b(ERROR|WARN|INFO|DEBUG|FATAL)\b'
        ]
        for pattern in level_patterns:
            match = re.search(pattern, line)
            if match:
                result["level"] = match.group(1)
                break

        return result

    def _analyze_parsed_logs(self, parsed_logs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Aggregate level/service/status-code counts over parsed records."""
        stats = {
            "total_logs": len(parsed_logs),
            "level_distribution": Counter(),
            "service_distribution": Counter(),
            "error_codes": Counter()
        }

        for log in parsed_logs:
            if log.get("level"):
                stats["level_distribution"][log["level"]] += 1
            if log.get("service"):
                stats["service_distribution"][log["service"]] += 1
            if log.get("status_code"):
                stats["error_codes"][log["status_code"]] += 1

        return stats

    def get_schema(self) -> Dict[str, Any]:
        """Return the function-calling schema for this tool."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": {
                    "type": "object",
                    "properties": {
                        "log_data": {
                            "type": "string",
                            "description": "要解析的日志数据"
                        },
                        "log_format": {
                            "type": "string",
                            "enum": ["auto", "json", "apache", "generic"],
                            "description": "日志格式类型"
                        }
                    },
                    "required": ["log_data"]
                }
            }
        }

class ErrorStatisticsTool(Tool):
    """Tool that tallies log levels and error rates per service."""

    # Levels tracked in the distribution; anything else is ignored.
    _TRACKED_LEVELS = ("ERROR", "WARN", "FATAL", "INFO", "DEBUG")

    def __init__(self):
        super().__init__(
            name="count_errors",
            description="错误统计工具：统计错误分布和频率"
        )

    def execute(self, parsed_logs: List[Dict[str, Any]], service_name: str = "all", **kwargs) -> ToolResult:
        """Count level occurrences for one service (or "all" services).

        Args:
            parsed_logs: Structured log records (as produced by parse_logs).
            service_name: Service to filter on; "all" keeps every record.

        Returns:
            ToolResult with the level distribution, overall error rate
            (ERROR + FATAL as a percentage of all matching logs), and up to
            ten ERROR and WARN detail entries each.
        """
        try:
            # Keep every record when "all" is requested, otherwise filter.
            if service_name == "all":
                relevant = list(parsed_logs)
            else:
                relevant = [rec for rec in parsed_logs
                            if rec.get("service") == service_name]

            level_counts = dict.fromkeys(self._TRACKED_LEVELS, 0)
            error_details: List[Dict[str, Any]] = []
            warning_details: List[Dict[str, Any]] = []

            for rec in relevant:
                raw_level = rec.get("level", "")
                if not raw_level:
                    continue
                level = raw_level.upper()
                if level not in level_counts:
                    continue
                level_counts[level] += 1
                if level not in ("ERROR", "WARN"):
                    continue
                detail = {
                    "timestamp": rec.get("timestamp"),
                    "message": rec.get("message"),
                    "service": rec.get("service")
                }
                bucket = error_details if level == "ERROR" else warning_details
                bucket.append(detail)

            # Error rate = (ERROR + FATAL) / total, as a percentage.
            total = len(relevant)
            severe = level_counts["ERROR"] + level_counts["FATAL"]
            error_rate = (severe / total * 100) if total > 0 else 0

            return ToolResult(
                success=True,
                data={
                    "service_name": service_name,
                    "total_logs": total,
                    "error_distribution": level_counts,
                    "error_rate": round(error_rate, 2),
                    "error_details": error_details[:10],
                    "warning_details": warning_details[:10]
                },
                metadata={
                    "analysis_time": datetime.now().isoformat()
                }
            )

        except Exception as e:
            # Tool boundary: failures are reported, not raised.
            return ToolResult(
                success=False,
                error=str(e)
            )

    def get_schema(self) -> Dict[str, Any]:
        """Return the function-calling schema for this tool."""
        parameters = {
            "type": "object",
            "properties": {
                "parsed_logs": {
                    "type": "array",
                    "description": "解析后的日志数据"
                },
                "service_name": {
                    "type": "string",
                    "description": "服务名称，使用'all'统计所有服务"
                }
            },
            "required": ["parsed_logs"]
        }
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": parameters
            }
        }

class TimePatternTool(Tool):
    """Tool that analyzes when errors of a given level occur over time."""

    # Timestamp layouts tried, in order, by the extraction helpers.
    _TIMESTAMP_FORMATS = (
        "%Y-%m-%d %H:%M:%S",
        "%Y-%m-%dT%H:%M:%S",
        "%d/%b/%Y:%H:%M:%S",
    )

    def __init__(self):
        super().__init__(
            name="analyze_time_patterns",
            description="时间模式分析工具：分析错误发生的时间规律"
        )

    def execute(self, parsed_logs: List[Dict[str, Any]], error_type: str = "ERROR", **kwargs) -> ToolResult:
        """Analyze hourly/daily distribution and peaks for one log level.

        Args:
            parsed_logs: Structured log records (as produced by parse_logs).
            error_type: Level to analyze (case-insensitive match).

        Returns:
            ToolResult with hourly/daily distributions and detected peak
            periods; a failed ToolResult when no matching logs exist or an
            unexpected error occurs.
        """
        try:
            # Keep only records whose level matches, case-insensitively.
            wanted = error_type.upper()
            filtered_logs = [
                log for log in parsed_logs
                if log.get("level", "") and log["level"].upper() == wanted
            ]

            if not filtered_logs:
                return ToolResult(
                    success=False,
                    error=f"未找到类型为 '{error_type}' 的日志"
                )

            time_patterns = {
                "hourly_distribution": self._analyze_hourly_pattern(filtered_logs),
                "daily_distribution": self._analyze_daily_pattern(filtered_logs),
                "peak_periods": self._identify_peak_periods(filtered_logs)
            }

            return ToolResult(
                success=True,
                data={
                    "error_type": error_type,
                    "total_errors": len(filtered_logs),
                    "time_patterns": time_patterns
                },
                metadata={
                    "analysis_time": datetime.now().isoformat()
                }
            )

        except Exception as e:
            # Tool boundary: failures are reported, not raised.
            return ToolResult(
                success=False,
                error=str(e)
            )

    def _analyze_hourly_pattern(self, logs: List[Dict[str, Any]]) -> Dict[str, int]:
        """Count records per hour of day; keys are hour strings like "14"."""
        hourly_count: Dict[str, int] = {}

        for log in logs:
            timestamp = log.get("timestamp")
            if not timestamp:
                continue
            try:
                hour = self._extract_hour(timestamp)
            except (TypeError, ValueError):
                # Non-string or malformed timestamps are skipped, not fatal
                # (was a bare except before).
                continue
            if hour is not None:
                key = str(hour)
                hourly_count[key] = hourly_count.get(key, 0) + 1

        return hourly_count

    def _analyze_daily_pattern(self, logs: List[Dict[str, Any]]) -> Dict[str, int]:
        """Count records per calendar day; keys are "YYYY-MM-DD" strings."""
        daily_count: Dict[str, int] = {}

        for log in logs:
            timestamp = log.get("timestamp")
            if not timestamp:
                continue
            try:
                date = self._extract_date(timestamp)
            except (TypeError, ValueError):
                # Skip unparseable timestamps (was a bare except before).
                continue
            if date:
                daily_count[date] = daily_count.get(date, 0) + 1

        return daily_count

    def _identify_peak_periods(self, logs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Return hours whose count exceeds mean + 1 population std-dev.

        Hours above mean + 2 std-devs are flagged "high", the rest "medium";
        results are sorted by count, descending.
        """
        hourly_count = self._analyze_hourly_pattern(logs)

        if not hourly_count:
            return []

        values = list(hourly_count.values())
        mean = sum(values) / len(values)
        std = (sum((x - mean) ** 2 for x in values) / len(values)) ** 0.5

        threshold = mean + std
        peak_periods = [
            {
                "hour": hour,
                "count": count,
                "intensity": "high" if count > mean + 2 * std else "medium"
            }
            for hour, count in hourly_count.items()
            if count > threshold
        ]

        return sorted(peak_periods, key=lambda x: x["count"], reverse=True)

    def _extract_hour(self, timestamp: str) -> Optional[int]:
        """Extract the hour from a timestamp string, or None if unparseable.

        (Return annotation fixed: the previous ``-> int`` hid the None path.)
        """
        for fmt in self._TIMESTAMP_FORMATS:
            try:
                return datetime.strptime(timestamp, fmt).hour
            except ValueError:
                continue

        # Fallback: grab the hour field of any "HH:MM:SS" substring.
        hour_match = re.search(r'(\d{2}):\d{2}:\d{2}', timestamp)
        if hour_match:
            return int(hour_match.group(1))

        return None

    def _extract_date(self, timestamp: str) -> Optional[str]:
        """Extract "YYYY-MM-DD" from a timestamp string, or None.

        (Return annotation fixed: the previous ``-> str`` hid the None path.)
        """
        for fmt in self._TIMESTAMP_FORMATS:
            try:
                return datetime.strptime(timestamp, fmt).strftime("%Y-%m-%d")
            except ValueError:
                continue

        return None

    def get_schema(self) -> Dict[str, Any]:
        """Return the function-calling schema for this tool."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": {
                    "type": "object",
                    "properties": {
                        "parsed_logs": {
                            "type": "array",
                            "description": "解析后的日志数据"
                        },
                        "error_type": {
                            "type": "string",
                            "enum": ["ERROR", "WARN", "FATAL", "INFO", "DEBUG"],
                            "description": "要分析的错误类型"
                        }
                    },
                    "required": ["parsed_logs"]
                }
            }
        }
