"""
实时日志记录服务
集成现有日志系统，提供详细的运行日志记录
"""
import asyncio
import json
import os
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional
from pathlib import Path
from collections import deque
import structlog

from core.config import get_settings

logger = structlog.get_logger(__name__)


class LogLevel:
    """Symbolic names for the supported log severity levels.

    Values are plain strings so they can be used directly as dict keys
    and embedded in formatted log lines.
    """

    DEBUG, INFO, WARNING, ERROR, CRITICAL = (
        "DEBUG",
        "INFO",
        "WARNING",
        "ERROR",
        "CRITICAL",
    )


class LogEntry:
    """A single structured log record.

    Captures the creation time, severity level, message text, originating
    component, and arbitrary metadata. ``log_id`` combines a
    second-resolution timestamp with the object's ``id()`` to form a
    mostly-unique identifier.
    """

    def __init__(self, level: str, message: str, component: str,
                 metadata: Optional[Dict[str, Any]] = None):
        now = datetime.now()
        self.timestamp = now
        self.level = level
        self.message = message
        self.component = component
        # A falsy (None or empty) metadata argument becomes a fresh dict.
        self.metadata = metadata if metadata else {}
        self.log_id = f"{now.strftime('%Y%m%d_%H%M%S')}_{id(self)}"

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this entry into a JSON-friendly dictionary."""
        return dict(
            log_id=self.log_id,
            timestamp=self.timestamp.isoformat(),
            level=self.level,
            message=self.message,
            component=self.component,
            metadata=self.metadata,
        )


class RealTimeLoggingService:
    """实时日志记录服务"""
    
    def __init__(self):
        self.settings = get_settings()
        
        # 日志缓存
        self.log_buffer = deque(maxlen=10000)  # 内存中保留最近10000条日志
        self.component_logs = {}  # 按组件分类的日志
        
        # 日志文件路径
        self.log_dir = Path("./logs")
        self.log_dir.mkdir(exist_ok=True)
        
        # 日志文件
        self.main_log_file = self.log_dir / "real_time_system.log"
        self.performance_log_file = self.log_dir / "performance.log"
        self.error_log_file = self.log_dir / "errors.log"
        self.stream_log_file = self.log_dir / "stream_processing.log"
        self.ai_log_file = self.log_dir / "ai_processing.log"
        
        # 日志统计
        self.log_stats = {
            "total_logs": 0,
            "logs_by_level": {
                LogLevel.DEBUG: 0,
                LogLevel.INFO: 0,
                LogLevel.WARNING: 0,
                LogLevel.ERROR: 0,
                LogLevel.CRITICAL: 0
            },
            "logs_by_component": {}
        }
        
        # 日志写入任务
        self.log_writer_task = None
        self.is_running = False
        self.pending_logs = asyncio.Queue()
    
    async def start_service(self):
        """启动日志服务"""
        if self.is_running:
            return
        
        self.is_running = True
        self.log_writer_task = asyncio.create_task(self._log_writer_loop())
        
        await self.log_info("real_time_logging", "实时日志服务已启动")
    
    async def stop_service(self):
        """停止日志服务"""
        if not self.is_running:
            return
        
        self.is_running = False
        
        if self.log_writer_task:
            self.log_writer_task.cancel()
            try:
                await self.log_writer_task
            except asyncio.CancelledError:
                pass
        
        # 写入剩余的日志
        await self._flush_pending_logs()
        
        await self.log_info("real_time_logging", "实时日志服务已停止")
    
    async def _log_writer_loop(self):
        """日志写入循环"""
        try:
            while self.is_running:
                try:
                    # 批量处理日志
                    logs_to_write = []
                    
                    # 收集一批日志（最多等待1秒或收集到100条）
                    timeout = 1.0
                    batch_size = 100
                    
                    try:
                        log_entry = await asyncio.wait_for(self.pending_logs.get(), timeout=timeout)
                        logs_to_write.append(log_entry)
                        
                        # 尝试收集更多日志
                        for _ in range(batch_size - 1):
                            try:
                                log_entry = self.pending_logs.get_nowait()
                                logs_to_write.append(log_entry)
                            except asyncio.QueueEmpty:
                                break
                    
                    except asyncio.TimeoutError:
                        continue
                    
                    # 批量写入日志
                    if logs_to_write:
                        await self._write_logs_batch(logs_to_write)
                
                except Exception as e:
                    print(f"日志写入循环异常: {e}")
                    await asyncio.sleep(1)
        
        except asyncio.CancelledError:
            pass
    
    async def _write_logs_batch(self, log_entries: List[LogEntry]):
        """批量写入日志"""
        try:
            # 按类型分组日志
            main_logs = []
            performance_logs = []
            error_logs = []
            stream_logs = []
            ai_logs = []
            
            for entry in log_entries:
                # 添加到主日志
                main_logs.append(entry)
                
                # 根据组件和级别分类
                if entry.component in ["performance_monitor", "resource_monitor"]:
                    performance_logs.append(entry)
                elif entry.level in [LogLevel.ERROR, LogLevel.CRITICAL]:
                    error_logs.append(entry)
                elif entry.component in ["stream_processor", "stream_pipeline", "camera_stream"]:
                    stream_logs.append(entry)
                elif entry.component.startswith("ai_") or "ai" in entry.component:
                    ai_logs.append(entry)
            
            # 写入不同的日志文件
            await self._write_to_file(self.main_log_file, main_logs)
            
            if performance_logs:
                await self._write_to_file(self.performance_log_file, performance_logs)
            
            if error_logs:
                await self._write_to_file(self.error_log_file, error_logs)
            
            if stream_logs:
                await self._write_to_file(self.stream_log_file, stream_logs)
            
            if ai_logs:
                await self._write_to_file(self.ai_log_file, ai_logs)
        
        except Exception as e:
            print(f"批量写入日志失败: {e}")
    
    async def _write_to_file(self, file_path: Path, log_entries: List[LogEntry]):
        """写入日志到文件"""
        try:
            with open(file_path, 'a', encoding='utf-8') as f:
                for entry in log_entries:
                    log_line = f"{entry.timestamp.isoformat()} [{entry.level}] {entry.component}: {entry.message}"
                    if entry.metadata:
                        log_line += f" | {json.dumps(entry.metadata, ensure_ascii=False)}"
                    f.write(log_line + "\n")
        
        except Exception as e:
            print(f"写入日志文件失败 {file_path}: {e}")
    
    async def _flush_pending_logs(self):
        """刷新待写入的日志"""
        try:
            logs_to_write = []
            
            while not self.pending_logs.empty():
                try:
                    log_entry = self.pending_logs.get_nowait()
                    logs_to_write.append(log_entry)
                except asyncio.QueueEmpty:
                    break
            
            if logs_to_write:
                await self._write_logs_batch(logs_to_write)
        
        except Exception as e:
            print(f"刷新待写入日志失败: {e}")
    
    async def _add_log(self, level: str, component: str, message: str, 
                      metadata: Optional[Dict[str, Any]] = None):
        """添加日志条目"""
        try:
            log_entry = LogEntry(level, message, component, metadata)
            
            # 添加到缓存
            self.log_buffer.append(log_entry)
            
            # 按组件分类
            if component not in self.component_logs:
                self.component_logs[component] = deque(maxlen=1000)
            self.component_logs[component].append(log_entry)
            
            # 更新统计
            self.log_stats["total_logs"] += 1
            self.log_stats["logs_by_level"][level] += 1
            
            if component not in self.log_stats["logs_by_component"]:
                self.log_stats["logs_by_component"][component] = 0
            self.log_stats["logs_by_component"][component] += 1
            
            # 添加到写入队列
            if self.is_running:
                await self.pending_logs.put(log_entry)
        
        except Exception as e:
            print(f"添加日志失败: {e}")
    
    # ==================== 公共日志方法 ====================
    
    async def log_debug(self, component: str, message: str, metadata: Optional[Dict[str, Any]] = None):
        """记录调试日志"""
        await self._add_log(LogLevel.DEBUG, component, message, metadata)
    
    async def log_info(self, component: str, message: str, metadata: Optional[Dict[str, Any]] = None):
        """记录信息日志"""
        await self._add_log(LogLevel.INFO, component, message, metadata)
    
    async def log_warning(self, component: str, message: str, metadata: Optional[Dict[str, Any]] = None):
        """记录警告日志"""
        await self._add_log(LogLevel.WARNING, component, message, metadata)
    
    async def log_error(self, component: str, message: str, metadata: Optional[Dict[str, Any]] = None):
        """记录错误日志"""
        await self._add_log(LogLevel.ERROR, component, message, metadata)
    
    async def log_critical(self, component: str, message: str, metadata: Optional[Dict[str, Any]] = None):
        """记录严重错误日志"""
        await self._add_log(LogLevel.CRITICAL, component, message, metadata)
    
    # ==================== 专用日志方法 ====================
    
    async def log_stream_event(self, camera_id: str, event_type: str, message: str, 
                              metadata: Optional[Dict[str, Any]] = None):
        """记录流处理事件"""
        full_metadata = {"camera_id": camera_id, "event_type": event_type}
        if metadata:
            full_metadata.update(metadata)
        
        await self.log_info("stream_processor", f"摄像头 {camera_id}: {message}", full_metadata)
    
    async def log_ai_processing(self, algorithm: str, camera_id: str, processing_time: float,
                               result: Optional[Dict[str, Any]] = None, error: Optional[str] = None):
        """记录AI处理日志 - 增强版本"""
        metadata = {
            "algorithm": algorithm,
            "camera_id": camera_id,
            "processing_time": processing_time,
            "performance_category": self._categorize_performance(processing_time),
            "timestamp": datetime.now().isoformat()
        }
        
        if result:
            metadata["result"] = result
            metadata["result_size"] = len(str(result))
            metadata["confidence"] = result.get("confidence", 0) if isinstance(result, dict) else 0
        
        if error:
            metadata["error"] = error
            metadata["error_type"] = self._categorize_error(error)
            metadata["retry_recommended"] = self._should_retry_error(error)
            
            await self.log_error("ai_processor", 
                                f"AI算法 {algorithm} 处理摄像头 {camera_id} 失败: {error}", 
                                metadata)
        else:
            # 根据处理时间选择日志级别
            if processing_time > 5.0:
                await self.log_warning("ai_processor", 
                                     f"AI算法 {algorithm} 处理摄像头 {camera_id} 耗时较长: {processing_time:.2f}s", 
                                     metadata)
            else:
                await self.log_info("ai_processor", 
                                   f"AI算法 {algorithm} 处理摄像头 {camera_id} 完成，耗时 {processing_time:.2f}s", 
                                   metadata)
    
    def _categorize_performance(self, processing_time: float) -> str:
        """性能分类"""
        if processing_time < 1.0:
            return "excellent"
        elif processing_time < 3.0:
            return "good"
        elif processing_time < 5.0:
            return "acceptable"
        else:
            return "poor"
    
    def _categorize_error(self, error: str) -> str:
        """错误分类"""
        error_lower = error.lower()
        if "timeout" in error_lower:
            return "timeout"
        elif "memory" in error_lower or "oom" in error_lower:
            return "memory"
        elif "gpu" in error_lower or "cuda" in error_lower:
            return "gpu"
        elif "network" in error_lower or "connection" in error_lower:
            return "network"
        elif "permission" in error_lower or "access" in error_lower:
            return "permission"
        else:
            return "unknown"
    
    def _should_retry_error(self, error: str) -> bool:
        """判断是否应该重试"""
        error_type = self._categorize_error(error)
        retry_types = ["timeout", "network", "gpu"]
        return error_type in retry_types
    
    async def log_performance_metric(self, metric_name: str, value: float, unit: str = "",
                                   threshold: Optional[float] = None):
        """记录性能指标"""
        metadata = {
            "metric_name": metric_name,
            "value": value,
            "unit": unit
        }
        
        if threshold:
            metadata["threshold"] = threshold
            metadata["exceeded"] = value > threshold
        
        level = LogLevel.WARNING if threshold and value > threshold else LogLevel.INFO
        message = f"性能指标 {metric_name}: {value}{unit}"
        
        if threshold and value > threshold:
            message += f" (超过阈值 {threshold}{unit})"
        
        await self._add_log(level, "performance_monitor", message, metadata)
    
    async def log_system_event(self, event_type: str, message: str, 
                              metadata: Optional[Dict[str, Any]] = None):
        """记录系统事件"""
        full_metadata = {"event_type": event_type}
        if metadata:
            full_metadata.update(metadata)
        
        await self.log_info("system", message, full_metadata)
    
    # ==================== 查询方法 ====================
    
    def get_recent_logs(self, limit: int = 100, level: Optional[str] = None,
                       component: Optional[str] = None) -> List[Dict[str, Any]]:
        """获取最近的日志"""
        try:
            logs = list(self.log_buffer)
            
            # 过滤条件
            if level:
                logs = [log for log in logs if log.level == level]
            
            if component:
                logs = [log for log in logs if log.component == component]
            
            # 按时间倒序排列
            logs.sort(key=lambda x: x.timestamp, reverse=True)
            
            return [log.to_dict() for log in logs[:limit]]
        
        except Exception as e:
            print(f"获取最近日志失败: {e}")
            return []
    
    def get_component_logs(self, component: str, limit: int = 100) -> List[Dict[str, Any]]:
        """获取指定组件的日志"""
        try:
            if component not in self.component_logs:
                return []
            
            logs = list(self.component_logs[component])
            logs.sort(key=lambda x: x.timestamp, reverse=True)
            
            return [log.to_dict() for log in logs[:limit]]
        
        except Exception as e:
            print(f"获取组件日志失败: {e}")
            return []
    
    def get_error_logs(self, limit: int = 100) -> List[Dict[str, Any]]:
        """获取错误日志"""
        return self.get_recent_logs(limit, LogLevel.ERROR) + self.get_recent_logs(limit, LogLevel.CRITICAL)
    
    def get_log_statistics(self) -> Dict[str, Any]:
        """获取日志统计信息"""
        return {
            "total_logs": self.log_stats["total_logs"],
            "logs_by_level": self.log_stats["logs_by_level"].copy(),
            "logs_by_component": self.log_stats["logs_by_component"].copy(),
            "buffer_size": len(self.log_buffer),
            "service_running": self.is_running
        }
    
    async def search_logs(self, keyword: str, start_time: Optional[datetime] = None,
                         end_time: Optional[datetime] = None, limit: int = 100) -> List[Dict[str, Any]]:
        """搜索日志"""
        try:
            logs = list(self.log_buffer)
            
            # 时间过滤
            if start_time:
                logs = [log for log in logs if log.timestamp >= start_time]
            
            if end_time:
                logs = [log for log in logs if log.timestamp <= end_time]
            
            # 关键词搜索
            keyword_lower = keyword.lower()
            matching_logs = []
            
            for log in logs:
                if (keyword_lower in log.message.lower() or 
                    keyword_lower in log.component.lower() or
                    any(keyword_lower in str(v).lower() for v in log.metadata.values())):
                    matching_logs.append(log)
            
            # 按时间倒序排列
            matching_logs.sort(key=lambda x: x.timestamp, reverse=True)
            
            return [log.to_dict() for log in matching_logs[:limit]]
        
        except Exception as e:
            print(f"搜索日志失败: {e}")
            return []
    
    async def export_logs(self, start_time: datetime, end_time: datetime, 
                         export_format: str = "json") -> str:
        """导出日志"""
        try:
            logs = list(self.log_buffer)
            
            # 时间过滤
            filtered_logs = [
                log for log in logs 
                if start_time <= log.timestamp <= end_time
            ]
            
            # 导出文件路径
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            export_file = self.log_dir / f"export_{timestamp}.{export_format}"
            
            if export_format == "json":
                with open(export_file, 'w', encoding='utf-8') as f:
                    json.dump([log.to_dict() for log in filtered_logs], f, 
                             ensure_ascii=False, indent=2)
            
            elif export_format == "txt":
                with open(export_file, 'w', encoding='utf-8') as f:
                    for log in filtered_logs:
                        f.write(f"{log.timestamp.isoformat()} [{log.level}] {log.component}: {log.message}\n")
            
            return str(export_file)
        
        except Exception as e:
            print(f"导出日志失败: {e}")
            return ""
    
    def cleanup_old_logs(self, days: int = 7):
        """清理旧日志文件"""
        try:
            cutoff_date = datetime.now() - timedelta(days=days)
            
            for log_file in self.log_dir.glob("*.log"):
                if log_file.stat().st_mtime < cutoff_date.timestamp():
                    log_file.unlink()
                    print(f"删除旧日志文件: {log_file}")
        
        except Exception as e:
            print(f"清理旧日志失败: {e}")
    
    # ==================== 增强的性能监控方法 ====================
    
    async def log_performance_optimization(self, optimization_type: str, 
                                         before_metrics: Dict[str, Any],
                                         after_metrics: Dict[str, Any],
                                         improvement_percentage: float):
        """记录性能优化日志"""
        metadata = {
            "optimization_type": optimization_type,
            "before_metrics": before_metrics,
            "after_metrics": after_metrics,
            "improvement_percentage": improvement_percentage,
            "optimization_timestamp": datetime.now().isoformat()
        }
        
        message = f"性能优化完成 - {optimization_type}: 改善 {improvement_percentage:.1f}%"
        
        if improvement_percentage > 20:
            await self.log_info("performance_optimizer", message, metadata)
        elif improvement_percentage > 0:
            await self.log_info("performance_optimizer", message, metadata)
        else:
            await self.log_warning("performance_optimizer", f"性能优化效果不明显 - {optimization_type}", metadata)
    
    async def log_resource_usage_alert(self, resource_type: str, current_usage: float, 
                                     threshold: float, recommended_action: str):
        """记录资源使用告警"""
        metadata = {
            "resource_type": resource_type,
            "current_usage": current_usage,
            "threshold": threshold,
            "usage_percentage": (current_usage / threshold) * 100,
            "recommended_action": recommended_action,
            "alert_timestamp": datetime.now().isoformat()
        }
        
        severity = "critical" if current_usage > threshold * 1.2 else "warning"
        message = f"资源使用告警 - {resource_type}: {current_usage:.1f} (阈值: {threshold:.1f})"
        
        if severity == "critical":
            await self.log_critical("resource_monitor", message, metadata)
        else:
            await self.log_warning("resource_monitor", message, metadata)
    
    async def log_batch_processing_stats(self, batch_id: str, batch_size: int,
                                       processing_time: float, success_count: int,
                                       error_count: int, throughput: float):
        """记录批处理统计"""
        metadata = {
            "batch_id": batch_id,
            "batch_size": batch_size,
            "processing_time": processing_time,
            "success_count": success_count,
            "error_count": error_count,
            "success_rate": (success_count / batch_size) * 100 if batch_size > 0 else 0,
            "throughput": throughput,
            "avg_item_time": processing_time / batch_size if batch_size > 0 else 0
        }
        
        message = f"批处理完成 - ID: {batch_id}, 大小: {batch_size}, 成功率: {metadata['success_rate']:.1f}%"
        
        if metadata['success_rate'] >= 95:
            await self.log_info("batch_processor", message, metadata)
        elif metadata['success_rate'] >= 80:
            await self.log_warning("batch_processor", message, metadata)
        else:
            await self.log_error("batch_processor", f"批处理成功率过低 - {message}", metadata)
    
    async def log_cache_performance(self, cache_type: str, hit_rate: float,
                                  total_requests: int, cache_size: int,
                                  memory_usage_mb: float):
        """记录缓存性能"""
        metadata = {
            "cache_type": cache_type,
            "hit_rate": hit_rate,
            "total_requests": total_requests,
            "cache_size": cache_size,
            "memory_usage_mb": memory_usage_mb,
            "performance_category": "excellent" if hit_rate > 0.8 else "good" if hit_rate > 0.6 else "poor"
        }
        
        message = f"缓存性能 - {cache_type}: 命中率 {hit_rate:.2%}, 请求数 {total_requests}"
        
        if hit_rate > 0.8:
            await self.log_info("cache_monitor", message, metadata)
        elif hit_rate > 0.5:
            await self.log_warning("cache_monitor", f"缓存命中率较低 - {message}", metadata)
        else:
            await self.log_error("cache_monitor", f"缓存命中率过低 - {message}", metadata)
    
    async def log_gpu_allocation(self, gpu_id: int, allocation_id: str,
                               memory_allocated: int, task_type: str,
                               allocation_success: bool, error_message: Optional[str] = None):
        """记录GPU分配日志"""
        metadata = {
            "gpu_id": gpu_id,
            "allocation_id": allocation_id,
            "memory_allocated": memory_allocated,
            "task_type": task_type,
            "allocation_success": allocation_success,
            "allocation_timestamp": datetime.now().isoformat()
        }
        
        if error_message:
            metadata["error_message"] = error_message
        
        if allocation_success:
            message = f"GPU分配成功 - GPU {gpu_id}: {memory_allocated}MB for {task_type}"
            await self.log_info("gpu_manager", message, metadata)
        else:
            message = f"GPU分配失败 - GPU {gpu_id}: {error_message or 'Unknown error'}"
            await self.log_error("gpu_manager", message, metadata)
    
    async def log_stream_health_check(self, camera_id: str, stream_status: str,
                                    frame_rate: float, latency_ms: float,
                                    error_count: int, last_error: Optional[str] = None):
        """记录流健康检查"""
        metadata = {
            "camera_id": camera_id,
            "stream_status": stream_status,
            "frame_rate": frame_rate,
            "latency_ms": latency_ms,
            "error_count": error_count,
            "health_score": self._calculate_stream_health_score(frame_rate, latency_ms, error_count),
            "check_timestamp": datetime.now().isoformat()
        }
        
        if last_error:
            metadata["last_error"] = last_error
        
        message = f"流健康检查 - 摄像头 {camera_id}: {stream_status}, FPS: {frame_rate:.1f}, 延迟: {latency_ms:.0f}ms"
        
        if stream_status == "active" and frame_rate > 2.0 and latency_ms < 1000:
            await self.log_info("stream_monitor", message, metadata)
        elif stream_status == "active":
            await self.log_warning("stream_monitor", f"流性能异常 - {message}", metadata)
        else:
            await self.log_error("stream_monitor", f"流状态异常 - {message}", metadata)
    
    def _calculate_stream_health_score(self, frame_rate: float, latency_ms: float, error_count: int) -> float:
        """计算流健康评分"""
        try:
            # 基础分数
            score = 100.0
            
            # 帧率评分 (期望3fps)
            if frame_rate < 1.0:
                score -= 40
            elif frame_rate < 2.0:
                score -= 20
            elif frame_rate < 2.5:
                score -= 10
            
            # 延迟评分 (期望<1000ms)
            if latency_ms > 3000:
                score -= 30
            elif latency_ms > 2000:
                score -= 20
            elif latency_ms > 1000:
                score -= 10
            
            # 错误计数评分
            if error_count > 10:
                score -= 20
            elif error_count > 5:
                score -= 10
            elif error_count > 0:
                score -= 5
            
            return max(0, score)
            
        except Exception:
            return 0.0
    
    async def generate_performance_summary(self, time_range_hours: int = 24) -> Dict[str, Any]:
        """生成性能摘要报告"""
        try:
            end_time = datetime.now()
            start_time = end_time - timedelta(hours=time_range_hours)
            
            # 过滤时间范围内的日志
            filtered_logs = [
                log for log in self.log_buffer
                if start_time <= log.timestamp <= end_time
            ]
            
            # 统计各类日志
            log_counts = defaultdict(int)
            error_types = defaultdict(int)
            performance_issues = []
            
            for log in filtered_logs:
                log_counts[log.level] += 1
                
                if log.level == LogLevel.ERROR:
                    error_type = log.metadata.get("error_type", "unknown")
                    error_types[error_type] += 1
                
                # 识别性能问题
                if log.component == "ai_processor" and log.metadata.get("processing_time", 0) > 5.0:
                    performance_issues.append({
                        "type": "slow_ai_processing",
                        "details": log.metadata,
                        "timestamp": log.timestamp.isoformat()
                    })
                elif log.component == "stream_monitor" and log.metadata.get("frame_rate", 0) < 2.0:
                    performance_issues.append({
                        "type": "low_frame_rate",
                        "details": log.metadata,
                        "timestamp": log.timestamp.isoformat()
                    })
            
            summary = {
                "time_range": {
                    "start_time": start_time.isoformat(),
                    "end_time": end_time.isoformat(),
                    "hours": time_range_hours
                },
                "log_statistics": dict(log_counts),
                "error_breakdown": dict(error_types),
                "performance_issues": performance_issues[:10],  # 最多10个问题
                "health_indicators": {
                    "error_rate": log_counts[LogLevel.ERROR] / max(len(filtered_logs), 1),
                    "warning_rate": log_counts[LogLevel.WARNING] / max(len(filtered_logs), 1),
                    "total_logs": len(filtered_logs),
                    "performance_issue_count": len(performance_issues)
                },
                "recommendations": self._generate_recommendations(log_counts, error_types, performance_issues),
                "generated_at": datetime.now().isoformat()
            }
            
            return summary
            
        except Exception as e:
            logger.error(f"Failed to generate performance summary: {e}")
            return {"error": str(e)}
    
    def _generate_recommendations(self, log_counts: Dict[str, int], 
                                error_types: Dict[str, int],
                                performance_issues: List[Dict[str, Any]]) -> List[str]:
        """生成优化建议"""
        recommendations = []
        
        # 基于错误率的建议
        total_logs = sum(log_counts.values())
        if total_logs > 0:
            error_rate = log_counts[LogLevel.ERROR] / total_logs
            
            if error_rate > 0.1:
                recommendations.append("错误率过高(>10%)，建议检查系统配置和资源分配")
            elif error_rate > 0.05:
                recommendations.append("错误率较高(>5%)，建议优化错误处理机制")
        
        # 基于错误类型的建议
        if error_types.get("timeout", 0) > 5:
            recommendations.append("超时错误频繁，建议增加处理超时时间或优化处理性能")
        
        if error_types.get("memory", 0) > 3:
            recommendations.append("内存错误较多，建议优化内存使用或增加系统内存")
        
        if error_types.get("gpu", 0) > 3:
            recommendations.append("GPU错误较多，建议检查GPU驱动和资源分配")
        
        # 基于性能问题的建议
        slow_processing_count = len([p for p in performance_issues if p["type"] == "slow_ai_processing"])
        if slow_processing_count > 10:
            recommendations.append("AI处理速度较慢，建议优化模型或增加GPU资源")
        
        low_fps_count = len([p for p in performance_issues if p["type"] == "low_frame_rate"])
        if low_fps_count > 5:
            recommendations.append("视频帧率较低，建议检查网络连接和流处理配置")
        
        return recommendations


# Module-level singleton instance of the logging service.
logging_service = RealTimeLoggingService()


def get_logging_service() -> RealTimeLoggingService:
    """Return the module-level logging-service singleton."""
    return logging_service