"""
任务执行日志系统
结构化日志记录和MongoDB存储
"""
import os
import uuid
import logging
import traceback
from datetime import datetime
from typing import Dict, Any, Optional, List
import motor.motor_asyncio
import asyncio
from enum import Enum
from pydantic import BaseModel, Field

logger = logging.getLogger(__name__)

class LogLevel(str, Enum):
    """Log severity levels.

    String-valued so entries serialize as plain strings in MongoDB. Values
    mirror the stdlib ``logging`` level names, which lets callers resolve the
    numeric level with ``getattr(logging, level)``.
    """
    DEBUG = "DEBUG"
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
    CRITICAL = "CRITICAL"

class TaskEvent(str, Enum):
    """Task lifecycle event types.

    String-valued so entries serialize as plain strings in MongoDB and can be
    matched directly in queries (see the filters in ``TaskLogger``).
    """
    STARTED = "STARTED"      # task began executing
    COMPLETED = "COMPLETED"  # task finished successfully
    FAILED = "FAILED"        # task raised an error
    RETRIED = "RETRIED"      # task is being retried after a failure
    EXPIRED = "EXPIRED"      # task expired before running
    REVOKED = "REVOKED"      # task was cancelled/revoked

class TaskLog(BaseModel):
    """Structured log entry for a single task lifecycle event.

    Serialized via ``.dict()`` and stored in the ``task_logs`` MongoDB
    collection by ``TaskLogger``. ``description=`` texts are runtime metadata
    and are kept verbatim.
    """
    task_id: str = Field(..., description="任务ID")  # scheduler/worker task id
    task_name: str = Field(..., description="任务名称")  # registered task name
    event: TaskEvent = Field(..., description="事件类型")  # lifecycle event
    level: LogLevel = Field(LogLevel.INFO, description="日志级别")  # severity
    message: str = Field(..., description="日志消息")  # human-readable message
    platform: Optional[str] = Field(None, description="平台名称")  # optional platform tag used for filtering
    trace_id: Optional[str] = Field(None, description="追踪ID")  # correlation id across events
    error: Optional[str] = Field(None, description="错误信息")  # stringified exception
    traceback: Optional[str] = Field(None, description="错误堆栈")  # formatted traceback text
    metadata: Dict[str, Any] = Field(default_factory=dict, description="元数据")  # free-form extras
    # NOTE(review): datetime.utcnow produces naive UTC and is deprecated in
    # Python 3.12+; the TTL index keys on this field — confirm before changing.
    timestamp: datetime = Field(default_factory=datetime.utcnow, description="时间戳")

class TaskLogger:
    """Structured task logger that persists lifecycle events to MongoDB.

    Uses the async ``motor`` driver. The connection is created lazily on
    first use and :meth:`connect` is idempotent. Documents are written to
    the ``task_logs`` collection and expire 30 days after their timestamp
    via a TTL index.
    """

    def __init__(self):
        # Connection settings come from the environment, with local-dev defaults.
        self.mongo_uri = os.getenv('MONGODB_URI', 'mongodb://localhost:27017')
        self.db_name = os.getenv('MONGODB_DB', 'crawler_scheduler')
        self.client = None   # AsyncIOMotorClient, created lazily in connect()
        self.db = None       # AsyncIOMotorDatabase, set together with client
        self._loop = None    # kept for backward compatibility; currently unused

    async def connect(self):
        """Connect to MongoDB and ensure indexes exist (safe to call again)."""
        # BUGFIX: compare with None — pymongo/motor client and database
        # objects raise NotImplementedError on truth-value testing, so the
        # original ``if not self.client:`` crashed once a client existed.
        if self.client is None:
            self.client = motor.motor_asyncio.AsyncIOMotorClient(self.mongo_uri)
            self.db = self.client[self.db_name]

            await self._create_indexes()
            logger.info("Connected to MongoDB for task logging")

    async def _create_indexes(self):
        """Create query and retention indexes on the ``task_logs`` collection."""
        collection = self.db.task_logs

        # Compound indexes matching the read paths below:
        # filter on one field, sort newest-first on timestamp.
        await collection.create_index([
            ("task_id", 1),
            ("timestamp", -1)
        ])
        await collection.create_index([
            ("task_name", 1),
            ("timestamp", -1)
        ])
        await collection.create_index([
            ("platform", 1),
            ("timestamp", -1)
        ])
        await collection.create_index([
            ("event", 1),
            ("timestamp", -1)
        ])
        await collection.create_index([
            ("trace_id", 1)
        ])

        # TTL index: MongoDB auto-deletes documents 30 days after ``timestamp``.
        await collection.create_index(
            "timestamp",
            expireAfterSeconds=30 * 24 * 3600
        )

    def generate_trace_id(self) -> str:
        """Return a new random trace ID (UUID4 as a string)."""
        return str(uuid.uuid4())

    async def log_task_event(self, log: TaskLog):
        """Persist a task event to MongoDB and mirror it to the Python logger.

        Args:
            log: The task log entry to store.
        """
        if self.db is None:  # BUGFIX: None-check, not truth testing
            await self.connect()

        collection = self.db.task_logs

        # Str-valued enums serialize as plain strings in BSON.
        log_dict = log.dict()
        await collection.insert_one(log_dict)

        # Mirror to stdlib logging; fall back to INFO for an unknown name.
        level = getattr(logging, log.level, logging.INFO)
        logger.log(level, f"[{log.task_id}] {log.event}: {log.message}")

    def log_sync(self, log: TaskLog):
        """Synchronous wrapper around :meth:`log_task_event`.

        Runs the coroutine on a temporary event loop. NOTE(review): motor
        clients are bound to the loop they were created on; this is only
        reliable when the client is also (lazily) created on that temporary
        loop — confirm before mixing sync and async call sites.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(self.log_task_event(log))
        finally:
            # BUGFIX: close the loop even when logging raises, so failed
            # calls do not leak event loops.
            loop.close()

    async def log_task_start(self, task_id: str, task_name: str,
                            platform: Optional[str] = None,
                            trace_id: Optional[str] = None,
                            metadata: Optional[Dict] = None):
        """Record a STARTED event; generates a trace_id when none is given.

        Args:
            task_id: Task ID.
            task_name: Task name.
            platform: Optional platform tag.
            trace_id: Optional correlation ID (auto-generated if omitted).
            metadata: Optional extra data to store with the event.
        """
        log = TaskLog(
            task_id=task_id,
            task_name=task_name,
            event=TaskEvent.STARTED,
            level=LogLevel.INFO,
            message=f"Task {task_name} started",
            platform=platform,
            trace_id=trace_id or self.generate_trace_id(),
            metadata=metadata or {}
        )
        await self.log_task_event(log)

    async def log_task_complete(self, task_id: str, task_name: str,
                               result: Any = None,
                               platform: Optional[str] = None,
                               trace_id: Optional[str] = None,
                               metadata: Optional[Dict] = None):
        """Record a COMPLETED event, attaching ``str(result)`` when provided.

        Args:
            task_id: Task ID.
            task_name: Task name.
            result: Optional task result; stored stringified in metadata.
            platform: Optional platform tag.
            trace_id: Optional correlation ID.
            metadata: Optional extra data (copied, never mutated).
        """
        # Copy so the caller's dict is not mutated by adding 'result'.
        metadata = dict(metadata) if metadata else {}
        # BUGFIX: ``if result:`` silently dropped falsy-but-real results
        # such as 0, "" or [].
        if result is not None:
            metadata['result'] = str(result)

        log = TaskLog(
            task_id=task_id,
            task_name=task_name,
            event=TaskEvent.COMPLETED,
            level=LogLevel.INFO,
            message=f"Task {task_name} completed successfully",
            platform=platform,
            trace_id=trace_id,
            metadata=metadata
        )
        await self.log_task_event(log)

    async def log_task_failure(self, task_id: str, task_name: str,
                              error: Exception,
                              platform: Optional[str] = None,
                              trace_id: Optional[str] = None,
                              metadata: Optional[Dict] = None):
        """Record a FAILED event with the error message and current traceback.

        NOTE(review): ``traceback.format_exc()`` is only meaningful when this
        is called from inside the ``except`` block handling ``error``.

        Args:
            task_id: Task ID.
            task_name: Task name.
            error: The exception that caused the failure.
            platform: Optional platform tag.
            trace_id: Optional correlation ID.
            metadata: Optional extra data to store with the event.
        """
        log = TaskLog(
            task_id=task_id,
            task_name=task_name,
            event=TaskEvent.FAILED,
            level=LogLevel.ERROR,
            message=f"Task {task_name} failed",
            platform=platform,
            trace_id=trace_id,
            error=str(error),
            traceback=traceback.format_exc(),
            metadata=metadata or {}
        )
        await self.log_task_event(log)

    async def log_task_retry(self, task_id: str, task_name: str,
                            retry_count: int,
                            error: Exception,
                            platform: Optional[str] = None,
                            trace_id: Optional[str] = None):
        """Record a RETRIED event (WARNING level) with the attempt number.

        Args:
            task_id: Task ID.
            task_name: Task name.
            retry_count: Which retry attempt this is.
            error: The exception that triggered the retry.
            platform: Optional platform tag.
            trace_id: Optional correlation ID.
        """
        log = TaskLog(
            task_id=task_id,
            task_name=task_name,
            event=TaskEvent.RETRIED,
            level=LogLevel.WARNING,
            message=f"Task {task_name} retrying (attempt {retry_count})",
            platform=platform,
            trace_id=trace_id,
            error=str(error),
            metadata={'retry_count': retry_count}
        )
        await self.log_task_event(log)

    async def get_task_logs(self, task_id: str) -> List[Dict]:
        """Return all log entries for a task, oldest first.

        Args:
            task_id: Task ID to look up.

        Returns:
            Log documents with ``_id`` stringified for JSON friendliness.
        """
        if self.db is None:  # BUGFIX: None-check, not truth testing
            await self.connect()

        collection = self.db.task_logs

        cursor = collection.find(
            {'task_id': task_id}
        ).sort('timestamp', 1)

        logs = []
        async for doc in cursor:
            doc['_id'] = str(doc['_id'])
            logs.append(doc)

        return logs

    async def get_recent_logs(self, limit: int = 100,
                            platform: Optional[str] = None,
                            event: Optional[TaskEvent] = None,
                            level: Optional[LogLevel] = None) -> List[Dict]:
        """Return the most recent log entries, newest first.

        Args:
            limit: Maximum number of entries to return.
            platform: Optional platform filter.
            event: Optional event-type filter.
            level: Optional severity filter.

        Returns:
            Log documents with ``_id`` stringified.
        """
        if self.db is None:  # BUGFIX: None-check, not truth testing
            await self.connect()

        collection = self.db.task_logs

        # Build the filter from whichever criteria were supplied.
        query = {}
        if platform:
            query['platform'] = platform
        if event:
            query['event'] = event
        if level:
            query['level'] = level

        cursor = collection.find(query).sort('timestamp', -1).limit(limit)

        logs = []
        async for doc in cursor:
            doc['_id'] = str(doc['_id'])
            logs.append(doc)

        return logs

    async def get_error_logs(self, since: datetime, limit: int = 100) -> List[Dict]:
        """Return ERROR/CRITICAL entries at or after ``since``, newest first.

        Args:
            since: Earliest timestamp to include.
            limit: Maximum number of entries to return.

        Returns:
            Error log documents with ``_id`` stringified.
        """
        if self.db is None:  # BUGFIX: None-check, not truth testing
            await self.connect()

        collection = self.db.task_logs

        # Str-valued enums compare equal to the stored strings.
        cursor = collection.find({
            'level': {'$in': [LogLevel.ERROR, LogLevel.CRITICAL]},
            'timestamp': {'$gte': since}
        }).sort('timestamp', -1).limit(limit)

        logs = []
        async for doc in cursor:
            doc['_id'] = str(doc['_id'])
            logs.append(doc)

        return logs

    async def get_task_stats(self, start_time: datetime, end_time: datetime,
                           platform: Optional[str] = None) -> Dict:
        """Aggregate per-event counts and success rate over a time window.

        Args:
            start_time: Window start (inclusive).
            end_time: Window end (inclusive).
            platform: Optional platform filter.

        Returns:
            Dict with ISO window bounds, an ``events`` count map, and
            ``success_rate`` (COMPLETED / (COMPLETED + FAILED), 0.0 if none).
        """
        if self.db is None:  # BUGFIX: None-check, not truth testing
            await self.connect()

        collection = self.db.task_logs

        match_stage = {
            'timestamp': {
                '$gte': start_time,
                '$lte': end_time
            }
        }

        if platform:
            match_stage['platform'] = platform

        # Group the window's documents by event type and count each group.
        pipeline = [
            {'$match': match_stage},
            {'$group': {
                '_id': '$event',
                'count': {'$sum': 1}
            }}
        ]

        cursor = collection.aggregate(pipeline)

        stats = {
            'start_time': start_time.isoformat(),
            'end_time': end_time.isoformat(),
            'events': {}
        }

        async for doc in cursor:
            stats['events'][doc['_id']] = doc['count']

        # Success rate over terminal outcomes only (completed vs failed);
        # enum keys hash/compare equal to the stored string keys.
        total_completed = stats['events'].get(TaskEvent.COMPLETED, 0)
        total_failed = stats['events'].get(TaskEvent.FAILED, 0)
        total_tasks = total_completed + total_failed

        if total_tasks > 0:
            stats['success_rate'] = total_completed / total_tasks
        else:
            stats['success_rate'] = 0

        return stats

    def update_log_level(self, level: str):
        """Dynamically change the root logger's level.

        Args:
            level: Level name (case-insensitive); unknown names fall back
                to INFO.
        """
        numeric_level = getattr(logging, level.upper(), logging.INFO)
        logging.getLogger().setLevel(numeric_level)
        logger.info(f"Updated log level to {level}")

# Module-level singleton logger instance shared by importers of this module.
task_logger = TaskLogger()