"""
失败重试机制
实现指数退避、死信队列等重试策略
"""
import asyncio
import json
import logging
import os
import time
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, Optional, Type

import motor.motor_asyncio
import redis
from celery import Task
from celery.exceptions import MaxRetriesExceededError, Retry

logger = logging.getLogger(__name__)

class RetryStrategy:
    """Base class for retry-delay policies.

    Concrete strategies must override :meth:`calculate_delay`.
    """

    def calculate_delay(self, retry_count: int) -> int:
        """Compute how long to wait before the next retry.

        Args:
            retry_count: Number of retries already performed (0-based).

        Returns:
            Delay in seconds.

        Raises:
            NotImplementedError: Always; subclasses must implement this.
        """
        raise NotImplementedError
        
class ExponentialBackoff(RetryStrategy):
    """Retry policy whose delay grows geometrically with each attempt.

    delay = base_delay * factor ** retry_count, capped at max_delay.
    """

    def __init__(self, base_delay: int = 60, max_delay: int = 3600, factor: float = 2.0):
        """Store the backoff parameters.

        Args:
            base_delay: Delay (seconds) for the first retry.
            max_delay: Upper bound (seconds) on any computed delay.
            factor: Multiplier applied for each additional retry.
        """
        self.base_delay = base_delay
        self.max_delay = max_delay
        self.factor = factor

    def calculate_delay(self, retry_count: int) -> int:
        """Return the capped exponential delay for *retry_count* prior retries."""
        raw = (self.factor ** retry_count) * self.base_delay
        return min(self.max_delay, int(raw))
        
class LinearBackoff(RetryStrategy):
    """Retry policy whose delay grows by a fixed increment per attempt.

    delay = base_delay + retry_count * increment, capped at max_delay.
    """

    def __init__(self, base_delay: int = 60, increment: int = 60, max_delay: int = 3600):
        """Store the backoff parameters.

        Args:
            base_delay: Delay (seconds) for the first retry.
            increment: Extra seconds added for each additional retry.
            max_delay: Upper bound (seconds) on any computed delay.
        """
        self.base_delay = base_delay
        self.increment = increment
        self.max_delay = max_delay

    def calculate_delay(self, retry_count: int) -> int:
        """Return the capped linear delay for *retry_count* prior retries."""
        stepped = retry_count * self.increment + self.base_delay
        return min(self.max_delay, stepped)

class RetryManager:
    """Manage retry bookkeeping for failed tasks.

    Retry counts and per-task retry history live in Redis; the backoff
    strategy is chosen by exception class name, and the retry cap by
    crawler platform.
    """

    def __init__(self):
        # Dedicated Redis DB (3) so retry state stays isolated from other app data.
        self.redis_client = redis.Redis(
            host=os.getenv('REDIS_HOST', 'localhost'),
            port=int(os.getenv('REDIS_PORT', 6379)),
            db=3,
            decode_responses=True
        )

        # Backoff strategy keyed by exception class name.
        self.exception_strategies = {
            # Network errors: retry quickly.
            'ConnectionError': ExponentialBackoff(base_delay=30, max_delay=300),
            'TimeoutError': ExponentialBackoff(base_delay=30, max_delay=300),

            # API rate limiting: back off for much longer.
            'RateLimitError': ExponentialBackoff(base_delay=300, max_delay=1800),
            'QuotaExceededError': LinearBackoff(base_delay=600, increment=600, max_delay=3600),

            # Data errors: moderate delays.
            'ParseError': LinearBackoff(base_delay=60, increment=60, max_delay=600),
            'ValidationError': LinearBackoff(base_delay=120, increment=120, max_delay=1200),

            # Fallback for any exception type not listed above.
            'default': ExponentialBackoff(base_delay=60, max_delay=3600)
        }

        # Platform-specific retry caps; unknown platforms default to 3.
        self.platform_max_retries = {
            'weibo': 5,
            'zhihu': 4,
            'toutiao': 5,
            'baidu': 5,
            'xiaohongshu': 3,
            'douyin': 3,
            'twitter': 4,
            'reddit': 5,
            'youtube': 3,
        }

    def get_retry_strategy(self, exception: Exception) -> "RetryStrategy":
        """Return the retry strategy configured for *exception*'s class name.

        Args:
            exception: The exception that caused the task failure.

        Returns:
            The matching strategy, or the 'default' strategy if unmapped.
        """
        exception_name = exception.__class__.__name__
        return self.exception_strategies.get(exception_name,
                                             self.exception_strategies['default'])

    def get_max_retries(self, platform: Optional[str] = None) -> int:
        """Return the maximum retry count for *platform*.

        Args:
            platform: Platform name; falls back to 3 when None or unknown.

        Returns:
            Maximum number of retries.
        """
        if platform:
            return self.platform_max_retries.get(platform, 3)
        return 3

    def should_retry(self, task_id: str, exception: Exception,
                    platform: Optional[str] = None) -> tuple[bool, int]:
        """Decide whether a failed task should be retried, and after what delay.

        Side effect: increments the task's retry counter in Redis (24h TTL)
        and records the attempt in the retry history.

        Args:
            task_id: Task identifier.
            exception: The exception that caused the failure.
            platform: Optional platform name used to look up the retry cap.

        Returns:
            Tuple of (should retry, delay in seconds).
        """
        retry_key = f'retry:{task_id}'
        retry_count = self.redis_client.incr(retry_key)
        self.redis_client.expire(retry_key, 86400)  # counter expires after 24h

        max_retries = self.get_max_retries(platform)
        if retry_count > max_retries:
            logger.warning("Task %s exceeded max retries (%s)", task_id, max_retries)
            return False, 0

        strategy = self.get_retry_strategy(exception)
        # incr() returned the 1-based attempt number; strategies expect 0-based.
        delay = strategy.calculate_delay(retry_count - 1)

        self.record_retry_history(task_id, exception, retry_count, delay)

        return True, delay

    def record_retry_history(self, task_id: str, exception: Exception,
                           retry_count: int, delay: int):
        """Append one retry attempt to the task's history list in Redis.

        Args:
            task_id: Task identifier.
            exception: The exception that caused the failure.
            retry_count: 1-based retry attempt number.
            delay: Chosen delay in seconds.
        """
        history_key = f'retry_history:{task_id}'
        history_entry = {
            'retry_count': retry_count,
            'exception': str(exception),
            'exception_type': exception.__class__.__name__,
            'delay': delay,
            # Timezone-aware UTC; datetime.utcnow() is deprecated (Python 3.12+).
            'timestamp': datetime.now(timezone.utc).isoformat()
        }

        # History is stored as a Redis list of JSON blobs.
        self.redis_client.rpush(history_key, json.dumps(history_entry))
        self.redis_client.expire(history_key, 86400 * 7)  # keep history 7 days

    def get_retry_history(self, task_id: str) -> list:
        """Return the decoded retry-history entries for *task_id* (oldest first).

        Args:
            task_id: Task identifier.

        Returns:
            List of history dicts; empty if the task has no recorded retries.
        """
        history_key = f'retry_history:{task_id}'
        history = self.redis_client.lrange(history_key, 0, -1)
        return [json.loads(entry) for entry in history]

    def reset_retry_count(self, task_id: str):
        """Delete the retry counter so future failures start from zero.

        Args:
            task_id: Task identifier.
        """
        retry_key = f'retry:{task_id}'
        self.redis_client.delete(retry_key)
        
class DeadLetterQueue:
    """MongoDB-backed store for tasks that exhausted all retries."""

    def __init__(self):
        self.mongo_uri = os.getenv('MONGODB_URI', 'mongodb://localhost:27017')
        self.db_name = os.getenv('MONGODB_DB', 'crawler_scheduler')
        self.client = None  # AsyncIOMotorClient, created lazily on first use
        self.db = None

    async def connect(self):
        """Create the Motor client and database handle if not yet connected."""
        if not self.client:
            self.client = motor.motor_asyncio.AsyncIOMotorClient(self.mongo_uri)
            self.db = self.client[self.db_name]

    async def add_failed_task(self, task_id: str, task_name: str,
                             args: tuple, kwargs: dict,
                             exception: str, traceback: str,
                             retry_history: list,
                             platform: Optional[str] = None):
        """Insert a permanently-failed task into the dead-letter collection.

        Args:
            task_id: Task identifier.
            task_name: Registered Celery task name.
            args: Positional arguments the task was called with.
            kwargs: Keyword arguments the task was called with.
            exception: Stringified exception message.
            traceback: Formatted traceback text.
            retry_history: Retry-history entries (see RetryManager).
            platform: Optional platform name for filtering.
        """
        if not self.db:
            await self.connect()

        collection = self.db.dead_letter_queue

        document = {
            'task_id': task_id,
            'task_name': task_name,
            'args': args,
            'kwargs': kwargs,
            'exception': exception,
            'traceback': traceback,
            'retry_history': retry_history,
            'platform': platform,
            'status': 'failed',
            # Timezone-aware UTC; datetime.utcnow() is deprecated (Python 3.12+).
            'created_at': datetime.now(timezone.utc),
            'can_resubmit': True
        }

        await collection.insert_one(document)
        logger.info("Task %s added to dead letter queue", task_id)

    async def get_failed_tasks(self, limit: int = 100,
                              platform: Optional[str] = None) -> list:
        """Fetch recent failed tasks, newest first.

        Args:
            limit: Maximum number of documents to return.
            platform: Optional platform filter.

        Returns:
            List of task documents with '_id' stringified for serialization.
        """
        if not self.db:
            await self.connect()

        collection = self.db.dead_letter_queue

        query = {'status': 'failed'}
        if platform:
            query['platform'] = platform

        cursor = collection.find(query).sort('created_at', -1).limit(limit)

        tasks = []
        async for doc in cursor:
            doc['_id'] = str(doc['_id'])  # ObjectId is not JSON-serializable
            tasks.append(doc)

        return tasks

    async def resubmit_task(self, task_id: str) -> bool:
        """Re-submit a failed task to Celery and mark it 'resubmitted'.

        Args:
            task_id: Task identifier of the dead-lettered task.

        Returns:
            True if the task was found and resubmitted, False otherwise.
        """
        if not self.db:
            await self.connect()

        collection = self.db.dead_letter_queue

        # Only tasks still marked failed and flagged resubmittable qualify.
        task_doc = await collection.find_one({
            'task_id': task_id,
            'status': 'failed',
            'can_resubmit': True
        })

        if not task_doc:
            logger.warning("Task %s not found or cannot be resubmitted", task_id)
            return False

        # Imported lazily to avoid a circular import at module load time.
        from src.celery_app import app

        task = app.tasks.get(task_doc['task_name'])
        if task:
            # New task_id suffix keeps the resubmission distinct from the original.
            task.apply_async(
                args=task_doc['args'],
                kwargs=task_doc['kwargs'],
                task_id=f"{task_id}_resubmit_{int(time.time())}"
            )

            await collection.update_one(
                {'task_id': task_id},
                {'$set': {
                    'status': 'resubmitted',
                    'resubmitted_at': datetime.now(timezone.utc)
                }}
            )

            logger.info("Task %s resubmitted successfully", task_id)
            return True
        else:
            logger.error("Task %s not found", task_doc['task_name'])
            return False

    async def cleanup_old_tasks(self, days: int = 30):
        """Delete dead-letter documents older than *days*.

        Args:
            days: Retention period in days.
        """
        if not self.db:
            await self.connect()

        collection = self.db.dead_letter_queue

        cutoff_date = datetime.now(timezone.utc) - timedelta(days=days)

        result = await collection.delete_many({
            'created_at': {'$lt': cutoff_date}
        })

        logger.info(f"Cleaned up {result.deleted_count} old tasks from dead letter queue")
# Module-level singletons shared by the task layer.
# NOTE(review): RetryManager() constructs a Redis client at import time — an
# import side effect; confirm this is intended before reusing this module.
retry_manager = RetryManager()
dead_letter_queue = DeadLetterQueue()