"""爬虫管理器
统一管理和调度所有爬虫
"""
import asyncio
import logging
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Type

from ..models.hot_item import HotItemBatch, Platform
from ..spiders.enhanced_base_spider import EnhancedBaseSpider
from ..utils.config_manager import config_manager

logger = logging.getLogger(__name__)


class SpiderRegistry:
    """Registry mapping platform identifiers to spider classes."""

    def __init__(self):
        # platform identifier -> spider class
        self._spiders: Dict[str, Type[EnhancedBaseSpider]] = {}

    def register(self, platform: str, spider_class: Type[EnhancedBaseSpider]):
        """Register a spider class under a platform identifier.

        Args:
            platform: Platform identifier.
            spider_class: Spider class to associate with the platform.
        """
        self._spiders[platform] = spider_class
        logger.info(f"Registered spider for {platform}: {spider_class.__name__}")

    def get(self, platform: str) -> Optional[Type[EnhancedBaseSpider]]:
        """Look up the spider class for a platform.

        Args:
            platform: Platform identifier.

        Returns:
            The registered spider class, or None if unregistered.
        """
        return self._spiders.get(platform)

    def list_platforms(self) -> List[str]:
        """Return the identifiers of all registered platforms.

        Returns:
            List of platform identifiers.
        """
        return [*self._spiders]

    def create_spider(self, platform: str) -> Optional[EnhancedBaseSpider]:
        """Instantiate the spider registered for a platform.

        Args:
            platform: Platform identifier.

        Returns:
            A new spider instance, or None if the platform is unregistered.
        """
        spider_class = self._spiders.get(platform)
        return spider_class() if spider_class is not None else None


class CrawlTask:
    """A single crawl request for one platform.

    Instances are orderable by priority (lower value = higher priority)
    so they can be stored directly in an asyncio.PriorityQueue.
    """

    def __init__(
        self,
        platform: str,
        priority: int = 5,
        fetch_details: bool = False,
        limit: Optional[int] = None,
        callback: Optional[Callable[..., Any]] = None
    ):
        """Initialize a crawl task.

        Args:
            platform: Platform identifier.
            priority: Priority from 1 to 10, where 1 is the highest.
            fetch_details: Whether to fetch item details as well.
            limit: Maximum number of items to fetch (None = no limit).
            callback: Optional coroutine function awaited with the
                result batch when the task completes successfully.
        """
        self.platform = platform
        self.priority = priority
        self.fetch_details = fetch_details
        self.limit = limit
        self.callback = callback
        self.created_at = datetime.now()
        # Lifecycle: 'pending' -> 'running' -> 'completed' / 'failed'
        self.status = 'pending'
        self.result: Optional[HotItemBatch] = None
        self.error: Optional[str] = None

    def __lt__(self, other):
        """Order tasks by priority for the priority queue (lower wins)."""
        # Returning NotImplemented lets Python raise a clear TypeError
        # for comparisons against unrelated types instead of an
        # AttributeError from a missing `.priority`.
        if not isinstance(other, CrawlTask):
            return NotImplemented
        return self.priority < other.priority


class SpiderManager:
    """Central manager that schedules and runs registered spiders.

    Concurrency is bounded by a semaphore, per-platform statistics are
    tracked, and platforms that fail repeatedly are skipped until their
    error count is reset (automatically on success, or manually via
    ``reset_error_count``).
    """

    def __init__(self, max_concurrent: int = 5):
        """Initialize the spider manager.

        Args:
            max_concurrent: Maximum number of crawls running at once.
        """
        self.registry = SpiderRegistry()
        self.max_concurrent = max_concurrent
        self.semaphore = asyncio.Semaphore(max_concurrent)

        # Task bookkeeping. The queue is created lazily in start() so it
        # is built while an event loop is running; running_tasks maps a
        # platform identifier to its currently executing task.
        self.task_queue: asyncio.PriorityQueue = None
        self.running_tasks: Dict[str, CrawlTask] = {}

        # Per-platform statistics, created on first access.
        self.stats = defaultdict(lambda: {
            'total_requests': 0,
            'success': 0,
            'failed': 0,
            'total_items': 0,
            'last_crawl': None,
            'average_time': 0
        })

        # Error handling: once a platform accumulates this many
        # consecutive failures, crawl() skips it.
        self.error_threshold = 5
        self.error_counts = defaultdict(int)

        # Register every available spider implementation.
        self._auto_register_spiders()

    def _auto_register_spiders(self):
        """Import and register all available spider implementations."""
        try:
            # Import and register the refactored spiders.
            from ..spiders.refactored_zhihu_spider import RefactoredZhihuSpider
            self.registry.register('zhihu', RefactoredZhihuSpider)

            # TODO: register the remaining refactored spiders
            # from ..spiders.refactored_weibo_spider import RefactoredWeiboSpider
            # self.registry.register('weibo', RefactoredWeiboSpider)

        except ImportError as e:
            logger.error(f"Failed to import spiders: {e}")

    async def start(self):
        """Start the manager (call from within a running event loop)."""
        self.task_queue = asyncio.PriorityQueue()
        logger.info("Spider manager started")

    async def stop(self):
        """Stop the manager, waiting for in-flight work to finish.

        Each task closes its own spider when it leaves the
        ``async with spider:`` block in ``_execute_task``, so no
        per-spider cleanup is needed here; creating fresh spider
        instances just to close them would never touch the spiders
        actually in use.
        """
        # Wait for any queued work to be processed.
        if self.task_queue:
            await self.task_queue.join()

        # Wait for currently running tasks to drain.
        while self.running_tasks:
            await asyncio.sleep(0.1)

        logger.info("Spider manager stopped")

    async def crawl(
        self,
        platform: str,
        fetch_details: bool = False,
        limit: Optional[int] = None,
        priority: int = 5
    ) -> Optional[HotItemBatch]:
        """Run a crawl for one platform.

        Args:
            platform: Platform identifier.
            fetch_details: Whether to fetch item details as well.
            limit: Maximum number of items to fetch.
            priority: Task priority (1 highest .. 10 lowest).

        Returns:
            The crawled batch, or None if the platform is unregistered,
            disabled, over its error threshold, or the crawl failed.
        """
        # Unknown platform?
        if self.registry.get(platform) is None:
            logger.error(f"Platform {platform} not registered")
            return None

        # Disabled in configuration? The `or {}` guards against a
        # missing/None config entry for the platform.
        platform_config = config_manager.get_platform_config(platform) or {}
        if not platform_config.get('enabled', False):
            logger.warning(f"Platform {platform} is disabled")
            return None

        # Too many consecutive errors?
        if self.error_counts[platform] >= self.error_threshold:
            logger.error(f"Platform {platform} has too many errors, skipping")
            return None

        task = CrawlTask(
            platform=platform,
            fetch_details=fetch_details,
            limit=limit,
            priority=priority
        )
        return await self._execute_task(task)

    async def _execute_task(self, task: CrawlTask) -> Optional[HotItemBatch]:
        """Run a single crawl task under the concurrency semaphore.

        Args:
            task: The crawl task to execute.

        Returns:
            The crawled batch, or None on failure.
        """
        async with self.semaphore:
            start_time = datetime.now()
            task.status = 'running'
            self.running_tasks[task.platform] = task

            try:
                spider = self.registry.create_spider(task.platform)
                if not spider:
                    raise ValueError(f"Failed to create spider for {task.platform}")

                # The spider opens and closes itself via its async
                # context manager.
                async with spider:
                    # Prefer the richest fetch method the spider offers.
                    if hasattr(spider, 'fetch_with_details') and task.fetch_details:
                        result = await spider.fetch_with_details(limit=task.limit)
                    elif hasattr(spider, 'fetch_hot_list'):
                        result = await spider.fetch_hot_list()
                    else:
                        # Fall back to the base crawl method.
                        items = await spider.crawl()
                        result = HotItemBatch(
                            platform=Platform(task.platform),
                            items=items,
                            total_count=len(items)
                        )

                    task.result = result
                    task.status = 'completed'

                    self._update_stats(task.platform, True, result.total_count, start_time)

                    # A success clears the consecutive-error counter.
                    self.error_counts[task.platform] = 0

                    # Invoke the optional completion callback.
                    if task.callback:
                        await task.callback(result)

                    return result

            except Exception as e:
                task.error = str(e)
                task.status = 'failed'

                self.error_counts[task.platform] += 1
                self._update_stats(task.platform, False, 0, start_time)

                logger.error(f"Task failed for {task.platform}: {e}")
                return None

            finally:
                # The task is no longer running regardless of outcome.
                self.running_tasks.pop(task.platform, None)

    async def crawl_multiple(
        self,
        platforms: List[str],
        fetch_details: bool = False,
        limit: Optional[int] = None
    ) -> Dict[str, Optional[HotItemBatch]]:
        """Crawl several platforms concurrently.

        Args:
            platforms: Platform identifiers to crawl.
            fetch_details: Whether to fetch item details as well.
            limit: Maximum number of items per platform.

        Returns:
            Mapping of platform -> batch (None where the crawl failed).
        """
        coros = []
        for platform in platforms:
            # Per-platform priority comes from configuration.
            platform_config = config_manager.get_platform_config(platform) or {}
            priority = platform_config.get('priority', 5)
            coros.append(self.crawl(
                platform=platform,
                fetch_details=fetch_details,
                limit=limit,
                priority=priority
            ))

        results = await asyncio.gather(*coros, return_exceptions=True)

        # Normalize results: exceptions become None entries.
        output = {}
        for platform, result in zip(platforms, results):
            if isinstance(result, Exception):
                logger.error(f"Error crawling {platform}: {result}")
                output[platform] = None
            else:
                output[platform] = result

        return output

    async def crawl_all_enabled(
        self,
        fetch_details: bool = False,
        limit: Optional[int] = None
    ) -> Dict[str, Optional[HotItemBatch]]:
        """Crawl every platform that is both enabled and registered.

        Args:
            fetch_details: Whether to fetch item details as well.
            limit: Maximum number of items per platform.

        Returns:
            Mapping of platform -> batch (None where the crawl failed),
            or an empty dict when nothing is available.
        """
        enabled_platforms = config_manager.get_enabled_platforms()

        # Only crawl platforms that actually have a registered spider.
        registered = set(self.registry.list_platforms())
        available_platforms = [p for p in enabled_platforms if p in registered]

        if not available_platforms:
            logger.warning("No available platforms to crawl")
            return {}

        logger.info(f"Crawling {len(available_platforms)} platforms: {available_platforms}")

        return await self.crawl_multiple(
            platforms=available_platforms,
            fetch_details=fetch_details,
            limit=limit
        )

    def _update_stats(
        self,
        platform: str,
        success: bool,
        item_count: int,
        start_time: datetime
    ):
        """Update per-platform statistics after a crawl attempt.

        Args:
            platform: Platform identifier.
            success: Whether the crawl succeeded.
            item_count: Number of items fetched (0 on failure).
            start_time: When the crawl started, for timing.
        """
        stats = self.stats[platform]
        stats['total_requests'] += 1

        if success:
            stats['success'] += 1
            stats['total_items'] += item_count
        else:
            stats['failed'] += 1

        stats['last_crawl'] = datetime.now()

        # Exponential moving average of crawl duration (90% history,
        # 10% latest sample); seeded with the first observation.
        duration = (datetime.now() - start_time).total_seconds()
        if stats['average_time'] == 0:
            stats['average_time'] = duration
        else:
            stats['average_time'] = (stats['average_time'] * 0.9 + duration * 0.1)

    def get_stats(self, platform: Optional[str] = None) -> Dict[str, Any]:
        """Return crawl statistics.

        Args:
            platform: Platform identifier; when None, return the stats
                for all platforms keyed by platform.

        Returns:
            Statistics dictionary (a shallow copy).
        """
        if platform:
            return dict(self.stats[platform])
        return dict(self.stats)

    def reset_error_count(self, platform: str):
        """Reset the consecutive-error counter for a platform.

        Args:
            platform: Platform identifier.
        """
        self.error_counts[platform] = 0
        logger.info(f"Reset error count for {platform}")

    async def schedule_periodic_crawl(
        self,
        interval_minutes: int = 30,
        fetch_details: bool = False
    ):
        """Crawl all enabled platforms on a fixed interval, forever.

        Args:
            interval_minutes: Minutes to wait between crawl rounds.
            fetch_details: Whether to fetch item details as well.
        """
        logger.info(f"Starting periodic crawl every {interval_minutes} minutes")

        while True:
            try:
                # Crawl every enabled platform.
                results = await self.crawl_all_enabled(fetch_details=fetch_details)

                # Log a one-line summary per platform.
                for platform, batch in results.items():
                    if batch:
                        logger.info(
                            f"{platform}: Fetched {batch.total_count} items, "
                            f"top item: {batch.items[0].title if batch.items else 'N/A'}"
                        )

                # Sleep until the next round.
                await asyncio.sleep(interval_minutes * 60)

            except Exception as e:
                logger.error(f"Error in periodic crawl: {e}")
                await asyncio.sleep(60)  # back off one minute after an error


# Module-level singleton spider manager, created at import time.
spider_manager = SpiderManager()