import logging
import yaml
import os
import importlib
from typing import Dict, List, Any, Optional, Type
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
from datetime import datetime
from .base_crawler import BaseCrawler

class CrawlerManager:
    """
    Crawler manager: registers crawler classes, instantiates them from the
    YAML configuration, and schedules crawl runs (all sources, by type, or
    a single source).
    """

    def __init__(self, config_path: str):
        """
        Initialize the crawler manager.

        Args:
            config_path: Path to the YAML configuration file.
        """
        self.logger = logging.getLogger("crawler.manager")
        self.config_path = config_path
        self.config = self._load_config()
        # Both maps are keyed by "source_type.source_name".
        self.crawler_classes: Dict[str, Type[BaseCrawler]] = {}
        self.crawlers: Dict[str, BaseCrawler] = {}
        self.register_crawlers()

    def _load_config(self) -> Dict[str, Any]:
        """
        Load the YAML configuration file.

        Returns:
            Parsed configuration as a dict; an empty dict when the file is
            missing, unreadable, or contains invalid YAML.
        """
        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)
            # safe_load returns None for an empty file; normalize so the
            # declared Dict return type always holds.
            return config or {}
        except (OSError, yaml.YAMLError) as e:
            # Narrowed from a bare Exception: only I/O and parse failures
            # are expected here; anything else should surface loudly.
            self.logger.error("Failed to load config: %s", e)
            return {}

    def register_crawler(self, source_type: str, source_name: str, crawler_class: Type[BaseCrawler]) -> None:
        """
        Register a crawler class under "source_type.source_name".

        Args:
            source_type: Data source type (e.g. 'papers').
            source_name: Data source name (e.g. 'arXiv').
            crawler_class: Crawler class to instantiate for this source.
        """
        key = f"{source_type}.{source_name}"
        self.crawler_classes[key] = crawler_class
        self.logger.info("Registered crawler: %s -> %s", key, crawler_class.__name__)

    def register_crawlers(self) -> None:
        """
        Register all known crawler classes and instantiate the configured ones.
        """
        # Imported here (not at module top) to avoid a circular import with
        # the package's __init__.
        from . import ArxivCrawler, OpenAIBlogCrawler

        # Register the concrete crawlers.
        self.register_crawler('papers', 'arXiv', ArxivCrawler)
        self.register_crawler('companies', 'OpenAI Blog', OpenAIBlogCrawler)

        # Build instances for every source present in the config.
        self._init_crawlers()

    def _init_crawlers(self) -> None:
        """
        Instantiate a crawler for every configured source that has a
        registered crawler class.  One bad source never aborts the rest.
        """
        if not self.config or 'sources' not in self.config:
            self.logger.error("No sources found in config")
            return

        sources = self.config.get('sources', {})
        for source_type, source_list in sources.items():
            # An empty YAML section parses as None; treat it as "no sources"
            # instead of raising TypeError on iteration.
            for source_config in source_list or []:
                source_name = source_config.get('name')
                if not source_name:
                    continue

                key = f"{source_type}.{source_name}"
                crawler_class = self.crawler_classes.get(key)
                if crawler_class is None:
                    self.logger.warning(f"No crawler class registered for {key}")
                    continue

                try:
                    crawler = crawler_class(source_config)
                except Exception:
                    # logger.exception keeps the traceback; a constructor
                    # failure for one source must not stop the others.
                    self.logger.exception("Failed to initialize crawler %s", key)
                    continue
                self.crawlers[key] = crawler
                self.logger.info("Initialized crawler: %s", key)

    def get_crawler(self, source_type: str, source_name: str) -> Optional[BaseCrawler]:
        """
        Look up a crawler instance.

        Args:
            source_type: Data source type.
            source_name: Data source name.

        Returns:
            The crawler instance, or None if no such crawler was initialized.
        """
        key = f"{source_type}.{source_name}"
        return self.crawlers.get(key)

    def crawl_all(self, max_workers: int = 4) -> Dict[str, List[Dict[str, Any]]]:
        """
        Run every enabled crawler concurrently.

        Args:
            max_workers: Maximum number of worker threads.

        Returns:
            Mapping of "source_type.source_name" to the items each crawler
            returned.  Crawlers that raised are logged and omitted.
        """
        results: Dict[str, List[Dict[str, Any]]] = {}
        start_time = time.time()
        self.logger.info("Starting crawl_all with %d crawlers", len(self.crawlers))

        # Crawling is I/O-bound, so threads overlap the network waits.
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_crawler = {
                executor.submit(crawler.crawl): (key, crawler)
                for key, crawler in self.crawlers.items()
                if crawler.enabled
            }

            for future in as_completed(future_to_crawler):
                key, crawler = future_to_crawler[future]
                try:
                    data = future.result()
                except Exception:
                    self.logger.exception("Crawler %s failed", key)
                    continue
                results[key] = data
                self.logger.info("Crawler %s completed, %d items crawled", key, len(data))

        end_time = time.time()
        total_items = sum(len(items) for items in results.values())
        self.logger.info(
            "crawl_all completed in %.2fs, %d items crawled",
            end_time - start_time, total_items,
        )

        return results

    def crawl_by_type(self, source_type: str) -> Dict[str, List[Dict[str, Any]]]:
        """
        Run every enabled crawler of one source type, sequentially.

        Args:
            source_type: Data source type (key prefix before the dot).

        Returns:
            Mapping of "source_type.source_name" to crawled items; failed
            crawlers are logged and omitted.
        """
        results: Dict[str, List[Dict[str, Any]]] = {}
        target_crawlers = {
            key: crawler for key, crawler in self.crawlers.items()
            if key.startswith(f"{source_type}.") and crawler.enabled
        }

        self.logger.info(
            "Starting crawl_by_type(%s) with %d crawlers",
            source_type, len(target_crawlers),
        )

        for key, crawler in target_crawlers.items():
            try:
                data = crawler.crawl()
            except Exception:
                self.logger.exception("Crawler %s failed", key)
                continue
            results[key] = data
            self.logger.info("Crawler %s completed, %d items crawled", key, len(data))

        return results

    def crawl_single(self, source_type: str, source_name: str) -> List[Dict[str, Any]]:
        """
        Run a single crawler.

        Args:
            source_type: Data source type.
            source_name: Data source name.

        Returns:
            The crawled items; an empty list when the crawler is unknown,
            disabled, or raised during the crawl.
        """
        crawler = self.get_crawler(source_type, source_name)
        if not crawler:
            self.logger.error("No crawler found for %s.%s", source_type, source_name)
            return []

        if not crawler.enabled:
            self.logger.warning("Crawler %s.%s is disabled", source_type, source_name)
            return []

        try:
            data = crawler.crawl()
        except Exception:
            self.logger.exception("Crawler %s.%s failed", source_type, source_name)
            return []
        self.logger.info(
            "Crawler %s.%s completed, %d items crawled",
            source_type, source_name, len(data),
        )
        return data