"""
爬虫服务主入口
专注于爬虫逻辑，提供状态反馈机制
"""
import asyncio
import uuid
from typing import Dict, List, Callable, Optional
from datetime import datetime
from enum import Enum

# 导入爬虫模块
from spiders import SpiderFactory

class CrawlStatus(Enum):
    """Lifecycle states shared by a whole crawl task and by each individual
    data source within it (see CrawlTask.source_status)."""
    PENDING = "pending"      # created, not yet started
    RUNNING = "running"      # crawl in progress
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # finished with an error
    PAUSED = "paused"        # defined but not set anywhere in this module

class CrawlTask:
    """State for one crawl task: the query, the target sources, per-source
    status records, aggregated results and timing information."""

    def __init__(self, task_id: str, query: str, sources: List[str]):
        self.task_id = task_id             # unique task identifier
        self.query = query                 # search query to crawl for
        self.sources = sources             # data-source names to crawl
        self.status = CrawlStatus.PENDING  # overall task status
        self.start_time = None             # datetime set when the task starts
        self.end_time = None               # datetime set when the task ends
        self.total_sources = len(sources)
        self.completed_sources = 0         # sources currently in COMPLETED state
        self.failed_sources = 0            # sources currently in FAILED state
        self.source_status = {}  # per-source: {"status", "message", "updated_at"}
        self.results = []        # merged result records from all sources
        self.error_message = None

    def update_source_status(self, source: str, status: CrawlStatus, message: str = ""):
        """Record the latest status of one data source.

        The completed/failed counters are recomputed from source_status
        rather than incremented, so reporting the same terminal state twice
        for a source (e.g. FAILED recorded both by the per-source crawler
        and again by the task aggregator) is no longer double-counted.
        """
        self.source_status[source] = {
            "status": status,
            "message": message,
            "updated_at": datetime.now().isoformat()
        }

        # Derive counters from the authoritative per-source map; each source
        # contributes at most once regardless of how often it is updated.
        entries = self.source_status.values()
        self.completed_sources = sum(
            1 for e in entries if e["status"] == CrawlStatus.COMPLETED)
        self.failed_sources = sum(
            1 for e in entries if e["status"] == CrawlStatus.FAILED)

class CrawlerService:
    """爬虫服务核心类"""
    
    def __init__(self, status_callback: Optional[Callable] = None, config: Optional[Dict] = None):
        """
        初始化爬虫服务
        
        Args:
            status_callback: 状态回调函数，用于实时反馈爬取状态
            config: 爬虫配置
        """
        self.status_callback = status_callback
        self.config = config or {}
        self.tasks: Dict[str, CrawlTask] = {}
        
        # 初始化爬虫工厂
        self._init_spiders()
        
    def _init_spiders(self):
        """初始化爬虫实例"""
        self.spiders = {}
        available_spiders = SpiderFactory.list_spiders()
        
        for spider_name in available_spiders:
            try:
                # 获取爬虫配置
                spider_config = self.config.get(spider_name, {})
                spider = SpiderFactory.create(
                    spider_name,
                    status_callback=self._spider_status_callback,
                    **spider_config  # 这里包含 search_queries / api_key 等
                )
                
                # 设置状态回调
                spider.set_status_callback(self._spider_status_callback)
                self.spiders[spider_name] = spider
                
            except Exception as e:
                print(f"初始化爬虫 {spider_name} 失败: {e}")
        
    def _spider_status_callback(self, spider_name: str, status_info: dict):
        """爬虫状态回调"""
        # 这里可以处理单个爬虫的状态更新
        print(f"爬虫 {spider_name} 状态: {status_info['status']}")
        
    def _notify_status(self, task_id: str, status_data: dict):
        """通知状态更新"""
        if self.status_callback:
            try:
                self.status_callback(task_id, status_data)
            except Exception as e:
                print(f"状态回调失败: {e}")
    
    async def start_crawl(self, query: str, sources: List[str] = None) -> str:
        """
        启动爬取任务
        
        Args:
            query: 搜索查询
            sources: 数据源列表，默认使用所有可用数据源
            
        Returns:
            task_id: 任务ID
        """
        if sources is None:
            sources = list(self.spiders.keys())  # 使用所有可用的爬虫
            
        # 验证数据源是否可用
        available_sources = []
        for source in sources:
            if source in self.spiders:
                available_sources.append(source)
            else:
                print(f"警告: 数据源 {source} 不可用，已跳过")
        
        if not available_sources:
            raise ValueError("没有可用的数据源")
            
        task_id = str(uuid.uuid4())
        task = CrawlTask(task_id, query, available_sources)
        self.tasks[task_id] = task
        
        # 初始化每个数据源的状态
        for source in available_sources:
            task.update_source_status(source, CrawlStatus.PENDING, "等待开始")
        
        # 通知任务开始
        self._notify_status(task_id, self._get_task_status(task))
        
        # 异步执行爬取任务
        asyncio.create_task(self._execute_crawl_task(task))
        
        return task_id
    
    async def _execute_crawl_task(self, task: CrawlTask):
        """执行爬取任务"""
        try:
            task.status = CrawlStatus.RUNNING
            task.start_time = datetime.now()
            self._notify_status(task.task_id, self._get_task_status(task))
            
            # 并发执行各个数据源的爬取
            tasks = []
            for source in task.sources:
                crawl_task = self._crawl_single_source(task, source)
                tasks.append(crawl_task)
            
            # 等待所有数据源完成
            results = await asyncio.gather(*tasks, return_exceptions=True)
            
            # 处理结果
            for i, result in enumerate(results):
                source = task.sources[i]
                if isinstance(result, Exception):
                    task.update_source_status(source, CrawlStatus.FAILED, str(result))
                else:
                    task.results.extend(result)
                    task.update_source_status(source, CrawlStatus.COMPLETED, f"成功获取 {len(result)} 条数据")
            
            # 任务完成
            task.status = CrawlStatus.COMPLETED
            task.end_time = datetime.now()
            
        except Exception as e:
            task.status = CrawlStatus.FAILED
            task.error_message = str(e)
            task.end_time = datetime.now()
        
        finally:
            # 最终状态通知
            self._notify_status(task.task_id, self._get_task_status(task))
    
    async def _crawl_single_source(self, task: CrawlTask, source: str) -> List[dict]:
        """爬取单个数据源"""
        try:
            # 更新状态为运行中
            task.update_source_status(source, CrawlStatus.RUNNING, "正在爬取...")
            self._notify_status(task.task_id, self._get_task_status(task))
            
            # 获取对应的爬虫实例
            spider = self.spiders.get(source)
            if not spider:
                raise ValueError(f"爬虫 {source} 未找到")
            
            # 执行爬取
            results = await spider.crawl(task.query)
            
            return results
            
        except Exception as e:
            task.update_source_status(source, CrawlStatus.FAILED, str(e))
            self._notify_status(task.task_id, self._get_task_status(task))
            raise
    
    def _get_task_status(self, task: CrawlTask) -> dict:
        """获取任务状态"""
        return {
            "task_id": task.task_id,
            "query": task.query,
            "status": task.status.value,
            "start_time": task.start_time.isoformat() if task.start_time else None,
            "end_time": task.end_time.isoformat() if task.end_time else None,
            "total_sources": task.total_sources,
            "completed_sources": task.completed_sources,
            "failed_sources": task.failed_sources,
            "source_status": task.source_status,
            "total_results": len(task.results),
            "error_message": task.error_message,
            "progress": (task.completed_sources + task.failed_sources) / task.total_sources * 100
        }
    
    def get_task_status(self, task_id: str) -> Optional[dict]:
        """获取指定任务的状态"""
        task = self.tasks.get(task_id)
        if task:
            return self._get_task_status(task)
        return None
    
    def get_all_tasks(self) -> List[dict]:
        """获取所有任务状态"""
        return [self._get_task_status(task) for task in self.tasks.values()]
    
    def get_available_sources(self) -> List[dict]:
        """获取可用的数据源信息"""
        sources_info = []
        for name, spider in self.spiders.items():
            sources_info.append({
                "name": name,
                "cost_info": spider.get_cost_info(),
                "rate_limit": spider.get_rate_limit()
            })
        return sources_info

# Global crawler-service instance, created at import time with no callback
# and no config (spiders are initialized immediately as a side effect).
crawler_service = CrawlerService()

# 示例状态回调函数
def example_status_callback(task_id: str, status_data: dict):
    """Example status callback: print a short progress report to stdout."""
    report = [
        f"任务 {task_id} 状态更新: {status_data['status']}",
        f"进度: {status_data['progress']:.1f}%",
        f"已完成数据源: {status_data['completed_sources']}/{status_data['total_sources']}",
        "---",
    ]
    for line in report:
        print(line)

# Wire the example callback into the global service instance, so every
# import of this module gets status updates printed to stdout.
crawler_service.status_callback = example_status_callback

# 使用示例
async def main():
    """Usage example: build a service, start a crawl, poll until it ends."""
    # Spider configuration; the commented entries show other supported sources.
    config = {
    # "google_search": {"search_queries": [
    #     "-site:linkedin.com 'toy' whatsapp +1",
    #     "-site:facebook.com email phone contact",
    #     "-site:twitter.com email phone contact address",
    #     "-site:linkedin.com email contact"
    # ]},
    # "google_maps": {"search_queries": ["ACME China"], "api_key": "YOUR_GOOGLE_MAPS_KEY"},
    "apollo": {"search_queries": ["ACME Inc"], "api_key": "YOUR_APOLLO_KEY"},
    }

    # Build a dedicated service instance for this example run.
    crawler = CrawlerService(
        status_callback=example_status_callback,
        config=config,
    )

    # Show which data sources the service can use.
    print("可用数据源:")
    for info in crawler.get_available_sources():
        print(f"- {info['name']}: {info['cost_info']['description']}")

    # Kick off a crawl restricted to one specific source.
    tid = await crawler.start_crawl(
        query="科技公司",
        sources=["google_search"],  # a specific subset of sources
    )
    print(f"开始爬取任务: {tid}")

    # Poll once per second until the task reaches a terminal state.
    while True:
        snapshot = crawler.get_task_status(tid)
        if snapshot['status'] in ('completed', 'failed'):
            print(f"任务完成，状态: {snapshot['status']}")
            print(f"总结果数: {snapshot['total_results']}")
            break
        await asyncio.sleep(1)

# Script entry point: run the usage example inside a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())