"""
基础爬虫抽象类 - Scrapy版本
提供爬虫的基础功能和接口定义
"""
import asyncio
import time
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Callable
from enum import Enum
from datetime import datetime
import logging
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from scrapy import signals
from scrapy.signalmanager import dispatcher

class SpiderStatus(Enum):
    """Lifecycle states a spider can move through during a crawl."""

    IDLE = "idle"            # created but not yet started
    RUNNING = "running"      # actively crawling
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # terminated with an error
    PAUSED = "paused"        # temporarily suspended

class BaseScrapySpider(scrapy.Spider, ABC):
    """Abstract base class for Scrapy spiders.

    Tracks lifecycle state (:class:`SpiderStatus`), start/end timestamps,
    and collected items (``self.results``), and forwards progress events to
    an optional ``status_callback``. Subclasses must implement
    ``_get_start_requests`` and ``parse``.
    """
    
    def __init__(self, name: str, query: Optional[str] = None, status_callback: Optional[Callable] = None, *args, **kwargs):
        """Initialize the spider.

        Args:
            name: Spider name, forwarded to ``scrapy.Spider``.
            query: Optional search query that drives the crawl.
            status_callback: Optional ``callback(spider_name, info_dict)``
                invoked on item-scraped / completion / error events.
        """
        super().__init__(name, *args, **kwargs)
        self.query = query
        self.status_callback = status_callback
        self.results = []  # items appended by the item_scraped signal handler
        self.status = SpiderStatus.IDLE
        self.start_time = None  # datetime set when start_requests() runs
        self.end_time = None  # datetime set when the spider closes
        self.error_message = None  # close reason / error text on failure
        
        # Wire Scrapy signal handlers only when a callback was supplied.
        # NOTE: ``dispatcher`` is process-global, so each handler re-checks
        # ``spider == self`` before acting.
        if status_callback:
            dispatcher.connect(self._on_item_scraped, signal=signals.item_scraped)
            dispatcher.connect(self._on_spider_closed, signal=signals.spider_closed)
            dispatcher.connect(self._on_spider_error, signal=signals.spider_error)
    
    def _on_item_scraped(self, item, response, spider):
        """Signal handler: record a scraped item and report progress."""
        if spider == self:
            self.results.append(item)
            if self.status_callback:
                self.status_callback(self.name, {
                    "status": "item_scraped",
                    "item_count": len(self.results),
                    "message": f"已爬取 {len(self.results)} 条数据"
                })
    
    def _on_spider_closed(self, spider, reason):
        """Signal handler: finalize status and timing when the spider closes.

        ``reason`` is Scrapy's close reason string; only ``'finished'``
        counts as success, any other reason is treated as failure.
        """
        if spider == self:
            self.end_time = datetime.now()
            if reason == 'finished':
                self.status = SpiderStatus.COMPLETED
            else:
                self.status = SpiderStatus.FAILED
                self.error_message = reason
            
            if self.status_callback:
                self.status_callback(self.name, {
                    "status": self.status.value,
                    "total_items": len(self.results),
                    "message": f"爬虫完成，共获取 {len(self.results)} 条数据"
                })
    
    def _on_spider_error(self, failure, response, spider):
        """Signal handler: mark the spider failed and report the error.

        ``failure`` is a Twisted Failure; its ``.value`` is the underlying
        exception.
        """
        if spider == self:
            self.status = SpiderStatus.FAILED
            self.error_message = str(failure.value)
            if self.status_callback:
                self.status_callback(self.name, {
                    "status": "error",
                    "error": str(failure.value),
                    "message": f"爬虫出错: {failure.value}"
                })
    
    def start_requests(self):
        """Scrapy entry point: mark the run started, notify the callback,
        then delegate to ``_get_start_requests`` for the actual requests."""
        self.status = SpiderStatus.RUNNING
        self.start_time = datetime.now()
        if self.status_callback:
            self.status_callback(self.name, {
                "status": "running",
                "message": "开始爬取"
            })
        return self._get_start_requests()
    
    @abstractmethod
    def _get_start_requests(self):
        """Return/yield the initial requests - subclasses must implement."""
        pass
    
    @abstractmethod
    def parse(self, response):
        """Parse a response into items/requests - subclasses must implement."""
        pass
    
    def get_stats(self) -> Dict[str, Any]:
        """Return run statistics: status, timing, item count, and error."""
        # Duration is only meaningful once both timestamps exist.
        duration = None
        if self.start_time and self.end_time:
            duration = (self.end_time - self.start_time).total_seconds()
        
        return {
            "spider_name": self.name,
            "status": self.status.value,
            "start_time": self.start_time.isoformat() if self.start_time else None,
            "end_time": self.end_time.isoformat() if self.end_time else None,
            "duration_seconds": duration,
            "total_items": len(self.results),
            "error_message": self.error_message
        }
    
    def is_free_source(self) -> bool:
        """Whether this data source is free to crawl (override if not)."""
        return True
    
    def get_rate_limit(self) -> float:
        """Minimum delay between requests, in seconds (override to tune)."""
        return 1.0
    
    def get_cost_info(self) -> Dict[str, Any]:
        """Return cost metadata derived from the two methods above."""
        return {
            "is_free": self.is_free_source(),
            "rate_limit": self.get_rate_limit(),
            "description": "免费数据源"
        }

class SpiderFactory:
    """Process-wide registry/factory for spider classes, keyed by name."""
    
    # Maps spider name -> spider class; shared across the whole process.
    _spiders: Dict[str, type] = {}
    
    @classmethod
    def register(cls, name: str, spider_class: type):
        """Register ``spider_class`` under ``name`` (silently overwrites)."""
        cls._spiders[name] = spider_class
    
    @classmethod
    def create(cls, name: str, query: str, status_callback: Callable = None, **kwargs) -> "BaseScrapySpider":
        """Instantiate the spider registered as ``name``.

        Args:
            name: Registered spider name.
            query: Search query forwarded to the spider.
            status_callback: Optional progress callback forwarded to the spider.
            **kwargs: Extra keyword arguments for the spider constructor.

        Raises:
            ValueError: if no spider is registered under ``name``.
        """
        if name not in cls._spiders:
            raise ValueError(f"未找到爬虫: {name}")
        
        spider_class = cls._spiders[name]
        return spider_class(name=name, query=query, status_callback=status_callback, **kwargs)
    
    @classmethod
    def list_spiders(cls) -> List[str]:
        """Return the names of all registered spiders."""
        return list(cls._spiders.keys())
    
    @classmethod
    def get_spider_info(cls, name: str) -> Optional[Dict[str, Any]]:
        """Return metadata for the spider registered as ``name``.

        Returns ``None`` when the name is unknown.
        """
        if name not in cls._spiders:
            return None
        
        spider_class = cls._spiders[name]
        # BUGFIX: spiders require ``name``; ``spider_class()`` with no
        # arguments raised TypeError for every BaseScrapySpider subclass.
        spider = spider_class(name=name)
        
        return {
            "name": name,
            "class": spider_class.__name__,
            "cost_info": spider.get_cost_info(),
            # Classes always have ``__doc__`` (possibly None), so the old
            # ``getattr(..., '')`` default was dead; normalize None to "".
            "description": spider_class.__doc__ or ''
        }

class ScrapyCrawler:
    """Manager that runs Scrapy spiders through a :class:`CrawlerProcess`.

    NOTE: a CrawlerProcess (and the underlying Twisted reactor) can only be
    started once per OS process; create a fresh ScrapyCrawler per run.
    """
    
    def __init__(self, settings: Optional[Dict] = None):
        """Store crawl settings, falling back to the built-in defaults.

        Args:
            settings: Optional Scrapy settings dict overriding the defaults.
        """
        self.settings = settings or self._get_default_settings()
        self.process = None  # created lazily in run_spider()
    
    def _get_default_settings(self) -> Dict:
        """Return conservative default Scrapy settings (polite crawling:
        robots.txt honored, 1 concurrent request, randomized 2s delay)."""
        return {
            'BOT_NAME': 'contact_crawler',
            'SPIDER_MODULES': ['spiders'],
            'NEWSPIDER_MODULE': 'spiders',
            'ROBOTSTXT_OBEY': True,
            'DOWNLOAD_DELAY': 2,
            'RANDOMIZE_DOWNLOAD_DELAY': True,
            'CONCURRENT_REQUESTS': 1,
            'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
            'DOWNLOADER_MIDDLEWARES': {
                'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
                'spiders.middlewares.RandomUserAgentMiddleware': 400,
            },
            'ITEM_PIPELINES': {
                'spiders.pipelines.DataValidationPipeline': 300,
                'spiders.pipelines.DeduplicationPipeline': 400,
            },
            'LOG_LEVEL': 'INFO',
            'LOG_FILE': 'logs/crawler.log',
        }
    
    async def run_spider(self, spider_class, **kwargs) -> List[Dict]:
        """Run ``spider_class`` to completion and return the scraped items.

        NOTE: ``CrawlerProcess.start()`` blocks the calling thread (it runs
        the Twisted reactor), so despite the ``async`` signature this
        coroutine does not yield control while crawling.

        Args:
            spider_class: The spider class to crawl with.
            **kwargs: Keyword arguments forwarded to the spider constructor.

        Raises:
            Exception: re-raises any failure after logging it.
        """
        results: List[Dict] = []
        
        def _collect(item, response, spider):
            # Runs for each item this crawl scrapes.
            results.append(item)
        
        try:
            self.process = CrawlerProcess(self.settings)
            
            # BUGFIX: collect items via this crawl's own item_scraped signal.
            # The old code read the nonexistent ``process.crawler`` attribute
            # and then instantiated a *fresh* spider, whose ``results`` list
            # was always empty.
            crawler = self.process.create_crawler(spider_class)
            crawler.signals.connect(_collect, signal=signals.item_scraped)
            self.process.crawl(crawler, **kwargs)
            
            # Blocks until all crawls finish and the reactor stops.
            self.process.start()
            
            return results
            
        except Exception as e:
            logging.error(f"运行爬虫失败: {e}")
            raise
        finally:
            # After a clean start() the process is already stopped; stop()
            # here is a best-effort shutdown for mid-crawl failures only.
            if self.process:
                try:
                    self.process.stop()
                except Exception:
                    pass