# -*- coding: utf-8 -*-
"""
Scrapy extensions for bot_scrapy
"""
import logging
from datetime import datetime

from scrapy import signals
from scrapy.exceptions import NotConfigured

logger = logging.getLogger(__name__)

class SpiderStats:
    """Scrapy extension that collects basic crawl statistics.

    Counts scraped items and scheduled requests via Scrapy signals, and
    logs a summary (duration, totals, items/second) when the spider closes.
    Enabled by default; disabled by setting STATS_ENABLED to False.
    """

    def __init__(self):
        # Running counters, incremented by the signal handlers below.
        self.items_scraped = 0
        self.requests_scheduled = 0
        # Set by spider_opened; stays None until then.
        self.start_time = None

    @classmethod
    def from_crawler(cls, crawler):
        """Build the extension from a crawler and connect signal handlers.

        Raises:
            NotConfigured: if the STATS_ENABLED setting is False — this
                tells Scrapy to skip loading the extension entirely.
        """
        if not crawler.settings.getbool('STATS_ENABLED', True):
            raise NotConfigured

        ext = cls()

        # Wire each handler to the corresponding Scrapy signal.
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        crawler.signals.connect(ext.item_scraped, signal=signals.item_scraped)
        crawler.signals.connect(ext.request_scheduled, signal=signals.request_scheduled)

        return ext

    def spider_opened(self, spider):
        """Record the crawl start time when the spider opens."""
        self.start_time = datetime.now()
        logger.info("Spider started: %s", spider.name)

    def spider_closed(self, spider, reason):
        """Log aggregate statistics when the spider closes.

        Args:
            spider: the spider instance that closed.
            reason: close-reason string supplied by Scrapy (required by the
                signal signature; not used beyond that).
        """
        end_time = datetime.now()
        if self.start_time is None:
            # spider_opened never fired (e.g. startup failure); avoid a
            # TypeError from subtracting None and report a zero duration.
            duration = end_time - end_time
        else:
            duration = end_time - self.start_time

        seconds = duration.total_seconds()
        stats = {
            'duration': str(duration),
            'items_scraped': self.items_scraped,
            'requests_scheduled': self.requests_scheduled,
            # Guard against ZeroDivisionError on near-instant runs.
            'items_per_second': self.items_scraped / seconds if seconds > 0 else 0,
        }

        logger.info("Spider closed: %s", spider.name)
        logger.info("Statistics: %s", stats)

    def item_scraped(self, item, spider):
        """Count one scraped item (item_scraped signal handler)."""
        self.items_scraped += 1

    def request_scheduled(self, request, spider):
        """Count one scheduled request (request_scheduled signal handler)."""
        self.requests_scheduled += 1