import functools
import time
from datetime import datetime

import scrapy

from fin_senti_entity_platform.utils.config_loader import config_loader
from fin_senti_entity_platform.utils.logger import Logger

# Module-level logger; all spiders in this module write to data_collection.log.
logger = Logger.get_logger('base_spider', 'data_collection.log')

def retry_on_error(max_retries=3, delay=2):
    """
    Decorator that retries an instance method when it raises.

    Args:
        max_retries: Maximum number of attempts before the last
            exception is re-raised.
        delay: Base delay in seconds between attempts; the actual sleep
            grows exponentially as ``delay * 2**(attempt - 1)``.

    Note: the wrapper expects ``self`` as the first argument, i.e. it is
    intended for methods, not free functions (though passing any first
    positional argument works).
    """
    def decorator(func):
        # functools.wraps preserves __name__/__doc__ so logging and
        # debugging see the real method, not "wrapper".
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            retries = 0
            while retries < max_retries:
                try:
                    return func(self, *args, **kwargs)
                except Exception as e:
                    retries += 1
                    logger.error(f"执行 {func.__name__} 失败: {str(e)}, 第 {retries} 次重试")
                    if retries >= max_retries:
                        logger.error(f"执行 {func.__name__} 达到最大重试次数")
                        raise
                    # True exponential backoff; the original slept
                    # delay * retries (linear) despite its comment
                    # claiming exponential backoff.
                    time.sleep(delay * (2 ** (retries - 1)))
        return wrapper
    return decorator

class BaseSpider(scrapy.Spider):
    """
    Base spider for financial data collection.

    Concrete data-source spiders should inherit from this class. It
    merges per-spider settings from the config file, tracks crawl
    statistics (item/error counts, duration), and provides shared
    request-failure handling with a bounded retry budget.
    """
    name = "base_spider"
    # Seed URLs; populated by concrete subclasses.
    start_urls = []
    # Default per-site settings. This class attribute is only a
    # template: __init__ copies it per instance so that config updates
    # cannot leak between spiders sharing the class attribute.
    website_config = {
        'headers': {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
        },
        'timeout': 30,
        'download_delay': 1.0
    }

    def __init__(self, *args, **kwargs):
        """
        Initialize the spider: copy default config, merge file-based
        config on top, and reset crawl statistics.
        """
        super().__init__(*args, **kwargs)
        # Work on a per-instance copy. The original code called
        # load_spider_config() straight onto the class-level dict, so
        # one spider's config bled into every other spider sharing it.
        cls_config = type(self).website_config
        self.website_config = {**cls_config, 'headers': dict(cls_config['headers'])}
        self.load_spider_config()
        # Scrapy honors a spider-level `download_delay` attribute; use
        # it for throttling instead of blocking the Twisted reactor
        # with time.sleep() in start_requests.
        self.download_delay = self.website_config['download_delay']
        self.crawl_start_time = datetime.now()
        self.item_count = 0
        self.error_count = 0
        logger.info(f"{self.name} 爬虫初始化完成")

    def load_spider_config(self):
        """
        Merge this spider's section of the config file into
        ``self.website_config``; a missing section is a no-op.
        """
        try:
            spider_config = config_loader.get('data_collection', {}).get('spiders', {}).get(self.name, {})
            if spider_config:
                self.website_config.update(spider_config)
                logger.info(f"已加载 {self.name} 的爬虫配置")
        except Exception as e:
            # Best-effort: fall back to built-in defaults on any failure.
            logger.error(f"加载 {self.name} 爬虫配置失败: {str(e)}")

    def start_requests(self):
        """
        Yield one request per start URL.

        Throttling is delegated to the `download_delay` attribute set in
        __init__ (the original blocked the reactor with time.sleep()).
        Failures are routed to handle_error via `errback`, which was
        previously defined but never wired up.
        """
        logger.info(f"{self.name} 开始爬取，共 {len(self.start_urls)} 个URL")
        for url in self.start_urls:
            yield scrapy.Request(
                url=url,
                callback=self.parse,
                errback=self.handle_error,
                headers=self.website_config['headers'],
                meta={'url': url, 'retry_count': 0}
            )

    def parse(self, response):
        """
        Parse a response into items/requests. Subclasses must override.

        The retry decorator was removed from this stub: retrying a
        method that always raises NotImplementedError only inserted
        pointless sleeps before the inevitable failure.
        """
        raise NotImplementedError("子类必须实现 parse 方法")

    def handle_error(self, failure):
        """
        Errback for failed requests: log the failure and re-queue the
        request until the configured retry budget is exhausted.
        """
        request = failure.request
        self.error_count += 1
        logger.error(f"请求失败: {request.url}, 错误: {str(failure.value)}")

        retry_count = request.meta.get('retry_count', 0)
        max_retries = config_loader.get('data_collection', {}).get('max_retries', 3)

        if retry_count < max_retries:
            retry_count += 1
            logger.info(f"重试请求: {request.url}, 第 {retry_count} 次")
            # Bug fix: scrapy.Request has replace(), not copy_with()
            # (the original raised AttributeError here). Merge the
            # existing meta so 'url' survives, and set dont_filter so
            # the dupe filter does not drop the retried request.
            yield request.replace(
                meta={**request.meta, 'retry_count': retry_count},
                dont_filter=True
            )

    def closed(self, reason):
        """
        Called by Scrapy when the spider closes: log summary statistics
        and forward status to the monitoring hook.
        """
        crawl_end_time = datetime.now()
        duration = (crawl_end_time - self.crawl_start_time).total_seconds()
        logger.info(f"{self.name} 爬虫关闭，原因: {reason}")
        logger.info(f"爬取统计: 总时间 {duration:.2f}s, 采集项目数 {self.item_count}, 错误数 {self.error_count}")
        self._report_spider_status(reason, duration)

    def _report_spider_status(self, reason, duration):
        """
        Report spider status to the monitoring system.

        Placeholder — monitoring integration is to be implemented later.
        """
        try:
            pass
        except Exception as e:
            logger.error(f"报告爬虫状态失败: {str(e)}")
