# -*- coding: utf-8 -*-

import scrapy
from .news_list import NewsListParser
from .news_detail import NewsDetailParser
from urllib.parse import urljoin, urlparse

class BeijingFireSiteSpider(scrapy.Spider):
    """Main spider for the Beijing Fire Protection Association site.

    Crawling is modular: each page-type parser (news list, news detail)
    decides via ``can_handle`` whether it owns a given response, and
    :meth:`parse` dispatches the response to the first parser that
    claims it.
    """

    name = 'beijingfire_site'
    allowed_domains = ['beijingfire.com', 'www.beijingfire.com']

    base_url = "http://www.beijingfire.com"

    # News category ids to crawl (site-specific ``catid`` values).
    category_ids = [65556, 65557, 65558, 45, 14, 57, 65559]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Page parsers, consulted in order by parse(); the first one
        # whose can_handle() accepts the response wins.
        self.parsers = [
            NewsListParser(self),
            NewsDetailParser(self),
        ]
        # Build start_urls up front so Scrapy's default start_requests()
        # can consume them.
        self.route()

    def route(self):
        """Populate ``start_urls`` with every entry page to crawl.

        Resets the list first; the ``fetch_*`` helpers below only append,
        so they rely on this method having run.
        """
        self.start_urls = []
        self.fetch_article()
        self.fetch_law()
        self.fetch_law_cate80()

    def fetch_article(self):
        """Queue one news-list page per category (title, publish time, body)."""
        for catid in self.category_ids:
            url = urljoin(self.base_url, f'home/newslist.php?catid={catid}')
            self.start_urls.append(url)

    def fetch_law(self):
        """Queue the laws page (name, publish time, law PDF download)."""
        url = urljoin(self.base_url, 'home/law.php')
        self.start_urls.append(url)

    def fetch_law_cate80(self):
        """Queue the standards page (name, standard number, issue/effective
        dates, standard PDF download)."""
        url = urljoin(self.base_url, 'home/law.php?catid=80')
        self.start_urls.append(url)

    def parse(self, response):
        """Dispatch the response to the first parser that can handle it.

        Warns (instead of silently dropping the page) when no registered
        parser claims the response, so routing gaps are visible in logs.
        """
        for parser in self.parsers:
            if parser.can_handle(response):
                yield from parser.parse(response)
                return
        self.logger.warning("No parser could handle %s", response.url)

    def add_parser(self, parser_class):
        """Instantiate *parser_class* with this spider and register it."""
        self.parsers.append(parser_class(self))

    def remove_parser(self, parser_class):
        """Unregister every parser that is an instance of *parser_class*."""
        self.parsers = [p for p in self.parsers if not isinstance(p, parser_class)]