# -*- coding: utf-8 -*-

import scrapy
from .news_list import NewsListParser
from .news_detail import NewsDetailParser
from urllib.parse import urljoin, urlparse
from scrapy import Request
from bs4 import BeautifulSoup
from .audit_committee import AuditCommitteeParser
from .incident_detail import IncidentDetailParser
from .incident_list import IncidentListParser

class LondonFireSiteSpider(scrapy.Spider):
    """Main spider for the London Fire Brigade site, built on modular page parsers.

    Each response is dispatched to a pluggable list of parser objects; the
    first parser whose ``can_handle(response)`` returns truthy produces the
    items/requests for that page.
    """

    name = 'londonfire_site'
    allowed_domains = ['london-fire.gov.uk', 'www.london-fire.gov.uk']

    base_url = "https://www.london-fire.gov.uk"

    # Page categories this spider covers (informational; not read elsewhere
    # in this class).
    page_types = ['news', 'safety', 'incidents']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Register the page parsers. Order matters: dispatch in parse()
        # stops at the first parser that claims a response.
        self.parsers = [
            NewsListParser(self),
            NewsDetailParser(self),
            AuditCommitteeParser(self),
            IncidentListParser(self),
            IncidentDetailParser(self),
        ]
        self.route()

    def fetch_abouts(self):
        """Append the safety-information ('about us') pages to ``start_urls``."""
        abouts_url = [
            'https://www.london-fire.gov.uk/about-us/services-and-facilities/vehicles-and-equipment/',
            'https://www.london-fire.gov.uk/about-us/services-and-facilities/media-resources/',
            'https://www.london-fire.gov.uk/about-us/services-and-facilities/techniques-and-procedures/',
            'https://www.london-fire.gov.uk/about-us/services-and-facilities/services-we-offer/',
            'https://www.london-fire.gov.uk/about-us/services-and-facilities/false-alarms-and-lift-calls/',
        ]
        self.start_urls.extend(abouts_url)

    def route(self):
        """Build the complete ``start_urls`` list for this crawl."""
        self.start_urls = []

        self.fetch_abouts()

        self.start_urls.extend([
            'https://www.london-fire.gov.uk/about-us/governance-london-fire-commissioner/audit-committee/',
            'https://www.london-fire.gov.uk/incidents/',
        ])

    def parse(self, response):
        """Dispatch *response* to the first registered parser that accepts it.

        Yields whatever items/requests that parser produces. Responses that
        no parser claims are logged at debug level so unmatched URLs are
        visible during a run instead of being silently dropped (previously
        they vanished without a trace).
        """
        for parser in self.parsers:
            if parser.can_handle(response):
                yield from parser.parse(response)
                return
        self.logger.debug("No parser matched %s", response.url)

    def add_parser(self, parser_class):
        """Instantiate *parser_class* and append it to the dispatch list."""
        self.parsers.append(parser_class(self))

    def remove_parser(self, parser_class):
        """Remove every registered parser that is an instance of *parser_class*."""
        self.parsers = [p for p in self.parsers if not isinstance(p, parser_class)]