# -*- coding: utf-8 -*-

import scrapy
from .news_list import NewsListParser
from .news_detail import NewsDetailParser
from urllib.parse import urljoin, urlparse
from scrapy import Request
from bs4 import BeautifulSoup
from .press_releases import PressReleasesParser
from .logo_guideline import LogGuideLineParser
from .leadership import LeadershipParser
class IAFFSiteSpider(scrapy.Spider):
    """Main spider for the IAFF site, delegating work to modular page parsers.

    Each parser registered in ``self.parsers`` is expected to implement
    ``can_handle(response)`` and ``parse(response)``; the first parser that
    claims a response handles it (registration order matters).
    """

    name = 'iaff_site'
    allowed_domains = ['iaff.org', 'www.iaff.org']

    base_url = "https://www.iaff.org"

    # Page categories of interest.
    # NOTE(review): currently informational only — not read by the routing
    # logic below.
    page_types = ['news', 'safety', 'training', 'resources']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Register the page parsers. Order matters: the first parser whose
        # can_handle() returns True wins in parse().
        self.parsers = [
            NewsListParser(self),
            NewsDetailParser(self),
            PressReleasesParser(self),
            LogGuideLineParser(self),
            LeadershipParser(self),
        ]
        self.route()

    def route(self):
        """Populate ``start_urls`` with the site entry points to crawl."""
        self.start_urls = [
            'https://www.iaff.org/about-us/leadership/',
            'https://www.iaff.org/logo-guidelines/',
            'https://www.iaff.org/press-releases/',
            'https://www.iaff.org/newsroom/',
        ]

    def parse(self, response):
        """Dispatch *response* to the first registered parser that claims it.

        Yields whatever items/requests the matching parser's ``parse``
        produces. Responses that no parser claims are logged (previously
        they were silently dropped) so unhandled pages are visible.
        """
        for parser in self.parsers:
            if parser.can_handle(response):
                yield from parser.parse(response)
                return

        # No parser matched: surface it instead of swallowing the response.
        self.logger.warning("No parser found for %s", response.url)

    def add_parser(self, parser_class):
        """Instantiate *parser_class* bound to this spider and register it."""
        self.parsers.append(parser_class(self))

    def remove_parser(self, parser_class):
        """Unregister every parser that is an instance of *parser_class*."""
        self.parsers = [p for p in self.parsers if not isinstance(p, parser_class)]