# -*- coding: utf-8 -*-

import scrapy
from urllib.parse import urljoin, urlparse
from scrapy import Request
from bs4 import BeautifulSoup
from .cfast_manua import CfastManualParser
from .fcd_list import FcdListParser
from .project_detail import ProjectDetailParser
class NISTSiteSpider(scrapy.Spider):
    """Main NIST site spider that delegates page handling to modular parsers.

    Each parser registered in ``self.parsers`` must implement
    ``can_handle(response)`` and ``parse(response)``.  For every downloaded
    response, the first parser whose ``can_handle`` returns truthy processes
    it (first match wins); everything the parser yields — items and/or
    follow-up requests — is re-yielded by :meth:`parse`.
    """

    name = 'nist_site'
    # NOTE(review): Scrapy's offsite middleware matches domain *suffixes*, so
    # 'nist.gov' already covers pages.nist.gov and www.nist.gov; the explicit
    # 'www.nist.gov' entry is redundant but harmless.
    allowed_domains = ['nist.gov', 'www.nist.gov']

    base_url = "https://www.nist.gov"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Register the page parsers in priority order (first match wins).
        self.parsers = [
            CfastManualParser(self),
            FcdListParser(self),
            ProjectDetailParser(self),
        ]
        self.route()

    def route(self):
        """Populate ``start_urls`` with the crawl entry points."""
        # Single assignment instead of the previous empty-list + extend dance.
        self.start_urls = [
            'https://pages.nist.gov/cfast/manuals.html',
            'https://pages.nist.gov/fds-smv/manuals.html',
            'https://www.nist.gov/el/fcd',
        ]

    def parse(self, response):
        """Dispatch *response* to the first parser that can handle it.

        Yields whatever items/requests the matching parser produces.  When no
        parser matches, a warning is logged so unhandled pages are visible
        instead of being silently dropped.
        """
        for parser in self.parsers:
            if parser.can_handle(response):
                yield from parser.parse(response)
                return
        # No parser claimed this response — surface it for diagnostics.
        self.logger.warning("No parser can handle URL: %s", response.url)

    def add_parser(self, parser_class):
        """Instantiate *parser_class* with this spider and register it."""
        self.parsers.append(parser_class(self))

    def remove_parser(self, parser_class):
        """Remove every registered parser that is an instance of *parser_class*."""
        self.parsers = [p for p in self.parsers if not isinstance(p, parser_class)]