# -*- coding: utf-8 -*-

import scrapy
from .product_list import ProductListtParser
from .product_detail import ProductDetailParser
from urllib.parse import urljoin, urlparse
from scrapy import Request
from bs4 import BeautifulSoup
from .cate_list1 import CateList1Parser

class CCCFSiteSpider(scrapy.Spider):
    """Main spider for the CCCF site, delegating page handling to modular parsers.

    Routing is driven by ``response.meta['page_type']``:

    * ``'cccf_product_detail'`` -> :class:`ProductDetailParser`
    * ``'cccf_product_list'``   -> :class:`ProductListtParser`
    * anything else (including the start pages) -> :class:`CateList1Parser`
    """

    name = 'cccf_site'
    allowed_domains = ['cccf.com.cn', 'www.cccf.com.cn']

    base_url = "https://www.cccf.com.cn"

    # Page categories this spider is intended to cover (informational;
    # not read anywhere in this file -- presumably used by downstream
    # parsers or pipelines, verify before removing).
    page_types = ['news', 'safety', 'incidents']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Optional HTTP proxy URL (e.g. "http://user:pass@host:port").
        # None disables proxying; see start_requests().
        # self.proxy = "http://D0BFA2CA:809AD5BFCDCB@tunpool-yu7bw.qg.net:11639"
        self.proxy = None

        # Page parsers, instantiated once and reused for every response.
        self.parsers = [
            ProductListtParser(self),
            ProductDetailParser(self),
            CateList1Parser(self),
        ]
        self.route()

    def route(self):
        """Populate ``self.start_urls`` with the certification-search entry pages."""
        self.start_urls = [
            'https://www.cccf.com.cn:8088/certSearch/page/qzxrzxxgb',
            'https://www.cccf.com.cn:8088/certSearch/page/jsjdxxgb',
            'https://www.cccf.com.cn:8088/certSearch/page/qtcp_zyx',
        ]

    def start_requests(self):
        """Yield the initial requests, attaching a proxy only when one is configured.

        Fix: previously ``meta={'proxy': None}`` was set unconditionally; a
        ``proxy`` key with a ``None`` value is redundant at best and is
        mishandled by older versions of Scrapy's HttpProxyMiddleware, so the
        key is now omitted entirely when no proxy is set.
        """
        for url in self.start_urls:
            meta = {'proxy': self.proxy} if self.proxy else {}
            yield Request(
                url=url,
                meta=meta,
                callback=self.parse,
            )

    def parse(self, response):
        """Dispatch *response* to the parser matching its ``page_type`` meta key.

        Unknown or missing ``page_type`` values (e.g. the start pages) fall
        back to :class:`CateList1Parser`. If the matching parser instance has
        been removed from ``self.parsers``, a warning is logged instead.
        """
        dispatch = {
            'cccf_product_detail': ProductDetailParser,
            'cccf_product_list': ProductListtParser,
        }
        target_cls = dispatch.get(response.meta.get('page_type'), CateList1Parser)

        for parser in self.parsers:
            if isinstance(parser, target_cls):
                yield from parser.parse(response)
                return

        self.logger.warning(f"没有找到合适的解析器处理: {response.url}")

    def add_parser(self, parser_class):
        """Instantiate *parser_class* (bound to this spider) and register it."""
        parser = parser_class(self)
        self.parsers.append(parser)

    def remove_parser(self, parser_class):
        """Unregister every parser that is an instance of *parser_class*."""
        self.parsers = [p for p in self.parsers if not isinstance(p, parser_class)]