"""
爬虫常量定义模块
"""
import importlib
from enum import Enum
from typing import Optional

from scrapers.base_scraper import BaseScraper
from config import config

# Backward-compatible re-export: legacy code does `from <this module> import headers`,
# so the default request headers from config are mirrored here at module level.
headers = config.DEFAULT_HEADERS


class SearchSource(Enum):
    """Enumeration of supported novel websites.

    Each member's value is the site's domain. Use :meth:`get_scraper` to
    resolve a domain string to a concrete scraper instance.
    """

    BQGL_CC = "bqgl.cc"
    BQG_128 = "bqg128.com"
    XS_520 = "xs520.com"
    BBQD = "bbiqudu.com"

    @classmethod
    def get_scraper(cls, domain: str) -> Optional[BaseScraper]:
        """
        Return a scraper instance for the given domain.

        Args:
            domain: Website domain; matched case-insensitively as a
                substring against each member's name and value.

        Returns:
            A scraper instance for the first matching site, or None when
            the domain is unsupported or its scraper class fails to load.
        """
        # Constant mapping hoisted out of the loop — the original rebuilt
        # this dict on every iteration for no benefit.
        scraper_mapping = {
            cls.BQGL_CC: "scrapers.bqg_cc_scraper.BqgCCScraper",
            cls.BQG_128: "scrapers.bqg_128_scraper.Bqg128Scraper",
            cls.XS_520: "scrapers.xs_520_scraper.XS520Scraper",
            cls.BBQD: "scrapers.bbqd_scraper.BBqdScraper",
        }

        domain = domain.lower()

        for item in cls:
            # Guard clauses replace the original nested-if pyramid.
            if item.name.lower() not in domain and item.value not in domain:
                continue

            scraper_class_path = scraper_mapping.get(item)
            if not scraper_class_path:
                continue

            try:
                module_path, class_name = scraper_class_path.rsplit('.', 1)
                # importlib.import_module is the documented replacement for
                # the legacy __import__(..., fromlist=[...]) idiom.
                module = importlib.import_module(module_path)
                scraper_class = getattr(module, class_name)
                return scraper_class()
            except (ImportError, AttributeError) as e:
                # Best-effort: report the failure and keep scanning other
                # matching members instead of raising (matches original).
                print(f"加载爬虫失败 {scraper_class_path}: {e}")

        return None

    # Backward-compatible alias for the old API name.
    @classmethod
    def which_object(cls, name: str) -> Optional[BaseScraper]:
        """Deprecated alias for :meth:`get_scraper`."""
        return cls.get_scraper(name)
