import hashlib
import os
from typing import List, Optional
from bs4 import BeautifulSoup, Tag
from urllib.parse import urljoin

from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper.interfaces import Extractor
from axiom_boot.scraper.models import Response, Item, Target
from src.scraper.models.SupplierItem import SupplierItem

logger = get_logger(__name__)


class HomepageExtractor(Extractor):
    """Extract the real "About Us" and "Contact Us" detail-page links
    from a supplier's homepage.

    Links are located strictly by URL pattern (a substring of ``href``)
    rather than by anchor text, because the visible link text varies
    between supplier page templates while the URL structure does not.
    """

    # Known CSS selectors for the navigation container, tried in order.
    # "div.sr-nav-wrap" was added for a newer page template.
    _NAV_SELECTORS = (
        "div.site-nav",
        "div.nav-box",
        "nav.main-nav",
        ".J-main-nav",
        "div.sr-nav-wrap",
    )

    def extract(self, response: Response, target: Target) -> List[Item]:
        """Parse the homepage and populate ``company_url`` / ``contact_url``
        on the target's item.

        Args:
            response: The fetched homepage response (``.text`` is HTML).
            target: Carries an optional existing ``SupplierItem`` in
                ``metadata["item"]``; a fresh one is created otherwise.

        Returns:
            A single-element list containing the (possibly partially
            filled) item — returned even on failure so the pipeline
            keeps whatever data was gathered so far.
        """
        soup = BeautifulSoup(response.text, "html.parser")
        item = target.metadata.get("item", SupplierItem())

        logger.info(f"开始从主页 {response.url} 提取详情页链接...")

        nav_container = None
        for selector in self._NAV_SELECTORS:
            container = soup.select_one(selector)
            if container:
                nav_container = container
                logger.info(f"成功使用选择器 '{selector}' 定位到导航容器。")
                break

        if not nav_container:
            # No known selector matched: dump the HTML for offline analysis
            # and return the item unchanged rather than raising.
            logger.error(f"在页面 {response.url} 未能使用任何已知选择器找到导航容器。将保存页面用于调试。")
            self._save_html_for_debugging(soup, response.url)
            return [item]

        # Find links by URL pattern, never by link text (see class docstring).
        item.company_url = self._find_link_by_href_pattern(nav_container, "company-", response.url)
        item.contact_url = self._find_link_by_href_pattern(nav_container, "contact-info.html", response.url)

        # Report the final extraction result explicitly.
        if not item.company_url:
            logger.warning("最终未能提取到 'About Us' 链接。")
        if not item.contact_url:
            logger.warning("最终未能提取到 'Contact Us' 链接。")

        return [item]

    def _save_html_for_debugging(self, soup: BeautifulSoup, url: str) -> None:
        """Save the page HTML to storage/debug for analysis when
        extraction fails. The filename is the MD5 of the page URL so
        repeated failures for the same page overwrite one file.
        """
        debug_path = "storage/debug"
        os.makedirs(debug_path, exist_ok=True)
        filename = hashlib.md5(url.encode()).hexdigest() + ".html"
        file_path = os.path.join(debug_path, filename)
        try:
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(soup.prettify())
            logger.info(f"【调试模式】页面 HTML 已保存到: {file_path}")
        except Exception as e:
            # Best-effort: a failed debug dump must never break the pipeline.
            logger.error(f"保存调试文件失败: {e}")

    def _find_link_by_href_pattern(self, scope: Tag, pattern: str, base_url: str) -> str:
        """Within the navigation container, find the first ``<a>`` whose
        ``href`` contains *pattern* and return its absolute URL.

        Returns an empty string when no matching link exists.
        """
        link_tag = scope.find("a", href=lambda href: href and pattern in href)

        if link_tag:
            url = self._normalize_url(link_tag.get("href"), base_url)
            logger.info(f"成功通过 href 模式 '{pattern}' 找到链接: {url}")
            return url
        else:
            logger.warning(f"在当前导航容器内，未能找到 href 包含 '{pattern}' 的链接。")
            return ""

    def _normalize_url(self, url: str, base_url: str) -> str:
        """Resolve *url* against *base_url*, handling absolute,
        root-relative, document-relative and protocol-relative forms.
        """
        if url.startswith("//"):
            # Protocol-relative URL: assume HTTPS.
            return "https:" + url
        # urljoin returns absolute URLs unchanged and correctly resolves
        # both "/path" and bare "page.html" forms. The previous version
        # returned document-relative hrefs (e.g. "contact-info.html")
        # unresolved, producing unusable links downstream.
        return urljoin(base_url, url)