from urllib.parse import urljoin
from ..logger import logger
from ..exceptions import TaskStopException
from .seller_info_spider import SellerInfoSpider

from bs4 import BeautifulSoup

from .spider_base import SpiderBase
from .seller_info_spider import SellerInfoSpider

class SellerLinkSpider(SpiderBase):
    '''Spider that extracts the seller-info page link from an Amazon product page.'''

    def extract_link(self, html_content, url: str) -> str:
        """Parse a product page and return the absolute seller-profile URL.

        Args:
            html_content: raw HTML of the product detail page.
            url: the product page URL (used only for log/exception messages).

        Returns:
            The absolute seller-profile URL, or '' when the page has no
            seller-profile link.

        Raises:
            TaskStopException: when the merchant shown on the page is
                Amazon.com itself (no third-party seller to crawl).
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        # Original Selenium selector converted to a BS4 CSS expression.
        element = soup.select_one('#merchantInfoFeature_feature_div > div:nth-child(2) > div:nth-child(1) > span:nth-child(1)')
        if element and element.get_text(strip=True) == 'Amazon.com':
            # Plain string: the original f-string had no placeholder.
            logger.info("页面是 Amazon.com 页面，跳过")
            raise TaskStopException(f"页面是 Amazon.com 页面，跳过 {url}")

        # Locate the seller-profile anchor via bs4.
        merchant_link_element = soup.select_one('a#sellerProfileTriggerId')
        link = merchant_link_element.get('href') if merchant_link_element else ''
        if not link:
            return ''
        # The href is usually relative; resolve it against the site root.
        return urljoin('https://www.amazon.com', link)

    def crawl(self, url):
        """Fetch the product page, extract the seller link, and crawl it.

        Args:
            url: the Amazon product page URL to process.
        """
        html_content = self.get(url)
        seller_link = self.extract_link(html_content, url)
        logger.info(f"url: {url}, 卖家信息链接: {seller_link}")
        if not seller_link:
            logger.info(f"url: {url}, 卖家信息链接为空，跳过")
            return
        # Fix: the child spider was previously never closed (resource leak).
        # Mirror the try/finally close() pattern used by the __main__ block;
        # assumes SellerInfoSpider inherits close() from SpiderBase — confirm.
        info_spider = SellerInfoSpider()
        try:
            info_spider.crawl(seller_link)
        finally:
            info_spider.close()


if __name__ == '__main__':
    # Ad-hoc manual run against a live Amazon product page.
    test_url = "https://www.amazon.com/KOTDNING-Hollywood-Dimmable-Detachable-Magnification/dp/B091B1J8G5/ref=zg_bs_g_13749901_d_sccl_26/147-0688270-9173813?th=1"
    link_spider = SellerLinkSpider()
    try:
        link_spider.crawl(test_url)
    finally:
        # Always release the spider's resources, even if crawl raises.
        link_spider.close()