from urllib.parse import urljoin

from bs4 import BeautifulSoup
from ..logger import logger

from .spider_base import SpiderBase
from urllib.parse import urlparse, parse_qs
from ..models.seller_info import SellerInfo, conn_pool


class SellerInfoSpider(SpiderBase):
    """Spider that scrapes the seller-information section of an Amazon seller page
    and persists the parsed address/name/storefront data."""

    def extract_seller_info(self, html_content):
        """Parse the seller-info section out of a seller-page HTML document.

        Args:
            html_content: Raw HTML of an Amazon seller page (``/sp?...`` URL).

        Returns:
            A dict with keys ``seller_name``, ``detail_addr``, ``city``,
            ``province``, ``postal_code``, ``country`` and
            ``seller_store_link``, or ``None`` when the seller section is
            missing or its layout is not one of the recognized shapes.
        """
        soup = BeautifulSoup(html_content, features="html.parser")
        seller_section = soup.find(id='page-section-detail-seller-info')

        if not seller_section:
            return None

        # Each address component is rendered as one indented row in the section.
        target_elements = seller_section.select('div.a-row.a-spacing-none.indent-left')
        elements_content = [ele.get_text(strip=True) for ele in target_elements]

        # Guard: on unexpected layouts the seller-name node can be absent;
        # chaining .get_text() directly would raise AttributeError.
        name_tag = seller_section.select_one(
            'div > div > div > div:nth-child(2) > span:nth-child(2)')
        if name_tag is None:
            return None
        seller_name = name_tag.get_text().strip()

        store_link_tag = soup.select_one('#seller-info-storefront-link span a')
        seller_store_link = (
            urljoin('https://www.amazon.com', store_link_tag['href'])
            if store_link_tag else ''
        )

        # Supported layouts have 5, 6 or 7 rows. The last four rows are always
        # [city, province, postal_code, country]; any rows before those are
        # detail-address fragments listed innermost-first, so they are joined
        # in reverse order. Other row counts are unsupported -> None.
        if len(elements_content) not in (5, 6, 7):
            return None
        city, province, postal_code, country = elements_content[-4:]
        detail_addr = ''.join(reversed(elements_content[:-4]))
        return {
            'seller_name': seller_name,
            'detail_addr': detail_addr,
            'city': city,
            'province': province,
            'postal_code': postal_code,
            'country': country,
            'seller_store_link': seller_store_link,
        }

    def crawl(self, url):
        """Fetch *url*, extract the seller info and save it to the database.

        The seller id is taken from the ``seller`` query parameter of the URL
        (empty string when absent). Always logs the outcome, even when no
        seller info could be extracted.
        """
        html_content = self.get(url)
        parsed_url = urlparse(url)
        query_params = parse_qs(parsed_url.query)
        seller_id = query_params.get('seller', [''])[0]
        seller_info = self.extract_seller_info(html_content)
        if seller_info:
            seller_info['seller_id'] = seller_id
            conn = conn_pool.get_connection()
            try:
                SellerInfo.save_seller_info(
                    conn,
                    seller_info['seller_id'],
                    seller_info['country'],
                    seller_info['postal_code'],
                    seller_info['province'],
                    seller_info['city'],
                    seller_info['detail_addr'],
                    seller_info['seller_name'],
                    seller_info['seller_store_link'],
                )
            finally:
                # Return the connection to the pool even if the save fails.
                conn.close()
        logger.info(f'url: {url}, 卖家信息: {seller_info}')


if __name__ == '__main__':
    # Ad-hoc smoke test: fetch a couple of live seller pages and print the
    # parsed seller info. Requires network access; no DB writes are made
    # here (crawl() is deliberately not used).
    spider = SellerInfoSpider()
    test_urls = (
        'https://www.amazon.com/sp?ie=UTF8&seller=APKXEAVHKYW0Q&asin=B0CDQ29RD4',
        'https://www.amazon.com/sp?ie=UTF8&seller=ANOU17ID11RDY&asin=B0DMVFL3LW',
    )
    try:
        # The two URL cases were previously duplicated verbatim; loop instead.
        for url in test_urls:
            result = spider.get(url)
            if result:
                seller_info = spider.extract_seller_info(result)
                print(seller_info)
    finally:
        spider.close()