"""
PicknBuy24图片专用提取器 - 只提取图片链接，不解析其他信息
"""
from typing import List, Dict, Any
from bs4 import BeautifulSoup
from urllib.parse import urljoin

from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper.interfaces import Extractor
from axiom_boot.scraper.models import Response, Target, Item

logger = get_logger(__name__)


class ImageOnlyItem(Item):
    """Item carrying only extracted image links, no other vehicle data."""
    # Image descriptors produced by ImageOnlyExtractor._extract_images;
    # each dict holds "url", "type", "index" and "filename" keys.
    images: List[Dict[str, Any]]
    # False when extraction raised and `images` is empty (see ImageOnlyExtractor).
    success: bool = True


class ImageOnlyExtractor(Extractor):
    """PicknBuy24 image-only extractor: extracts image links and nothing else."""

    def extract(self, response: Response, target: Target) -> List[Item]:
        """Extract only image links from the page; no other vehicle info.

        Returns a single-element list containing an ImageOnlyItem. On any
        parsing error a failure item (success=False, empty image list) is
        returned instead of raising, so the pipeline keeps running.
        """
        try:
            soup = BeautifulSoup(response.text, "html.parser")

            logger.debug(f"开始提取图片链接: {response.url}")

            images = self._extract_images(soup, response.url)

            logger.info(f"提取到 {len(images)} 张图片链接")

            return [ImageOnlyItem(images=images, success=True)]

        except Exception as e:
            # Boundary handler: logger.exception keeps the same message but
            # also records the full traceback (logger.error dropped it).
            logger.exception(f"图片提取失败: {e}")
            return [ImageOnlyItem(images=[], success=False)]

    @staticmethod
    def _guess_extension(url: str) -> str:
        """Guess an image file extension from a URL; defaults to ".jpg".

        First substring match wins, mirroring the original check order.
        """
        lowered = url.lower()
        for ext in (".png", ".jpeg", ".webp"):
            if ext in lowered:
                return ext
        return ".jpg"

    def _extract_images(self, soup: BeautifulSoup, base_url: str) -> List[Dict[str, str]]:
        """Extract PicknBuy24 full-size vehicle images.

        Primary strategy: <a href> targets inside <figure> elements of the
        page's "gallery" div (these point at the large images). Fallback when
        that yields nothing: any <a> whose href matches a known large-image
        filename pattern.

        Args:
            soup: Parsed page.
            base_url: Page URL used to resolve relative image links.

        Returns:
            List of dicts with "url", "type", "index" and "filename" keys.
        """
        images: List[Dict[str, Any]] = []

        # Primary: the gallery area holds one <figure> per vehicle photo.
        gallery = soup.find("div", class_="gallery")
        logger.debug(f"查找gallery区域: {'找到' if gallery else '未找到'}")

        if gallery:
            figures = gallery.find_all("figure")
            logger.debug(f"在gallery中找到{len(figures)}个figure元素")

            for i, figure in enumerate(figures):
                # The <a> href is the large-image link (the <img> inside is a thumbnail).
                link = figure.find("a")
                href = link.get("href") if link else None
                if not href:
                    continue
                logger.debug(f"提取大图链接 {i+1}: {href}")

                images.append({
                    "url": urljoin(base_url, href),
                    "type": "large",
                    "index": i + 1,
                    "filename": f"vehicle_{i+1:02d}{self._guess_extension(href)}",
                })

        # Fallback: scan every <a> for hrefs that look like large-image files.
        if not images:
            logger.debug("gallery方式未找到图片，尝试回退方式")
            links = soup.find_all("a", href=True)
            logger.debug(f"找到{len(links)}个<a>标签")

            for link in links:
                href = link.get("href")
                if href and any(pat in href.lower() for pat in ("_l3.jpg", "_l.jpg", "_large.jpg")):
                    logger.debug(f"回退方式找到图片: {href}")
                    images.append({
                        "url": urljoin(base_url, href),
                        "type": "large",
                        "index": len(images) + 1,
                        "filename": f"fallback_{len(images)+1:02d}{self._guess_extension(href)}",
                    })

        logger.debug(f"最终提取到{len(images)}张图片")
        return images
