"""
PicknBuy24网站探测器 - 动态获取总车辆数和分页信息
"""
import re
from typing import Dict, Any, Optional, List
from bs4 import BeautifulSoup

from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper.interfaces import Extractor
from axiom_boot.scraper.models import Response, Target, Item

logger = get_logger(__name__)


class SiteInfo(Item):
    """Site statistics item (vehicle total and pagination info) for PicknBuy24."""
    # Total number of vehicles reported by the site (0 when probing failed).
    total_vehicles: int
    # Total number of listing pages (computed from the total, or read from pagination links).
    total_pages: int
    # Number of vehicles displayed per listing page.
    per_page: int
    # Page number the probed response corresponds to.
    current_page: int
    # True when a non-zero vehicle total could be extracted.
    success: bool
    # href of the highest-numbered pagination link, when available.
    last_page_url: str = ""


class SiteProbeExtractor(Extractor):
    """PicknBuy24 site probe - extracts the total vehicle count and pagination info.

    Several extraction strategies are tried in order because the page layout
    varies; the first strategy that yields a value wins.
    """

    def extract(self, response: Response, target: Target) -> List[Item]:
        """
        Extract the total vehicle count and pagination info from a
        PicknBuy24 listing page.

        Args:
            response: fetched page; HTML is read from ``response.text``.
            target: the request target; its URL may carry the ``limit=``
                per-page parameter.

        Returns:
            List[SiteInfo]: a single-element list with the site statistics.
            On any error a SiteInfo with ``success=False`` is returned
            instead of raising (best-effort probe).
        """
        try:
            soup = BeautifulSoup(response.text, "html.parser")
            logger.info("开始探测PicknBuy24网站信息...")

            # Strategy 1: total count from the "Result:" banner at the top.
            total_vehicles = self._extract_total_from_result(soup)

            # Strategy 2: page counts from the pagination navigation.
            pagination_info = self._extract_pagination_info(soup)

            # Strategy 3: per-page size from the URL's limit= parameter.
            per_page = self._extract_per_page_from_url(target.url)

            # Ceiling division when the total is known; otherwise fall back
            # to the largest page number seen in the pagination links.
            if total_vehicles > 0 and per_page > 0:
                total_pages = (total_vehicles + per_page - 1) // per_page
            else:
                total_pages = pagination_info.get('max_page', 0)

            site_info = SiteInfo(
                total_vehicles=total_vehicles,
                total_pages=total_pages,
                per_page=per_page,
                current_page=pagination_info.get('current_page', 1),
                success=total_vehicles > 0,
                last_page_url=pagination_info.get('last_page_url', '')
            )

            logger.info(f"网站探测完成: {total_vehicles}辆车, {total_pages}页, 每页{per_page}辆")
            return [site_info]

        except Exception as e:
            # Never propagate: report failure through the item itself.
            logger.error(f"PicknBuy24网站探测失败: {e}")
            return [SiteInfo(
                total_vehicles=0,
                total_pages=0,
                per_page=0,
                current_page=0,
                success=False
            )]

    def _extract_total_from_result(self, soup: BeautifulSoup) -> int:
        """Extract the total vehicle count, trying four strategies in turn.

        Returns:
            int: the extracted total, or 0 when no strategy succeeds.
        """
        try:
            # Strategy 1 - known markup:
            # <div class="result">Result: <span>71029</span> ... </div>
            result_div = soup.find('div', class_='result')
            if result_div:
                spans = result_div.find_all('span')
                if spans:
                    # The first span holds the total.
                    total_text = spans[0].get_text().strip()
                    if total_text.isdigit():
                        total = int(total_text)
                        logger.info(f"从result div的第一个span获取总数: {total}")
                        return total

            # Strategy 2 - the <span> following a "Result:" text node.
            # string= replaces the text= keyword deprecated in bs4 4.4+.
            result_text = soup.find(string=re.compile(r'Result:\s*', re.I))
            if result_text is not None and result_text.parent is not None:
                next_span = result_text.parent.find_next('span')
                if next_span and next_span.get_text().strip().isdigit():
                    total = int(next_span.get_text().strip())
                    logger.info(f"从Result文本后的span获取总数: {total}")
                    return total

            # Strategy 3 - regex over the whole page text: "Result: <n>".
            page_text = soup.get_text()
            match = re.search(r'Result:\s*(\d+)', page_text, re.I)
            if match:
                total = int(match.group(1))
                logger.info(f"从正则匹配获取总数: {total}")
                return total

            # Strategy 4 - other plausible "total" phrasings; only accept
            # values large enough to look like a real stock count.
            total_patterns = [
                r'Total.*?(\d+)',
                r'(\d+)\s*cars?\s*found',
                r'(\d+)\s*vehicles?',
                r'Stock:\s*(\d+)'
            ]
            for pattern in total_patterns:
                match = re.search(pattern, page_text, re.I)
                if match:
                    total = int(match.group(1))
                    if total > 1000:  # plausible vehicle total
                        logger.info(f"从模式'{pattern}'获取总数: {total}")
                        return total

            logger.warning("未能找到总车辆数信息")
            return 0

        except Exception as e:
            logger.error(f"提取总车辆数失败: {e}")
            return 0

    def _extract_pagination_info(self, soup: BeautifulSoup) -> Dict[str, Any]:
        """Extract page-count info from the numbered pagination links.

        Returns:
            dict with keys 'current_page', 'max_page', 'last_page_url';
            defaults are 1 / 0 / '' when nothing is found or on error.
        """
        try:
            pagination_info = {
                'current_page': 1,
                'max_page': 0,
                'last_page_url': ''
            }

            # Numbered page links (e.g. 1426 1427 ... 1435).
            # string= replaces the deprecated text= keyword; a single-pass
            # running max replaces the original per-link max() recomputation.
            max_page = 0
            last_page_url = ''
            for link in soup.find_all('a', string=re.compile(r'^\d+$')):
                try:
                    page_num = int(link.get_text().strip())
                except ValueError:
                    continue
                # >= keeps the original tie behavior: a later duplicate of
                # the maximum overwrites the remembered href.
                if page_num >= max_page:
                    max_page = page_num
                    href = link.get('href', '')
                    if href:
                        last_page_url = href
            if max_page > 0:
                pagination_info['max_page'] = max_page
                pagination_info['last_page_url'] = last_page_url
                logger.info(f"从分页导航获取最大页数: {pagination_info['max_page']}")

            # Current-page marker: a highlighted span, or a bare <strong>
            # holding just the page number.
            current_page_elem = soup.find('span', class_=re.compile(r'current|active')) or \
                              soup.find('strong', string=re.compile(r'^\d+$'))
            if current_page_elem:
                try:
                    pagination_info['current_page'] = int(current_page_elem.get_text().strip())
                except ValueError:
                    pass

            return pagination_info

        except Exception as e:
            logger.error(f"提取分页信息失败: {e}")
            return {'current_page': 1, 'max_page': 0, 'last_page_url': ''}

    def _extract_per_page_from_url(self, url: str) -> int:
        """Extract the per-page item count from the URL's ``limit=`` parameter.

        Returns:
            int: the parsed limit, or the site default of 20 when the
            parameter is absent or extraction fails.
        """
        try:
            match = re.search(r'limit=(\d+)', url)
            if match:
                per_page = int(match.group(1))
                logger.info(f"从URL获取每页数量: {per_page}")
                return per_page

            # Site default page size.
            return 20

        except Exception as e:
            logger.error(f"提取每页数量失败: {e}")
            return 20
