"""
PicknBuy24爬取辅助函数
"""
import asyncio
import re
from typing import Any, Dict, List

from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper import ScraperEngine, Target

from ....scraper.extractors.picknbuy24_extractor.image_only_extractor import ImageOnlyExtractor
from ....scraper.extractors.picknbuy24_extractor.list_page_extractor import ListPageExtractor, VehicleUrlItem
from ....scraper.extractors.picknbuy24_extractor.vehicle_detail_extractor import VehicleDetailExtractor
from ....scraper.models.failure_record_request import DetailExtractFailureRequest
from ....scraper.services.scraper_failed_record_service import ScraperFailedRecordService

# Module-level logger bound to this module's dotted name.
logger = get_logger(__name__)


async def scrape_picknbuy24_page_urls(page: int, engine: ScraperEngine, per_page: int = 100):
    """Scrape one PicknBuy24 listing page for vehicle URLs.

    Args:
        page: 1-based listing page number.
        engine: Shared scraper engine used to fetch the page.
        per_page: Number of listings requested per page.

    Returns:
        The engine's extraction results — VehicleUrlItem objects rather
        than plain URL strings.
    """
    listing_url = (
        "https://www.picknbuy24.com/usedcar/"
        f"?sort=refno1&limit={per_page}&page={page}"
    )
    return await engine.scrape(Target(url=listing_url, extractor=ListPageExtractor()))


async def scrape_picknbuy24_vehicles_details(urls: List[str], engine: ScraperEngine) -> List[Dict[str, Any]]:
    """Concurrently scrape PicknBuy24 vehicle detail pages.

    Args:
        urls: Detail-page URLs to scrape.
        engine: Shared scraper engine used for every request.

    Returns:
        One vehicle-data dict per successfully scraped URL; URLs that
        fail are logged and dropped from the result.
    """
    async def scrape_single(url: str):
        try:
            target = Target(url=url, extractor=VehicleDetailExtractor(), metadata={"extract_images": False})
            results = await engine.scrape(target)

            for item in results:
                if hasattr(item, 'success') and item.success:
                    return item.vehicle_data
            return None
        except Exception as e:
            # Best-effort batch: log the failure instead of silently swallowing it,
            # matching the logging style of the failure-record variant below.
            logger.warning(f"车辆详情爬取异常 {url}: {e}")
            return None

    # Cap concurrency at 5 simultaneous detail requests.
    semaphore = asyncio.Semaphore(5)

    async def scrape_with_limit(url):
        async with semaphore:
            return await scrape_single(url)

    # Fan out; return_exceptions keeps one bad task from cancelling the rest.
    results = await asyncio.gather(*[scrape_with_limit(url) for url in urls], return_exceptions=True)

    # Explicit None/Exception checks: a falsy-but-valid payload (e.g. an empty
    # dict) must not be discarded by a bare truthiness test.
    return [r for r in results if r is not None and not isinstance(r, Exception)]


async def scrape_picknbuy24_vehicle_images(detail_url: str, engine: ScraperEngine) -> List[Dict[str, Any]]:
    """Scrape only the image links from a PicknBuy24 vehicle detail page.

    Uses the dedicated image-only extractor so no other detail fields
    are parsed.

    Args:
        detail_url: The vehicle detail-page URL.
        engine: Shared scraper engine.

    Returns:
        The extracted image records, or an empty list on any failure.
    """
    try:
        target = Target(url=detail_url, extractor=ImageOnlyExtractor())
        results = await engine.scrape(target)

        for item in results:
            if hasattr(item, 'success') and item.success:
                return item.images

        return []

    except Exception as e:
        # Log instead of silently swallowing, consistent with the other helpers.
        logger.warning(f"车辆图片爬取异常 {detail_url}: {e}")
        return []


async def scrape_picknbuy24_vehicles_details_with_failure_record(
    page_items,
    engine: ScraperEngine,
    failed_record_service: ScraperFailedRecordService = None,
    task_id: str = None
) -> List[Dict[str, Any]]:
    """Concurrently scrape PicknBuy24 vehicle details, persisting failures.

    Args:
        page_items: VehicleUrlItem objects (or plain URL strings).
        engine: Shared scraper engine.
        failed_record_service: Optional service used to persist failure records.
        task_id: Task identifier attached to failure records. Failures are
            recorded only when both the service and a task_id are provided.

    Returns:
        Vehicle-data dicts for every successfully scraped item.
    """
    logger.info(f"开始并发爬取 {len(page_items)} 个车辆详情")

    async def record_failure(ref_no: str, url: str, error: Exception):
        # Single place that builds/persists a failure record; no-op unless
        # the caller supplied both the service and a task id.
        if failed_record_service and task_id:
            await failed_record_service.record_detail_extract_failure(
                DetailExtractFailureRequest(
                    site_name="picknbuy24",
                    error=error,
                    task_id=task_id,
                    item_reference=ref_no,
                    target_url=url
                )
            )

    async def scrape_single(item):
        url = item.url if hasattr(item, 'url') else str(item)
        vehicle_type = item.vehicle_type if hasattr(item, 'vehicle_type') else ""
        ref_no = extract_ref_from_url(url)
        try:
            # Only vehicle_type is forwarded; image extraction is deferred.
            metadata = {
                "extract_images": False,
                "vehicle_type": vehicle_type
            }

            target = Target(url=url, extractor=VehicleDetailExtractor(), metadata=metadata)
            results = await engine.scrape(target)

            if not results:
                logger.warning(f"车辆 {ref_no} 详情爬取返回空结果集")
                await record_failure(ref_no, url, Exception("车辆详情爬取返回空结果集"))
                return None

            # Distinct loop name so the `item` parameter is not shadowed.
            for result_item in results:
                if hasattr(result_item, 'success') and result_item.success:
                    logger.debug(f"车辆 {ref_no} 详情爬取成功")
                    return result_item.vehicle_data
                else:
                    logger.warning(f"车辆 {ref_no} 详情item.success={getattr(result_item, 'success', 'no_attr')}")

            # A result set with no successful item also counts as a failure.
            logger.warning(f"车辆 {ref_no} 详情爬取无有效数据")
            await record_failure(ref_no, url, Exception("车辆详情爬取无有效数据"))
            return None
        except Exception as e:
            logger.warning(f"车辆 {ref_no} 详情爬取异常: {e}")
            await record_failure(ref_no, url, e)
            return None

    # Conservative concurrency (3 simultaneous detail requests) to avoid failures.
    semaphore = asyncio.Semaphore(3)

    async def scrape_with_limit(item):
        async with semaphore:
            return await scrape_single(item)

    # Fan out; return_exceptions keeps one bad task from cancelling the rest.
    results = await asyncio.gather(*[scrape_with_limit(item) for item in page_items], return_exceptions=True)

    # Tally and filter. Explicit None/Exception checks so a falsy-but-valid
    # payload (e.g. an empty dict) is not silently discarded.
    valid_results = []
    failed_count = 0

    for i, result in enumerate(results):
        if result is not None and not isinstance(result, Exception):
            valid_results.append(result)
        else:
            failed_count += 1
            if isinstance(result, Exception):
                item = page_items[i] if i < len(page_items) else None
                url = item.url if item and hasattr(item, 'url') else 'unknown'
                ref_no = extract_ref_from_url(url) if url != 'unknown' else 'unknown'
                logger.warning(f"车辆 {ref_no} 详情爬取发生异常: {result}")

    logger.info(f"车辆详情爬取完成: 成功 {len(valid_results)}/{len(page_items)}, 失败 {failed_count}")

    return valid_results


# Compiled once at import time: the refno query-parameter value, up to
# the next '&'. Avoids a per-call `import re` inside the function.
_REF_NO_PATTERN = re.compile(r'refno=([^&]+)')


def extract_ref_from_url(url: str) -> str:
    """Extract the `refno` query-parameter value from a PicknBuy24 URL.

    Args:
        url: Any URL, e.g. "...?refno=AB1234&page=2".

    Returns:
        The refno value, or an empty string when no refno parameter is present.
    """
    match = _REF_NO_PATTERN.search(url)
    return match.group(1) if match else ""
