"""
PicknBuy24重试辅助函数
"""
import asyncio
from typing import Any, Dict, List, Optional

from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper import ScraperEngine

from ....scraper.models.failure_record_request import DetailExtractFailureRequest, PageExtractFailureRequest
from ....scraper.services.scraper_failed_record_service import ScraperFailedRecordService
from .scrapers import scrape_picknbuy24_page_urls, scrape_picknbuy24_vehicles_details_with_failure_record

logger = get_logger(__name__)


async def scrape_page_with_retry(
    page: int,
    engine: ScraperEngine,
    per_page: int,
    max_retries: int = 3,
    failed_record_service: Optional[ScraperFailedRecordService] = None,
    task_id: Optional[str] = None,
):
    """Scrape the listing URLs of one result page, retrying on failure.

    Args:
        page: 1-based page number to scrape.
        engine: Scraper engine passed through to the page scraper.
        per_page: Items-per-page value used in the listing URL query string.
        max_retries: Total number of attempts before giving up.
        failed_record_service: Optional service used to persist the final
            failure; only consulted together with ``task_id``.
        task_id: Identifier of the current scraping task, used when
            recording the final failure.

    Returns:
        The result of ``scrape_picknbuy24_page_urls`` on success, or an
        empty list after all ``max_retries`` attempts fail.
    """
    target_url = f"https://www.picknbuy24.com/usedcar/?sort=refno1&limit={per_page}&page={page}"

    for attempt in range(max_retries):
        try:
            return await scrape_picknbuy24_page_urls(page, engine, per_page)
        except Exception as e:
            if attempt < max_retries - 1:
                # Linear backoff: 2s, 4s, ... between attempts.
                wait_time = (attempt + 1) * 2
                logger.warning(f"第{page}页爬取失败，{wait_time}秒后重试 (第{attempt + 1}/{max_retries}次)")
                await asyncio.sleep(wait_time)
            else:
                logger.error(f"第{page}页爬取最终失败: {e}")
                # Persist the final page-extraction failure for later replay.
                if failed_record_service and task_id:
                    await failed_record_service.record_page_extract_failure(
                        site_name="picknbuy24",
                        page_number=page,
                        error=e,
                        target_url=target_url,
                        task_id=task_id,
                    )

    return []


async def scrape_vehicles_with_retry(
    page_items,
    engine: ScraperEngine,
    max_retries: int = 2,
    failed_record_service: Optional[ScraperFailedRecordService] = None,
    task_id: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Scrape vehicle details for a batch of page items, retrying on failure.

    Args:
        page_items: Items collected from a listing page; each item is
            expected to expose a ``url`` attribute (falls back to ``str()``).
        engine: Scraper engine passed through to the detail scraper.
        max_retries: Total number of attempts before giving up.
        failed_record_service: Optional service used to persist per-item
            failures after the final attempt; only consulted together with
            ``task_id``.
        task_id: Identifier of the current scraping task, used when
            recording failures.

    Returns:
        A list of vehicle-detail dicts on success, or an empty list when
        ``page_items`` is empty or all ``max_retries`` attempts fail.
    """
    if not page_items:
        return []

    for attempt in range(max_retries):
        try:
            return await scrape_picknbuy24_vehicles_details_with_failure_record(
                page_items, engine, failed_record_service, task_id
            )
        except Exception as e:
            if attempt < max_retries - 1:
                # Linear backoff: 3s, 6s, ... between attempts.
                wait_time = (attempt + 1) * 3
                logger.warning(f"车辆详情爬取失败，{wait_time}秒后重试 (第{attempt + 1}/{max_retries}次)")
                await asyncio.sleep(wait_time)
            else:
                logger.error(f"车辆详情爬取最终失败: {e}")
                # Record one detail-extraction failure per item in the batch.
                if failed_record_service and task_id:
                    # Local import to avoid a circular import at module load.
                    from .scrapers import extract_ref_from_url
                    for item in page_items:
                        url = item.url if hasattr(item, 'url') else str(item)
                        ref_no = extract_ref_from_url(url)
                        await failed_record_service.record_detail_extract_failure(
                            site_name="picknbuy24",
                            item_reference=ref_no,
                            error=e,
                            target_url=url,
                            task_id=task_id,
                        )

    return []
