"""
PicknBuy24爬取任务
"""
from typing import Dict, Any
import asyncio
from axiom_boot.task import task
from axiom_boot.scraper import ScraperEngine
from axiom_boot.logging.setup import get_logger

from ...vehicle.service.vehicle_service import VehicleService
from ...scraper.services.page_status_service import PageStatusService
from ...scraper.services.scraper_failed_record_service import ScraperFailedRecordService
from ...scraper.services.scraper_task_service import ScraperTaskService
from ...scraper.models.failure_record_request import DataSaveFailureRequest

from .helpers.retries import scrape_page_with_retry, scrape_vehicles_with_retry

logger = get_logger(__name__)


@task(name="scrape_picknbuy24_vehicles", timeout=2400)
async def scrape_picknbuy24_vehicles(
    start_page: int,
    end_page: int,
    task_id: str,
    per_page: int,
    vehicle_service: VehicleService,
    engine: ScraperEngine,
    page_status_service: PageStatusService,
    failed_record_service: ScraperFailedRecordService,
    scraper_task_service: ScraperTaskService
) -> dict:
    """Scrape PicknBuy24 vehicle pages in a small, highly concurrent batch.

    Processes the inclusive page range [start_page, end_page] concurrently.
    Each page is saved to the database as soon as it is scraped, so data from
    completed pages survives later failures. Page-level status and per-item
    failures are recorded through the injected services, and the final task
    outcome is written via ``scraper_task_service``.

    Args:
        start_page: First listing page to scrape (inclusive).
        end_page: Last listing page to scrape (inclusive).
        task_id: Identifier used for logging and task bookkeeping.
        per_page: Expected number of items per listing page (also used to
            estimate the failed-item count when a whole page fails).
        vehicle_service: Persistence service for scraped vehicle records.
        engine: Scraper engine used to fetch pages and vehicle details.
        page_status_service: Tracks started/completed/failed status per page.
        failed_record_service: Records per-item scrape and save failures.
        scraper_task_service: Records the final task statistics.

    Returns:
        A summary dict. ``success`` is True unless the task body itself
        raised; per-page failures are reported in the counters instead.
    """
    logger.info(f"【{task_id}】开始爬取PicknBuy24页面 {start_page}-{end_page}")

    total_processed = 0
    failed_pages = 0
    success_count = 0
    failed_count = 0

    try:
        async def process_page(page: int) -> int:
            """Scrape one page, save it immediately, and return the saved count."""
            nonlocal total_processed, success_count, failed_count, failed_pages
            site_name = "picknbuy24"

            try:
                # Periodic-task mode: never skip based on prior page status —
                # every run re-scrapes the page from scratch.
                logger.info(f"【{task_id}】开始爬取第{page}页（定期任务模式）")

                # Mark the page as in-progress before any network work.
                await page_status_service.mark_page_started(site_name, page, per_page, task_id)

                # Fetch this page's vehicle URL list (with retries).
                page_items = await scrape_page_with_retry(
                    page, engine, per_page, max_retries=3,
                    failed_record_service=failed_record_service, task_id=task_id
                )
                if not page_items:
                    await page_status_service.mark_page_failed(site_name, page, per_page, "页面无数据")
                    # Fix: previously this path returned 0 without counting,
                    # so failed_pages stayed at 0 and success_pages over-reported.
                    failed_pages += 1
                    return 0

                # Concurrently fetch vehicle details for the page (with retries).
                vehicles_data = await scrape_vehicles_with_retry(
                    page_items, engine, max_retries=2,
                    failed_record_service=failed_record_service, task_id=task_id
                )
                if not vehicles_data:
                    await page_status_service.mark_page_failed(site_name, page, per_page, "车辆详情为空")
                    failed_pages += 1
                    return 0

                # Persist immediately in update mode so this page's data is
                # safe even if a later page (or the task) fails.
                try:
                    logger.info(f"🔄 【{task_id}】第{page}页准备保存 {len(vehicles_data)} 辆车数据...")
                    saved_count = await vehicle_service.batch_save_vehicles_only(
                        vehicles_data, source="picknbuy24", update_existing=True
                    )
                    logger.info(f"✅ 【{task_id}】第{page}页保存完成！成功保存 {saved_count} 辆车")
                except Exception as e:
                    logger.error(f"❌ 【{task_id}】第{page}页批量保存失败: {e}")
                    # Record a save-failure entry for each vehicle that has a
                    # reference number, so the batch can be retried later.
                    for vehicle_data in vehicles_data:
                        ref_no = vehicle_data.get('ref_no')
                        if ref_no:
                            await failed_record_service.record_data_save_failure(
                                DataSaveFailureRequest(
                                    site_name="picknbuy24",
                                    error=e,
                                    task_id=task_id,
                                    item_reference=ref_no,
                                    failure_data=vehicle_data
                                )
                            )
                    saved_count = 0

                # Mark the page as done (even when saving failed, the page
                # itself was scraped — saved_count reflects the save outcome).
                await page_status_service.mark_page_completed(
                    site_name, page, per_page, saved_count, len(page_items)
                )

                logger.info(f"【{task_id}】第{page}页完成: 爬取{len(vehicles_data)}辆，保存{saved_count}辆")

                # Update the task-wide counters.
                total_processed += len(vehicles_data)
                success_count += saved_count

                return saved_count

            except Exception as e:
                # Any unexpected error fails the whole page.
                await page_status_service.mark_page_failed(site_name, page, per_page, str(e))
                logger.error(f"【{task_id}】第{page}页处理失败: {e}")
                failed_pages += 1
                failed_count += per_page  # estimate: assume the full page was lost
                return 0

        # Scrape every page in the range concurrently.
        page_results = await asyncio.gather(
            *[process_page(page) for page in range(start_page, end_page + 1)],
            return_exceptions=True
        )

        # Aggregate results (each page has already been saved individually).
        # process_page handles its own exceptions, so results are normally all
        # ints; the Exception branch is a defensive net for anything that
        # leaked past it (e.g. cancellation) and counts it as a failed page.
        total_saved = 0
        for i, result in enumerate(page_results):
            if isinstance(result, Exception):
                failed_pages += 1
                logger.error(f"【{task_id}】第{start_page + i}页异常: {result}")
            else:
                total_saved += result

        success_pages = (end_page - start_page + 1) - failed_pages
        logger.info(f"【{task_id}】任务完成: 成功{success_pages}页，失败{failed_pages}页，总保存{total_saved}辆")

        # Overall save success rate (guard against division by zero).
        success_rate = (success_count / total_processed * 100) if total_processed > 0 else 0

        # Record the final outcome; a bookkeeping failure must not fail the task.
        try:
            final_stats = {
                "total_processed": total_processed,
                "success_count": success_count,
                "failed_count": failed_count,
                "success_rate": success_rate,
                # NOTE(review): here "failed_items" carries a page count, while
                # the error path below sends a list — confirm what the task
                # service expects before unifying.
                "failed_items": failed_pages
            }
            await scraper_task_service.complete_task(
                task_id=task_id,
                final_stats=final_stats
            )
            logger.info(f"【{task_id}】任务已完成并更新到数据库 - 成功率: {success_rate:.1f}%")
        except Exception as e:
            logger.error(f"【{task_id}】完成任务失败: {e}")

        return {
            "success": True,
            "task_id": task_id,
            "pages_range": f"{start_page}-{end_page}",
            "success_pages": success_pages,
            "failed_pages": failed_pages,
            "total_saved": total_saved,
            "total_processed": total_processed,
            "success_count": success_count,
            "failed_count": failed_count,
            "success_rate": success_rate
        }

    except Exception as e:
        logger.error(f"【{task_id}】任务失败: {e}")

        # Compute whatever success rate the partial counters allow.
        success_rate = (success_count / total_processed * 100) if total_processed > 0 else 0

        # Record the failed outcome; again, bookkeeping errors are only logged.
        try:
            final_stats = {
                "total_processed": total_processed,
                "success_count": success_count,
                "failed_count": failed_count,
                "success_rate": success_rate,
                "failed_items": []
            }
            await scraper_task_service.complete_task(
                task_id=task_id,
                final_stats=final_stats,
                error_summary=str(e)
            )
            logger.info(f"【{task_id}】任务失败状态已更新到数据库")
        except Exception as update_error:
            logger.error(f"【{task_id}】更新任务失败状态失败: {update_error}")

        return {
            "success": False,
            "task_id": task_id,
            "total_processed": total_processed,
            "success_count": success_count,
            "failed_count": failed_count,
            "failed_pages": failed_pages,
            "error": str(e)
        }
