"""
PicknBuy24重试任务
"""
from typing import List
from axiom_boot.task import task
from axiom_boot.logging.setup import get_logger

from ...scraper.services.picknbuy24_scraper_service import PicknBuy24ScraperService
from ...scraper.services.scraper_failed_record_service import ScraperFailedRecordService

logger = get_logger(__name__)


@task(name="retry_picknbuy24_failed_records_batch", timeout=1800)
async def retry_picknbuy24_failed_records_batch(
    task_id: str,
    main_task_id: str,
    failure_type: str,
    record_ids: List[int],
    site_name: str,  # currently unused; kept for task-signature compatibility with callers
    max_retry_count: int,
    picknbuy24_service: PicknBuy24ScraperService,
    failed_record_service: ScraperFailedRecordService
) -> dict:
    """
    Batch-retry failed scraper records of a single failure type (parallel variant).

    Loads each record by primary key, keeps only those whose retry_count is
    still below ``max_retry_count``, then dispatches the batch to the retry
    handler matching ``failure_type`` ("detail_extract", "data_save" or
    "file_download"). Unknown failure types are logged and counted as zero
    successes rather than raised.

    Args:
        task_id: Identifier of this batch task (used as log prefix).
        main_task_id: Identifier of the parent task, echoed in the result.
        failure_type: One of "detail_extract", "data_save", "file_download".
        record_ids: Primary keys of the failed records to retry.
        site_name: Site identifier (accepted but not used by this task).
        max_retry_count: Records at or above this retry count are skipped.
        picknbuy24_service: Scraper service providing the retry handlers.
        failed_record_service: Persistence service for failed-record lookup.

    Returns:
        On success: dict with success/task_id/main_task_id/failure_type/
        processed_count/success_count/failed_count. If no eligible record is
        found: {"success": True, "retry_count": 0}. On unexpected error:
        {"success": False, "task_id": ..., "error": ...} — errors are
        swallowed (task-boundary contract), never re-raised.
    """
    try:
        logger.info(f"【{task_id}】开始批量重试 {failure_type} 类型失败记录: {len(record_ids)} 条")

        # Fetch each failed record by id; drop missing records and those that
        # have already exhausted their retry budget.
        records = []
        for record_id in record_ids:
            record = await failed_record_service.find_by_pk(record_id)
            if record and record.retry_count < max_retry_count:
                records.append(record)

        if not records:
            logger.info(f"【{task_id}】没有有效的重试记录")
            return {"success": True, "retry_count": 0}

        # Dispatch to the retry handler matching the failure type.
        if failure_type == "detail_extract":
            success_count = await picknbuy24_service._retry_detail_extract_failures(records, task_id)
        elif failure_type == "data_save":
            success_count = await picknbuy24_service._retry_data_save_failures(records, task_id)
        elif failure_type == "file_download":
            success_count = await picknbuy24_service._retry_file_download_failures(records, task_id)
        else:
            # Unknown type: warn and report zero successes instead of failing
            # the whole batch.
            logger.warning(f"【{task_id}】未知失败类型: {failure_type}")
            success_count = 0

        result = {
            "success": True,
            "task_id": task_id,
            "main_task_id": main_task_id,
            "failure_type": failure_type,
            "processed_count": len(records),
            "success_count": success_count,
            "failed_count": len(records) - success_count
        }

        logger.info(f"【{task_id}】批量重试完成: 成功 {success_count}/{len(records)}")
        return result

    except Exception as e:
        # logger.exception preserves the traceback (logger.error with an
        # f-string would discard it).
        logger.exception(f"【{task_id}】批量重试失败: {e}")
        # Include task_id for consistency with retry_picknbuy24_failed_records'
        # error payload; adding a key is backward-compatible for consumers.
        return {"success": False, "task_id": task_id, "error": str(e)}


@task(name="retry_picknbuy24_failed_records", timeout=3600)
async def retry_picknbuy24_failed_records(
    task_id: str,
    site_name: str,
    max_retry_count: int,
    picknbuy24_service: PicknBuy24ScraperService,
    failed_record_service: ScraperFailedRecordService
) -> dict:
    """
    Retry all PicknBuy24 failed records for a site.

    Thin task wrapper: delegates the whole retry run to
    ``picknbuy24_service.execute_failed_records_only`` and returns its result
    unchanged. Any exception is logged and converted into an error dict so the
    task itself never raises.

    Args:
        task_id: Identifier of this task (used as log prefix).
        site_name: Site whose failed records should be retried.
        max_retry_count: Upper bound on per-record retry attempts.
        picknbuy24_service: Scraper service executing the retry run.
        failed_record_service: Failed-record persistence service (unused here;
            kept for task-signature compatibility).

    Returns:
        The service's result dict, or
        {"task_id": ..., "success": False, "error": ...} on failure.
    """
    try:
        outcome = await picknbuy24_service.execute_failed_records_only(
            task_id=task_id,
            site_name=site_name,
            max_retry_count=max_retry_count,
        )
    except Exception as exc:
        logger.error(f"【{task_id}】失败记录重试任务失败: {exc}")
        return {
            "task_id": task_id,
            "success": False,
            "error": str(exc)
        }

    logger.info(f"【{task_id}】失败记录重试任务完成: {outcome}")
    return outcome
