"""
爬虫页面状态映射器
"""
from typing import List, Optional, Dict, Any
from axiom_boot.database.base_mapper import BaseMapper
from axiom_boot.di import mapper

from ..models.scraper_page_status import ScraperPageStatus


@mapper()
class ScraperPageStatusMapper(BaseMapper[ScraperPageStatus]):
    """Mapper for scraper page-status records.

    A thin query layer over ``BaseMapper.find_by_filters``; every method is
    async and returns model instances (or an aggregate dict for stats).
    The redundant ``__init__`` that only delegated to ``super()`` has been
    removed — the inherited constructor is used directly.
    """

    async def find_by_site_and_page(self, site_name: str, page_number: int, per_page: int) -> Optional[ScraperPageStatus]:
        """Return the status record matching (site, page number, page size).

        Args:
            site_name: Name of the scraped site.
            page_number: 1-based page index on that site.
            per_page: Page size the record was scraped with.

        Returns:
            The first matching record, or ``None`` when none exists.
        """
        results = await self.find_by_filters(
            site_name=site_name,
            page_number=page_number,
            per_page=per_page,
            limit=1
        )
        return results[0] if results else None

    async def find_failed_pages(self, site_name: str, max_retry: int = 3) -> List[ScraperPageStatus]:
        """Return failed pages still eligible for retry, ordered by page number.

        Args:
            site_name: Name of the scraped site.
            max_retry: Exclusive upper bound on ``retry_count`` — pages with
                ``retry_count >= max_retry`` are considered exhausted.
        """
        return await self.find_by_filters(
            site_name=site_name,
            status="failed",
            retry_count__lt=max_retry,
            order_by=["page_number"]
        )

    async def find_pending_pages(self, site_name: str, limit: int = 100) -> List[ScraperPageStatus]:
        """Return up to ``limit`` pending pages, ordered by page number.

        Args:
            site_name: Name of the scraped site.
            limit: Maximum number of records to return.
        """
        return await self.find_by_filters(
            site_name=site_name,
            status="pending",
            limit=limit,
            order_by=["page_number"]
        )

    async def find_completed_pages(self, site_name: str) -> List[ScraperPageStatus]:
        """Return all completed pages for the site, ordered by page number."""
        return await self.find_by_filters(
            site_name=site_name,
            status="completed",
            order_by=["page_number"]
        )

    async def get_site_stats(self, site_name: str) -> Dict[str, Any]:
        """Aggregate per-status page counts and total scraped items for a site.

        Fetches all status rows for the site and tallies them in Python
        (no SQL aggregation is exposed by the base mapper here).

        Returns:
            Dict with keys ``pending``/``processing``/``completed``/``failed``
            (page counts per status) and ``total_scraped`` (sum of each
            page's ``scraped_count``). Rows with an unknown status are not
            counted per-status but still contribute to ``total_scraped``.
        """
        all_pages = await self.find_by_filters(site_name=site_name)

        stats: Dict[str, Any] = {
            "pending": 0,
            "processing": 0,
            "completed": 0,
            "failed": 0,
            "total_scraped": 0
        }

        for page in all_pages:
            status = page.status
            if status in stats:
                stats[status] += 1
            # Defend against a nullable scraped_count column: treat
            # None as 0 instead of raising TypeError mid-aggregation.
            stats["total_scraped"] += page.scraped_count or 0

        return stats