import asyncio
import json
from typing import List, Set, Tuple

from crawl4ai import AsyncWebCrawler

from utils.data_validator import DataValidator

class BasePaginationProcessor:
    """
    Base class encapsulating generic pagination crawling.

    Fetches successive `?page=N` URLs with a shared crawler session,
    validates and de-duplicates the extracted items, and stops when a page
    yields nothing or reports "No Results Found".
    """

    def __init__(self,
                 crawler: AsyncWebCrawler,
                 base_url: str,
                 css_selector: str,
                 session_id: str,
                 required_keys: List[str],
                 request_interval: int = 2):
        """
        Args:
            crawler: shared AsyncWebCrawler instance; `session_id` lets
                successive page fetches reuse one browser session.
            base_url: page URL without the `?page=N` query string.
            css_selector: selector scoping what the crawler extracts.
            session_id: crawler session identifier for connection reuse.
            required_keys: keys every extracted item must carry to be valid.
            request_interval: seconds to sleep between page requests
                (politeness throttle, default 2).
        """
        self.crawler = crawler
        self.base_url = base_url
        self.css_selector = css_selector
        self.session_id = session_id
        self.required_keys = required_keys
        self.request_interval = request_interval

    async def fetch_page(self, page_number: int, seen_names: Set[str]) -> Tuple[List[dict], bool]:
        """
        Fetch and validate a single results page.

        Args:
            page_number: page number appended as `?page=N`.
            seen_names: names already collected; used for de-duplication
                and mutated in place as new items are accepted.

        Returns:
            (items, no_more_results): validated, de-duplicated items from
            this page, and True when the page body contains
            "No Results Found".
        """
        url = f"{self.base_url}?page={page_number}"
        result = await self.crawler.arun(
            url=url,
            # NOTE(review): newer crawl4ai releases expect a CrawlerRunConfig
            # object here rather than a plain dict — confirm against the
            # installed version.
            config={
                "cache_mode": "bypass",
                "session_id": self.session_id,
                "css_selector": self.css_selector
            }
        )

        if not result.success:
            return [], False

        # crawl4ai returns extracted_content as a JSON string (or None when
        # extraction produced nothing) — it must be decoded before iterating.
        # Iterating the raw string would yield single characters and crash
        # in the validator / item['name'] lookup.
        if not result.extracted_content:
            return [], "No Results Found" in result.cleaned_html

        extracted_items = json.loads(result.extracted_content)

        processed_data = []
        for item in extracted_items:
            if DataValidator.validate_venue(item, self.required_keys, seen_names):
                seen_names.add(item['name'])
                processed_data.append(item)

        return processed_data, "No Results Found" in result.cleaned_html

    async def process_pages(self, start_page: int = 1) -> List[dict]:
        """
        Run the pagination loop starting at `start_page`.

        Stops at the first page that reports "No Results Found" or yields
        no valid items. Note that items returned alongside a True stop
        flag are discarded (stop-first semantics).

        Args:
            start_page: page number to start from (default 1).

        Returns:
            All validated items accumulated across the crawled pages.
        """
        page_num = start_page
        all_data = []
        seen_names = set()

        while True:
            data, stop_flag = await self.fetch_page(page_num, seen_names)
            if stop_flag or not data:
                break

            all_data.extend(data)
            page_num += 1
            # Throttle between requests to avoid hammering the server.
            await asyncio.sleep(self.request_interval)

        return all_data