import json
from typing import List, Set, Tuple, Dict, Any

from crawl4ai import AsyncWebCrawler
from utils.data_utils import is_complete, is_duplicate
from crawlers.base_crawler import BaseCrawler


class TheKnotWeddingCrawler(BaseCrawler):
    """Crawler implementation for the TheKnot Wedding site.

    Handles TheKnot-specific extraction and post-processing of venue data.
    """

    async def process_page_result(self, result, seen_names: Set[str]) -> Tuple[List[dict], bool]:
        """Process a crawl result for one TheKnot Wedding listing page.

        Args:
            result: Crawl run result; must expose ``success``,
                ``extracted_content`` and ``error_message`` attributes.
            seen_names: Set of venue names already collected, used for
                cross-page de-duplication (mutated in place).

        Returns:
            Tuple[List[dict], bool]: The complete, de-duplicated venues from
            this page, and a flag that is True when the page yielded no
            venues (i.e. no more results — callers may stop paginating).
        """
        if not (result.success and result.extracted_content):
            print(f"Error fetching page: {result.error_message}")
            return [], False

        # Parse the JSON payload produced by the extraction strategy.
        try:
            extracted_data = json.loads(result.extracted_content)
        except json.JSONDecodeError:
            print(f"Error parsing JSON from extracted content: {result.extracted_content}")
            return [], False

        if not extracted_data:
            print("No venues found on this page.")
            # BUGFIX: an empty page means pagination is exhausted, so signal
            # "no more results". The original returned False on every path,
            # making the flag useless to callers.
            return [], True

        # Filter venue records down to complete, previously-unseen entries.
        complete_venues = []
        for venue in extracted_data:
            # Debug: print each venue to inspect its structure.
            print("Processing venue:", venue)

            # The extraction strategy adds an 'error' key; drop it when it is
            # exactly False so it doesn't pollute the record.
            if venue.get("error") is False:
                venue.pop("error", None)

            # Skip venues missing any required field.
            if not is_complete(venue, self.required_keys):
                continue

            # Skip venues whose name was already collected on a prior page.
            if is_duplicate(venue["name"], seen_names):
                continue

            seen_names.add(venue["name"])
            complete_venues.append(venue)

        print(f"Extracted {len(complete_venues)} complete venues from this page.")
        return complete_venues, False