#!/usr/bin/env python3
"""
大众点评爬虫 P4 版本 - 动态类别发现模块
负责真正的动态类别发现，替换硬编码配置
"""

import logging
import time
import re
from typing import Dict, List, Optional, Tuple, Any
from playwright.sync_api import sync_playwright, Page
from pathlib import Path
import sys

# 添加配置路径
sys.path.append(str(Path(__file__).parent.parent))
from config.settings import get_cookie_string

logger = logging.getLogger(__name__)

class CategoryDiscovery:
    """动态类别发现器 - 真正的动态类别检测"""
    
    def __init__(self, cookie_string: Optional[str] = None):
        """Initialize the category discoverer.

        Args:
            cookie_string: Optional raw cookie string. When omitted (or
                empty/falsy) the cookie is loaded from the project
                configuration; if that fails, an empty string is used and
                a warning is logged.
        """
        if cookie_string:
            self.cookie_string = cookie_string
        else:
            try:
                # Fall back to the configured cookie when none was supplied.
                self.cookie_string = get_cookie_string()
            except Exception as e:
                logger.warning(f"无法从配置获取Cookie，使用空字符串: {e}")
                self.cookie_string = ""

        # Per-instance cache: "{city_name}_{city_code}" -> discovered category list.
        self.discovered_categories_cache = {}

        # Category relevance keyword bank (used for validation).
        self.category_keywords = {
            '日式料理': ['日式', '日本', '寿司', '居酒屋', '料理', '刺身', '拉面', '乌冬', '天妇罗'],
            '川菜': ['川菜', '四川', '麻辣', '火锅', '串串', '冒菜', '水煮', '宫保', '麻婆'],
            '粤菜': ['粤菜', '广东', '茶餐厅', '港式', '煲仔饭', '烧腊', '点心', '早茶'],
            '江浙菜': ['江浙', '杭帮', '本帮', '小笼包', '生煎', '糖醋', '上海', '杭州'],
            '西餐': ['西餐', '意大利', '牛排', '披萨', '汉堡', '咖啡', '法式', '德式'],
            '小吃快餐': ['小吃', '快餐', '便当', '面条', '包子', '饺子', '煎饼', '烧饼'],
            '烧烤烤鱼': ['烧烤', '烤鱼', '烤肉', '串串', '烤串', '炭火', '铁板'],
            '火锅': ['火锅', '麻辣烫', '涮锅', '毛肚', '鸳鸯锅', '清汤锅'],
            '自助餐': ['自助', 'buffet', '自助餐', '海鲜自助', '烤肉自助'],
            '面包甜点': ['面包', '甜点', '蛋糕', '烘焙', '西点', '马卡龙', '泡芙']
        }
        
    def parse_cookies(self) -> List[Dict[str, str]]:
        """Parse the raw cookie string into Playwright-style cookie dicts.

        Returns:
            A list of dicts with ``name``/``value``/``domain``/``path`` keys
            scoped to ``.dianping.com``. Returns an empty list when the
            cookie string is blank or parsing fails.
        """
        cookies = []

        # Normalise the raw string: strip wrapping quotes and line breaks.
        cookie_string = self.cookie_string.strip()
        if cookie_string.startswith('"') and cookie_string.endswith('"'):
            cookie_string = cookie_string[1:-1]

        cookie_string = cookie_string.replace('\n', '').replace('\r', '').strip()

        if not cookie_string:
            logger.warning("[CATEGORY_DISCOVERY] Cookie字符串为空")
            return cookies

        try:
            # Split on ';' rather than '; ' so that pairs without a space
            # after the separator (common in copied headers) are not lost.
            for cookie_pair in cookie_string.split(';'):
                cookie_pair = cookie_pair.strip()
                if '=' in cookie_pair and cookie_pair:
                    name, value = cookie_pair.split('=', 1)
                    name = name.strip()
                    value = value.strip()

                    # Skip pairs with an empty name or value.
                    if name and value:
                        cookies.append({
                            'name': name,
                            'value': value,
                            'domain': '.dianping.com',
                            'path': '/'
                        })

            logger.info(f"[CATEGORY_DISCOVERY] 解析到 {len(cookies)} 个有效Cookie")
            return cookies

        except Exception as e:
            logger.error(f"[CATEGORY_DISCOVERY] Cookie解析失败: {e}")
            return []
    
    def discover_city_categories(self, city_name: str, city_code: str, force_refresh: bool = False) -> List[Dict[str, Any]]:
        """Discover categories for one city.

        Resolution order: in-memory cache (unless force_refresh), dynamic
        browser-based discovery (accepted only when it yields >= 5
        categories), the fixed category list, and finally the emergency
        fallback list. Successful results are cached per city.
        """
        logger.info(f"[CATEGORY_DISCOVERY] 🔍 开始为 {city_name} 发现类别...")

        cache_key = f"{city_name}_{city_code}"
        cached = None if force_refresh else self.discovered_categories_cache.get(cache_key)
        if cached is not None:
            logger.info(f"[CATEGORY_DISCOVERY] 使用缓存的 {city_name} 类别数据")
            self._print_discovered_categories(cached, city_name, "缓存")
            return cached

        try:
            logger.info(f"[CATEGORY_DISCOVERY] 🌐 尝试动态发现 {city_name} 的类别...")
            dynamic = self._discover_categories_dynamically(city_name, city_code)

            # Dynamic discovery counts as successful only with >= 5 categories.
            if dynamic and len(dynamic) >= 5:
                self.discovered_categories_cache[cache_key] = dynamic
                logger.info(f"[CATEGORY_DISCOVERY] ✅ {city_name} 动态发现成功，共 {len(dynamic)} 个类别")
                self._print_discovered_categories(dynamic, city_name, "动态发现")
                return dynamic
            logger.warning(f"[CATEGORY_DISCOVERY] 动态发现结果不足，使用固定类别方案")

            # Dynamic discovery insufficient: use the fixed category list.
            logger.info(f"[CATEGORY_DISCOVERY] 📋 使用固定类别方案...")
            fixed = self._get_fixed_categories(city_code)

            if fixed:
                self.discovered_categories_cache[cache_key] = fixed
                logger.info(f"[CATEGORY_DISCOVERY] ✅ {city_name} 固定类别加载成功，共 {len(fixed)} 个类别")
                self._print_discovered_categories(fixed, city_name, "固定配置")
                return fixed

            logger.warning(f"[CATEGORY_DISCOVERY] {city_name} 固定类别加载失败，使用备用配置")
            fallback = self._get_fallback_categories(city_code)
            self._print_discovered_categories(fallback, city_name, "备用配置")
            return fallback

        except Exception as e:
            logger.error(f"[CATEGORY_DISCOVERY] {city_name} 类别获取异常: {e}")
            fallback = self._get_fallback_categories(city_code)
            self._print_discovered_categories(fallback, city_name, "异常备用")
            return fallback

    def _print_discovered_categories(self, categories: List[Dict[str, Any]], city_name: str, source: str):
        """Log every discovered category and flag whether any roast-meat
        ('烤肉') category is present."""
        logger.info(f"[CATEGORY_DISCOVERY] 📊 {city_name} 类别详情 ({source}):")
        for idx, cat in enumerate(categories, start=1):
            logger.info(f"[CATEGORY_DISCOVERY]   {idx:2d}. {cat['name']:12s} (ID: {cat['id']:8s}) - {cat.get('source', 'unknown')}")

        # Explicitly check for roast-meat ('烤肉') related categories.
        roast_names = [cat['name'] for cat in categories if '烤肉' in cat['name']]
        if roast_names:
            logger.info(f"[CATEGORY_DISCOVERY] ✅ 发现烤肉相关类别: {roast_names}")
        else:
            logger.warning(f"[CATEGORY_DISCOVERY] ⚠️ 未发现烤肉相关类别")

    def _get_fixed_categories(self, city_code: str) -> List[Dict[str, Any]]:
        """Return the fixed category list (includes roast meat and other
        common cuisines), with per-city URLs filled in.

        The (name, id) pairs are based on Dianping's actual category IDs.
        """
        name_id_pairs = [
            # Major cuisines
            ('小吃快餐', 'g112'),
            ('粤菜', 'g103'),
            ('川菜', 'g102'),
            ('日式料理', 'g113'),
            ('火锅', 'g110'),
            ('西餐', 'g116'),
            ('湘菜', 'g104'),
            ('韩式料理', 'g114'),

            # BBQ / roast meat
            ('烤肉', 'g34303'),
            ('烧烤烤串', 'g508'),
            ('烧烤烤鱼', 'g121'),

            # Other common categories
            ('面包蛋糕甜品', 'g117'),
            ('咖啡', 'g132'),
            ('自助餐', 'g111'),
            ('小龙虾', 'g219'),
            ('鱼鲜海鲜', 'g251'),
            ('江浙菜', 'g101'),
            ('东北菜', 'g108'),
            ('新疆菜', 'g124'),
            ('云南菜', 'g107'),

            # Specialty categories
            ('饮品', 'g34236'),
            ('粥粉面', 'g1959'),
            ('面馆', 'g215'),
            ('素食', 'g34284'),
        ]

        # Expand each pair into a full category record with its URL.
        return [
            {
                'name': cat_name,
                'id': cat_id,
                'url': f"https://www.dianping.com/{city_code}/ch10/{cat_id}",
                'source': 'fixed_categories'
            }
            for cat_name, cat_id in name_id_pairs
        ]

    def _discover_categories_dynamically(self, city_name: str, city_code: str) -> List[Dict[str, Any]]:
        """Dynamically discover a city's categories via browser automation.

        Returns an empty list when no cookie is configured, when the page
        yields no categories, or when extraction raises.
        """
        logger.info(f"[CATEGORY_DISCOVERY] 🌐 开始动态发现 {city_name} ({city_code}) 的品类...")

        # A cookie is required for the page to render the category list.
        if not self.cookie_string:
            logger.warning("[CATEGORY_DISCOVERY] Cookie为空，无法进行动态发现")
            return []

        try:
            found = self._extract_categories_from_page(city_name, city_code)
        except Exception as e:
            logger.error(f"[CATEGORY_DISCOVERY] 动态发现异常: {e}")
            return []

        if not found:
            logger.warning(f"[CATEGORY_DISCOVERY] ⚠️ 动态发现未找到品类")
            return []

        logger.info(f"[CATEGORY_DISCOVERY] ✅ 动态发现成功，找到 {len(found)} 个品类")
        return found

    def check_roast_meat_availability(self, city_name: str, city_code: str) -> Dict[str, Any]:
        """Check which roast-meat ('烤肉') related category IDs look usable
        for the given city and summarise the outcome.

        Note: no live URL probe is performed yet; names are inferred from
        the known ID mapping.
        """
        logger.info(f"[CATEGORY_DISCOVERY] 🥩 专门检查 {city_name} 的烤肉品类可用性...")

        # Candidate roast-meat related category IDs.
        candidate_ids = [
            'g34303',  # roast meat
            'g508',    # BBQ skewers
            'g121',    # BBQ / grilled fish
            'g34304',  # Korean BBQ
            'g34305'   # Japanese BBQ
        ]

        found = []
        for cid in candidate_ids:
            try:
                probe_url = f"https://www.dianping.com/{city_code}/ch10/{cid}"
                logger.info(f"[CATEGORY_DISCOVERY] 测试烤肉品类: {cid} - {probe_url}")

                # TODO: add a real URL availability check here; for now the
                # category name is inferred from the ID alone.
                inferred_name = self._get_category_name_by_id(cid)
                if inferred_name:
                    found.append({
                        'name': inferred_name,
                        'id': cid,
                        'url': probe_url,
                        'source': 'roast_meat_check'
                    })

            except Exception as e:
                logger.debug(f"[CATEGORY_DISCOVERY] 烤肉品类 {cid} 检查失败: {e}")

        result = {
            'city_name': city_name,
            'city_code': city_code,
            'available_roast_categories': found,
            'has_roast_meat': len(found) > 0
        }

        if result['has_roast_meat']:
            logger.info(f"[CATEGORY_DISCOVERY] ✅ {city_name} 发现 {len(found)} 个烤肉相关品类:")
            for cat in found:
                logger.info(f"[CATEGORY_DISCOVERY]   - {cat['name']} (ID: {cat['id']})")
        else:
            logger.warning(f"[CATEGORY_DISCOVERY] ⚠️ {city_name} 未发现烤肉相关品类")

        return result

    def _get_category_name_by_id(self, category_id: str) -> str:
        """Map a known Dianping category ID to its display name.

        Unknown IDs yield the placeholder '未知品类(<id>)'.
        """
        known_names = dict([
            ('g34303', '烤肉'),
            ('g508', '烧烤烤串'),
            ('g121', '烧烤烤鱼'),
            ('g34304', '韩式烤肉'),
            ('g34305', '日式烤肉'),
            ('g113', '日式料理'),
            ('g117', '面包蛋糕甜品'),
            ('g110', '火锅'),
            ('g112', '小吃快餐'),
            ('g103', '粤菜'),
            ('g102', '川菜'),
            ('g116', '西餐'),
            ('g104', '湘菜'),
            ('g114', '韩式料理'),
        ])
        resolved = known_names.get(category_id)
        return resolved if resolved is not None else f'未知品类({category_id})'

    def _extract_categories_from_page(self, city_name: str, city_code: str) -> List[Dict[str, Any]]:
        """Extract real category information from the city's food home page.

        Launches Chromium via Playwright, injects the parsed cookies into the
        browser context, opens https://www.dianping.com/{city_code}/ch10,
        attempts to expand the category list, and delegates parsing to
        _extract_category_elements. Returns [] when redirected to a login
        page or on any page error.
        """
        categories = []
        
        with sync_playwright() as p:
            browser = p.chromium.launch(headless=False)  # headed mode to make debugging easier
            context = browser.new_context(
                user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
                viewport={'width': 1920, 'height': 1080}
            )
            
            # Cookies must be added to the context before opening the page.
            cookies = self.parse_cookies()
            context.add_cookies(cookies)
            page = context.new_page()
            
            try:
                # Visit the city's food home page.
                url = f"https://www.dianping.com/{city_code}/ch10"
                logger.info(f"[CATEGORY_DISCOVERY] 访问页面: {url}")
                
                page.goto(url, timeout=30000)
                page.wait_for_load_state('networkidle', timeout=20000)
                
                # Check login state: a redirect to a login URL means the
                # supplied cookies are missing or expired.
                if 'login' in page.url.lower():
                    logger.error("[CATEGORY_DISCOVERY] 被重定向到登录页面")
                    return []
                
                # Try clicking the "更多" (more) button to expand all categories.
                self._click_more_button(page)
                
                # Extract the category information from the expanded page.
                categories = self._extract_category_elements(page, city_code)
                
                return categories
                
            except Exception as e:
                logger.error(f"[CATEGORY_DISCOVERY] 页面处理异常: {e}")
                return []
            
            finally:
                browser.close()
    
    def _click_more_button(self, page: Page) -> bool:
        """Try to click the "更多" (more) button so all categories are shown.

        Strategy: first scan the page's a/span/div elements for expand-like
        text ("更多"/"展开"/"全部") and click the first visible candidate;
        if none works, fall back to a list of CSS selectors.

        Returns:
            True when a button was clicked, False otherwise (including when
            the categories appear to be fully expanded already).
        """
        try:
            logger.info("[CATEGORY_DISCOVERY] 🔍 寻找'更多'按钮...")

            # "More"-button selectors based on the actual Dianping DOM structure.
            more_selectors = [
                # Text-content based selectors
                'a:has-text("更多")',
                'span:has-text("更多")',
                'div:has-text("更多")',

                # href-attribute based selectors
                'a[href="javascript:;"]',  # Dianping's more-button is usually a javascript link
                'a[href="javascript:void(0)"]',

                # class-based selectors
                '.more-btn',
                '.expand-btn',
                '.category-more',
                '.more-category',
                '.show-more',
                '.expand-all',

                # title-attribute based selectors
                'a[title*="更多"]',
                'a[title*="展开"]',
                'a[title*="全部"]',

                # other plausible selectors
                'a:has-text("全部分类")',
                'a:has-text("展开")',
                'a:has-text("查看更多")',

                # position-based selectors (usually at the bottom of the category area)
                '.category-list .more',
                '.cate-list .more',
                '.filter-list .more'
            ]

            # First pass: scan the page for anything that looks like a "more" button.
            logger.info("[CATEGORY_DISCOVERY] 🔍 扫描页面中的所有可能按钮...")
            all_links = page.query_selector_all('a, span, div')
            more_candidates = []

            for element in all_links[:50]:  # only inspect the first 50 elements
                try:
                    text = element.inner_text().strip()
                    if text and ('更多' in text or '展开' in text or '全部' in text):
                        href = element.get_attribute('href') or 'no-href'
                        class_name = element.get_attribute('class') or 'no-class'
                        more_candidates.append({
                            'text': text,
                            'href': href,
                            'class': class_name,
                            'element': element
                        })
                except Exception:
                    # Detached or odd nodes may fail inner_text(); skip them.
                    # (Was a bare `except:`, which would also have swallowed
                    # KeyboardInterrupt/SystemExit.)
                    continue

            if more_candidates:
                logger.info(f"[CATEGORY_DISCOVERY] 发现 {len(more_candidates)} 个候选按钮:")
                for i, candidate in enumerate(more_candidates):
                    logger.info(f"[CATEGORY_DISCOVERY]   {i+1}. '{candidate['text']}' - href: {candidate['href']} - class: {candidate['class']}")

            # Try clicking each candidate button in turn.
            for candidate in more_candidates:
                try:
                    element = candidate['element']
                    if element.is_visible():
                        logger.info(f"[CATEGORY_DISCOVERY] 尝试点击: '{candidate['text']}'")
                        element.click()
                        time.sleep(3)  # wait for the expand animation to finish
                        logger.info(f"[CATEGORY_DISCOVERY] ✅ 成功点击按钮: '{candidate['text']}'")
                        return True
                except Exception as e:
                    logger.debug(f"[CATEGORY_DISCOVERY] 点击候选按钮失败: {e}")
                    continue

            # Second pass: if no candidate worked, fall back to the CSS selectors.
            logger.info("[CATEGORY_DISCOVERY] 候选按钮都失败，尝试选择器方法...")
            for i, selector in enumerate(more_selectors):
                try:
                    more_button = page.query_selector(selector)
                    if more_button and more_button.is_visible():
                        button_text = more_button.inner_text().strip()
                        logger.info(f"[CATEGORY_DISCOVERY] 选择器 {i+1}/{len(more_selectors)}: {selector} - 找到按钮: '{button_text}'")

                        if "更多" in button_text or "展开" in button_text or "全部" in button_text:
                            more_button.click()
                            time.sleep(3)  # wait for the expand animation to finish
                            logger.info(f"[CATEGORY_DISCOVERY] ✅ 成功点击更多按钮: {selector}")
                            return True
                except Exception as e:
                    logger.debug(f"[CATEGORY_DISCOVERY] 选择器 {selector} 失败: {e}")
                    continue

            logger.info("[CATEGORY_DISCOVERY] ⚠️ 未找到可点击的更多按钮，可能已展开所有类别")
            return False

        except Exception as e:
            logger.warning(f"[CATEGORY_DISCOVERY] 点击更多按钮异常: {e}")
            return False
    
    def _extract_category_elements(self, page: Page, city_code: str) -> List[Dict[str, Any]]:
        """Extract category link elements from the loaded page.

        Tries a prioritized list of CSS selectors and stops at the first
        selector that yields at least one parseable category; if all fail,
        falls back to a generic anchor scan. The result is then
        de-duplicated and validated. Returns [] on error.
        """
        categories = []

        try:
            # Wait for the page to finish loading.
            logger.info("[CATEGORY_DISCOVERY] 等待页面加载完成...")
            page.wait_for_load_state('networkidle', timeout=15000)

            # Category selectors based on the actual DOM structure
            # (ordered so the most likely ones come first).
            category_selectors = [
                # The most common Dianping category-link format
                f'a[href*="/{city_code}/ch10/g"]',  # links containing both city and category ID
                'a[href*="/ch10/g"]',  # links containing a category ID

                # Selectors based on page structure
                'div:has(> h4:has-text("分类:")) a[href*="/g"]',  # links inside the category section
                '.category-list a[href*="/g"]',
                '.cate-list a[href*="/g"]',
                '.filter-item a[href*="/g"]',
                '.category-item a[href*="/g"]',

                # More generic selectors
                '.category-list a',
                '.cate-list a',
                '.filter-item a',
                '.category-item a',

                # Text-content selectors (targeting common cuisines)
                'a:has-text("烤肉")',
                'a:has-text("日式料理")',
                'a:has-text("面包蛋糕甜品")',
                'a:has-text("川菜")',
                'a:has-text("粤菜")',
                'a:has-text("火锅")'
            ]

            logger.info(f"[CATEGORY_DISCOVERY] 🔍 开始使用 {len(category_selectors)} 个选择器提取类别...")

            for i, selector in enumerate(category_selectors):
                try:
                    logger.info(f"[CATEGORY_DISCOVERY] 尝试选择器 {i+1}/{len(category_selectors)}: {selector}")
                    elements = page.query_selector_all(selector)

                    if elements:
                        logger.info(f"[CATEGORY_DISCOVERY] ✅ 选择器找到 {len(elements)} 个元素")

                        for j, element in enumerate(elements):
                            try:
                                category_info = self._parse_category_element(element, city_code)
                                if category_info:
                                    categories.append(category_info)
                                    logger.debug(f"[CATEGORY_DISCOVERY]   解析元素 {j+1}: {category_info['name']} (ID: {category_info['id']})")
                            except Exception as e:
                                logger.debug(f"[CATEGORY_DISCOVERY]   解析元素 {j+1} 失败: {e}")

                        if categories:
                            logger.info(f"[CATEGORY_DISCOVERY] ✅ 选择器 {selector} 成功提取到 {len(categories)} 个类别")
                            break  # stop trying other selectors once categories are found
                    else:
                        logger.debug(f"[CATEGORY_DISCOVERY] 选择器 {selector} 未找到元素")

                except Exception as e:
                    logger.debug(f"[CATEGORY_DISCOVERY] 选择器 {selector} 处理失败: {e}")
                    continue

            # If no selector produced categories, fall back to the generic scan.
            if not categories:
                logger.warning("[CATEGORY_DISCOVERY] ⚠️ 所有选择器都失败，尝试通用方法提取类别")
                categories = self._extract_categories_generic(page, city_code)

            # Log the raw extraction result before filtering.
            logger.info(f"[CATEGORY_DISCOVERY] 📊 原始提取结果: {len(categories)} 个类别")
            for cat in categories:
                logger.info(f"[CATEGORY_DISCOVERY]   - {cat['name']} (ID: {cat['id']}, 来源: {cat.get('source', 'unknown')})")

            # De-duplicate and validate the raw list.
            logger.info("[CATEGORY_DISCOVERY] 🔧 开始去重和验证...")
            categories = self._deduplicate_categories(categories)
            categories = self._validate_categories(categories)

            logger.info(f"[CATEGORY_DISCOVERY] ✅ 最终结果: {len(categories)} 个有效类别")
            return categories

        except Exception as e:
            logger.error(f"[CATEGORY_DISCOVERY] 提取类别元素失败: {e}")
            return []
    
    def _extract_categories_generic(self, page: Page, city_code: str) -> List[Dict[str, Any]]:
        """Fallback extraction: scan every anchor on the page and keep the
        ones whose href contains a '/ch10/g<ID>' category path."""
        results = []
        try:
            for anchor in page.query_selector_all('a[href]'):
                target = anchor.get_attribute('href')
                if not target or '/ch10/g' not in target:
                    continue
                parsed = self._parse_category_element(anchor, city_code)
                if parsed:
                    results.append(parsed)

            logger.info(f"[CATEGORY_DISCOVERY] 通用方法找到 {len(results)} 个类别")
            return results

        except Exception as e:
            logger.error(f"[CATEGORY_DISCOVERY] 通用方法提取失败: {e}")
            return []

    def _parse_category_element(self, element, city_code: str) -> Optional[Dict[str, Any]]:
        """Parse a single anchor element into a category record.

        Applies several filters: name length, a blacklist of navigation /
        Shenzhen-district names, location-like keywords, and a category-ID
        ceiling (IDs above 50000 are usually second-level subcategories).

        Returns:
            A dict with name/id/url/source/href keys, or None when the
            element does not represent a usable category.
        """
        try:
            # Category name comes from the anchor text.
            name = element.inner_text().strip()
            if not name or len(name) > 20 or len(name) < 2:  # drop abnormal names
                return None

            # Filter out non-category links and geographic/landmark links.
            excluded_names = [
                '不限', '更多', '收起', '热门商区', '行政区', '地铁线',
                '购物公园', '海岸城', '车公庙', '蛇口', '国贸', '宝安南路',
                '上沙', '下沙', '南山中心区', '梅林', '科技园', '桃园',
                '东门商业圈', '南油', '海雅缤纷城', '华强北', '前海',
                '八卦岭', '园岭', '宝安中心区', '布吉街', '东站', '坂田',
                '杨美', '布心', '太白路', '白石洲', '宝体', '宝安国际机场',
                '宝立方', '民治', '华侨城', '海上世界', '深圳湾', '水榭春天',
                '福田中心', '岗厦'
            ]

            if name in excluded_names:
                return None

            # Drop longer names containing location-like keywords.
            location_keywords = ['区', '街', '路', '广场', '中心', '城', '园', '站', '湾', '山', '岭']
            if any(keyword in name for keyword in location_keywords) and len(name) > 4:
                return None

            # The href is required to derive the category ID and URL.
            href = element.get_attribute('href')
            if not href:
                return None

            category_id = self._extract_category_id(href)
            if not category_id:
                return None

            # Drop overly large IDs (usually second-level subcategories).
            try:
                id_num = int(category_id[1:])  # strip the 'g' prefix
                if id_num > 50000:  # IDs above 50000 are usually fine-grained subcategories
                    return None
            except ValueError:
                # Non-numeric ID suffix: keep the category rather than guess.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                pass

            # Build the absolute URL.
            if href.startswith('/'):
                url = f"https://www.dianping.com{href}"
            else:
                url = href

            return {
                'name': name,
                'id': category_id,
                'url': url,
                'source': 'dynamic_discovery',
                'href': href
            }

        except Exception as e:
            logger.debug(f"[CATEGORY_DISCOVERY] 解析类别元素失败: {e}")
            return None
    
    def _extract_category_id(self, href: str) -> Optional[str]:
        """Pull the Dianping category ID (e.g. 'g113') out of a link.

        Recognises both the '/g<digits>' path form and the
        'categoryid=<digits>' query form; returns None when neither matches.
        """
        try:
            # Path form first ('/g113'), then the query-string form.
            for pattern in (r'/g(\d+)', r'categoryid=(\d+)'):
                found = re.search(pattern, href)
                if found:
                    return f"g{found.group(1)}"
            return None

        except Exception as e:
            logger.debug(f"[CATEGORY_DISCOVERY] 提取类别ID失败: {e}")
            return None
    
    def _deduplicate_categories(self, categories: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Drop categories whose id OR name was already seen, keeping the
        first occurrence and preserving input order."""
        kept = []
        used_ids, used_names = set(), set()

        for entry in categories:
            entry_id = entry.get('id')
            entry_name = entry.get('name')
            if entry_id in used_ids or entry_name in used_names:
                continue
            used_ids.add(entry_id)
            used_names.add(entry_name)
            kept.append(entry)

        logger.info(f"[CATEGORY_DISCOVERY] 去重后保留 {len(kept)} 个类别")
        return kept
    
    def _validate_categories(self, categories: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Keep categories whose name is 2-10 characters long and free of
        navigation/filter keywords ('全部', '推荐', ...)."""
        banned_words = ('全部', '推荐', '热门', '附近', '地图', '筛选')

        valid_categories = [
            category for category in categories
            if 2 <= len(category.get('name', '')) <= 10
            and not any(word in category.get('name', '') for word in banned_words)
        ]

        logger.info(f"[CATEGORY_DISCOVERY] 验证后保留 {len(valid_categories)} 个有效类别")
        return valid_categories
    
    def _get_fallback_categories(self, city_code: str) -> List[Dict[str, Any]]:
        """Return the minimal emergency category list, used when both dynamic
        discovery and the fixed list fail.

        IDs are kept consistent with ``_get_fixed_categories`` and
        ``_get_category_name_by_id`` (g112 = 小吃快餐, g116 = 西餐).
        """
        fallback_categories = [
            {'name': '日式料理', 'id': 'g113', 'source': 'fallback'},
            {'name': '川菜', 'id': 'g102', 'source': 'fallback'},
            {'name': '江浙菜', 'id': 'g101', 'source': 'fallback'},
            # Fixed: this entry previously used g116, which the rest of this
            # module maps to 西餐; g112 is the ID for 小吃快餐.
            {'name': '小吃快餐', 'id': 'g112', 'source': 'fallback'},
            {'name': '粤菜', 'id': 'g103', 'source': 'fallback'},
            {'name': '火锅', 'id': 'g110', 'source': 'fallback'}
        ]

        # Fill in the per-city URL for each entry.
        for category in fallback_categories:
            category['url'] = f"https://www.dianping.com/{city_code}/ch10/{category['id']}"

        logger.info(f"[CATEGORY_DISCOVERY] 使用备用配置，共 {len(fallback_categories)} 个类别")
        return fallback_categories
    
    def test_category_discovery(self, city_name: str, city_code: str) -> Dict[str, Any]:
        """测试类别发现功能"""
        logger.info(f"[CATEGORY_DISCOVERY] 开始测试 {city_name} 的类别发现...")
        
        start_time = time.time()
        categories = self.discover_city_categories(city_name, city_code, force_refresh=True)
        end_time = time.time()
        
        test_result = {
            'city_name': city_name,
            'city_code': city_code,
            'categories_found': len(categories),
            'discovery_time': round(end_time - start_time, 2),
            'categories': categories,
            'success': len(categories) > 0
        }
        
        logger.info(f"[CATEGORY_DISCOVERY] 测试完成: {test_result['success']}")
        return test_result
    
    def get_category_cache_info(self) -> Dict[str, Any]:
        """Summarise the in-memory discovery cache: which city keys are
        cached and how many categories each entry holds."""
        cache = self.discovered_categories_cache
        per_city_counts = {city: len(items) for city, items in cache.items()}
        return {
            'cached_cities': list(cache),
            'total_cached': len(cache),
            'cache_details': per_city_counts,
        }
