#!/usr/bin/env python3
"""
大众点评动态品类发现核心模块
解决静态品类配置导致的数据污染问题
实现城市→品类自动发现→验证的完整流程
"""

import time
import re
import json
import random
from datetime import datetime
from playwright.sync_api import sync_playwright
import logging

logger = logging.getLogger(__name__)

class DynamicCategoryDiscovery:
    """Dynamic category discoverer for dianping.com food listings.

    Replaces static category configuration (which caused data pollution)
    with a runtime flow: city -> automatic category discovery -> per-category
    validation against the live shop listings.
    """

    def __init__(self, cookie_string):
        """
        Args:
            cookie_string: raw "name=value; name=value" cookie header copied
                from a logged-in dianping.com browser session.
        """
        self.cookie_string = cookie_string
        # Cache of discovery results keyed by city code (reserved for reuse).
        self.discovered_categories = {}

        # Category -> relevance keywords. Used both for keyword-based
        # discovery and for judging shop-name/category relevance.
        self.category_keywords = {
            '日式料理': ['日式', '日本', '寿司', '刺身', '居酒屋', '拉面', '丼', '料理', '和食', '天妇罗'],
            '川菜': ['川菜', '四川', '麻辣', '火锅', '串串', '冒菜', '担担面', '回锅肉', '水煮', '宫保'],
            '粤菜': ['粤菜', '广东', '茶餐厅', '港式', '煲仔饭', '烧腊', '点心', '叉烧', '白切鸡'],
            '江浙菜': ['江浙', '杭帮', '本帮', '淮扬', '小笼包', '生煎', '糖醋', '红烧', '清蒸'],
            '西餐': ['西餐', '意大利', '法式', '牛排', '披萨', '汉堡', '咖啡', '沙拉', '意面'],
            '小吃快餐': ['小吃', '快餐', '便当', '盖饭', '面条', '包子', '饺子', '煎饼', '米线'],
            '烧烤烤鱼': ['烧烤', '烤鱼', '烤肉', '串串', '烤串', '炭火', '铁板', '烤翅'],
            '湘菜': ['湘菜', '湖南', '辣椒', '剁椒', '口味', '臭豆腐', '米粉'],
            '东北菜': ['东北', '锅包肉', '地三鲜', '酸菜', '血肠', '杀猪菜'],
            '新疆菜': ['新疆', '羊肉串', '大盘鸡', '抓饭', '馕', '拌面']
        }

    def parse_cookies(self):
        """Parse the raw cookie header into Playwright cookie dicts.

        Tolerates both '; ' and ';' separators (the original split only on
        '; ', mis-parsing headers without the space). Fragments without an
        '=' sign are skipped.

        Returns:
            list[dict]: cookies scoped to .dianping.com.
        """
        cookies = []
        for cookie_pair in self.cookie_string.split(';'):
            cookie_pair = cookie_pair.strip()
            if '=' in cookie_pair:
                # Split only on the first '=' so values may contain '='.
                name, value = cookie_pair.split('=', 1)
                cookies.append({
                    'name': name.strip(),
                    'value': value.strip(),
                    'domain': '.dianping.com',
                    'path': '/'
                })
        return cookies

    def discover_city_categories(self, city_code):
        """Discover all valid food categories of the given city.

        Opens the city's food landing page (/{city}/ch10) in a visible
        browser with the session cookies injected, extracts candidate
        categories, validates each candidate against its shop list, and
        persists the result to a JSON file.

        Args:
            city_code: dianping city slug, e.g. 'shenzhen'.

        Returns:
            list of validated category dicts; [] when nothing was found;
            None when the cookie is invalid or an unexpected error occurs.
        """
        logger.info(f"[DISCOVERY] 开始发现 {city_code} 的品类结构...")

        with sync_playwright() as p:
            # Headful mode: the site is less likely to block a visible browser.
            browser = p.chromium.launch(headless=False)
            context = browser.new_context(
                user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
                viewport={'width': 1920, 'height': 1080}
            )

            # Inject the logged-in session cookies before navigating.
            cookies = self.parse_cookies()
            context.add_cookies(cookies)
            page = context.new_page()

            try:
                # City food landing page ('ch10' is the food channel).
                url = f"https://www.dianping.com/{city_code}/ch10"
                logger.info(f"[DISCOVERY] 访问品类发现页面: {url}")

                page.goto(url, timeout=30000)
                page.wait_for_load_state('networkidle', timeout=20000)

                # A redirect to a login page means the cookie has expired.
                if 'login' in page.url.lower():
                    logger.error("[DISCOVERY] Cookie失效，需要重新登录")
                    return None

                # Simulate human behaviour to reduce anti-bot detection.
                time.sleep(random.uniform(3, 6))
                page.mouse.move(random.randint(300, 800), random.randint(200, 600))
                time.sleep(random.uniform(2, 4))

                # Extract candidate categories from the rendered page.
                categories = self.extract_categories_from_page(page, city_code)

                if categories:
                    logger.info(f"[DISCOVERY] 发现 {len(categories)} 个候选品类")

                    # Keep only categories whose shop lists look genuine.
                    valid_categories = self.validate_categories(categories, city_code)

                    # Persist results for later inspection/reuse.
                    self.save_discovery_results(city_code, valid_categories)

                    return valid_categories
                else:
                    logger.warning(f"[DISCOVERY] 未能发现 {city_code} 的品类")
                    return []

            except Exception as e:
                logger.error(f"[DISCOVERY] 品类发现失败: {e}")
                return None

            finally:
                browser.close()

    def extract_categories_from_page(self, page, city_code):
        """Extract candidate category dicts from the landing-page HTML.

        Tries several regex patterns against the navigation markup first;
        falls back to keyword-based discovery when none match. Results are
        de-duplicated and sorted by confidence.
        """
        logger.info("[DISCOVERY] 分析页面品类结构...")

        content = page.content()
        categories = []

        # Strategy 1: category links in the navigation menu.
        try:
            # Markup variants observed for category navigation links.
            nav_patterns = [
                # standard link format
                rf'<a[^>]*href="/{city_code}/ch10/([^"]*)"[^>]*>([^<]+)</a>',
                # link carrying a category id
                r'<a[^>]*href="[^"]*ch10/([^"/]*)"[^>]*>([^<]+)</a>',
                # data-attribute format
                r'data-cat-id="([^"]*)"[^>]*>([^<]+)<',
                # simplified link format
                r'href="/[^/]+/ch10/([^"]*)"[^>]*>([^<]+)</a>'
            ]

            for i, pattern in enumerate(nav_patterns):
                matches = re.findall(pattern, content, re.IGNORECASE)
                if matches:
                    logger.info(f"[DISCOVERY] 使用模式{i+1}找到 {len(matches)} 个品类链接")

                    for match in matches:
                        if len(match) == 2:
                            category_id, category_name = match
                            category_id = category_id.strip()
                            category_name = category_name.strip()

                            # Drop navigation chrome ("全部", "排序", ...)
                            # and entries without an id.
                            if self.is_valid_category_name(category_name) and category_id:
                                category = {
                                    'id': category_id,
                                    'name': category_name,
                                    'url': f"https://www.dianping.com/{city_code}/ch10/{category_id}",
                                    'source': f'pattern_{i+1}'
                                }
                                categories.append(category)

                    if categories:  # first pattern that yields results wins
                        break

        except Exception as e:
            logger.warning(f"[DISCOVERY] 导航菜单提取失败: {e}")

        # Strategy 2: keyword-based discovery as a fallback.
        if not categories:
            logger.info("[DISCOVERY] 尝试基于关键词的智能发现...")
            categories = self.discover_categories_by_keywords(content, city_code)

        # De-duplicate and sort by confidence.
        unique_categories = self.deduplicate_categories(categories)

        logger.info(f"[DISCOVERY] 最终发现 {len(unique_categories)} 个候选品类:")
        for cat in unique_categories:
            logger.info(f"  - {cat['name']} (ID: {cat['id']}, 来源: {cat.get('source', 'unknown')})")

        return unique_categories

    def discover_categories_by_keywords(self, content, city_code):
        """Infer categories from keyword occurrences in the page text.

        A category is emitted when at least two of its keywords appear in
        the HTML; its confidence is the fraction of keywords matched.
        """
        categories = []

        # Well-known dianping category ids (fallback mapping).
        common_category_ids = {
            '日式料理': 'g113',
            '川菜': 'g103',
            '粤菜': 'g101',
            '江浙菜': 'g102',
            '西餐': 'g114',
            '小吃快餐': 'g116',
            '烧烤烤鱼': 'g121',
            '湘菜': 'g104',
            '东北菜': 'g108',
            '新疆菜': 'g124',
            '咖啡': 'g132',
            '饮品': 'g34236'
        }

        for category_name, keywords in self.category_keywords.items():
            # Count how many of this category's keywords occur in the page.
            keyword_count = sum(1 for keyword in keywords if keyword in content)

            if keyword_count >= 2:  # require at least two keyword hits
                # Fallback id must be stable across runs: built-in hash() is
                # salted per process for strings (PYTHONHASHSEED), so derive
                # a deterministic pseudo-id from the character codes instead.
                category_id = common_category_ids.get(
                    category_name,
                    f'g{sum(ord(ch) for ch in category_name) % 1000}'
                )

                category = {
                    'id': category_id,
                    'name': category_name,
                    'url': f"https://www.dianping.com/{city_code}/ch10/{category_id}",
                    'confidence': keyword_count / len(keywords),
                    'source': 'keyword_discovery'
                }
                categories.append(category)

        return categories

    def is_valid_category_name(self, category_name):
        """Return True when the extracted text looks like a real category.

        Filters out navigation chrome, over/under-length names, and names
        with long digit runs or repeated '&' (markup noise).
        """
        # Navigation / UI labels that regex extraction may pick up.
        invalid_keywords = [
            '全部', '推荐', '热门', '附近', '地图', '筛选', '排序', '价格', '评分',
            '距离', '更多', '登录', '注册', '首页', '搜索', '客服', '帮助', '关于',
            '条件', '重置', '确定', '取消', '返回', '刷新'
        ]

        # Reject anything containing a UI label.
        for keyword in invalid_keywords:
            if keyword in category_name:
                return False

        # Real category names are short but not single characters.
        if len(category_name) < 2 or len(category_name) > 15:
            return False

        # 3+ consecutive digits or multiple '&' suggests scraped markup noise.
        if re.search(r'[0-9]{3,}', category_name) or category_name.count('&') > 1:
            return False

        return True

    def deduplicate_categories(self, categories):
        """De-duplicate by id AND name, then sort by descending confidence.

        Categories without a confidence score sort as 0.5.
        """
        unique_categories = []
        seen_ids = set()
        seen_names = set()

        for category in categories:
            category_id = category['id']
            category_name = category['name']

            # Keep the first occurrence of each id/name.
            if category_id not in seen_ids and category_name not in seen_names:
                unique_categories.append(category)
                seen_ids.add(category_id)
                seen_names.add(category_name)

        # Highest-confidence candidates first.
        unique_categories.sort(key=lambda x: x.get('confidence', 0.5), reverse=True)

        return unique_categories

    def validate_categories(self, categories, city_code):
        """Validate each candidate category and return the ones that pass.

        Successful candidates gain a 'validation_info' key. A long random
        sleep between checks throttles requests to avoid anti-bot blocks.
        """
        logger.info(f"[VALIDATION] 开始验证 {len(categories)} 个品类...")

        valid_categories = []

        for i, category in enumerate(categories):
            logger.info(f"[VALIDATION] 验证品类 {i+1}/{len(categories)}: {category['name']}")

            try:
                is_valid, validation_info = self.validate_single_category(city_code, category)

                if is_valid:
                    category['validation_info'] = validation_info
                    valid_categories.append(category)
                    logger.info(f"[VALIDATION] ✅ {category['name']} 验证通过: {validation_info}")
                else:
                    logger.warning(f"[VALIDATION] ❌ {category['name']} 验证失败: {validation_info}")

                # Throttle between category checks.
                time.sleep(random.uniform(8, 15))

            except Exception as e:
                logger.error(f"[VALIDATION] {category['name']} 验证异常: {e}")
                continue

        logger.info(f"[VALIDATION] 验证完成，{len(valid_categories)}/{len(categories)} 个品类有效")
        return valid_categories

    def validate_single_category(self, city_code, category):
        """Validate one category by sampling its shop listing page.

        Passes when the page lists at least 10 shops and >= 60% of the first
        (up to) 15 shop names look relevant to the category.

        Returns:
            (bool, str): validity flag and a human-readable reason.
        """
        with sync_playwright() as p:
            browser = p.chromium.launch(headless=True)  # headless for bulk validation
            context = browser.new_context()

            cookies = self.parse_cookies()
            context.add_cookies(cookies)
            page = context.new_page()

            try:
                # Navigate to the category's listing page.
                url = category['url']
                page.goto(url, timeout=25000)
                page.wait_for_load_state('networkidle', timeout=20000)

                # A login redirect means the session cookie has expired.
                if 'login' in page.url.lower():
                    return False, "Cookie失效"

                content = page.content()

                # Crude shop extraction: listing rows render as <li class="">.
                shop_blocks = re.findall(r'<li class="">(.*?)</li>', content, re.DOTALL)

                if len(shop_blocks) < 10:  # too few shops -> likely invalid category
                    return False, f"商铺数量不足: {len(shop_blocks)}/10"

                # Check how many sampled shop names match the category keywords.
                relevant_count = 0
                total_checked = min(len(shop_blocks), 15)  # sample at most 15 shops

                for block in shop_blocks[:total_checked]:
                    shop_name_match = re.search(r'<h4>([^<]+)</h4>', block)
                    if shop_name_match:
                        shop_name = shop_name_match.group(1)
                        if self.is_shop_relevant_to_category(shop_name, category['name']):
                            relevant_count += 1

                relevance_rate = relevant_count / total_checked if total_checked > 0 else 0

                if relevance_rate >= 0.6:  # 60% relevance threshold
                    return True, f"商铺数:{len(shop_blocks)}, 相关性:{relevance_rate:.1%}"
                else:
                    return False, f"相关性不足:{relevance_rate:.1%} (需要≥60%)"

            except Exception as e:
                return False, f"验证异常: {str(e)[:50]}"

            finally:
                browser.close()

    def is_shop_relevant_to_category(self, shop_name, category_name):
        """Return True iff the shop name contains one of the category keywords.

        Deliberately strict: shop names that match no keyword count as
        irrelevant, so a polluted category page fails the 60% threshold in
        validate_single_category(). (The previous "irrelevant keyword"
        reverse-check was dead code — every path already returned False —
        and has been removed without changing behaviour.)
        """
        keywords = self.category_keywords.get(category_name, [])
        return any(keyword in shop_name for keyword in keywords)

    def save_discovery_results(self, city_code, categories):
        """Persist discovery results to a timestamped JSON file in the CWD."""
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f'discovered_categories_{city_code}_{timestamp}.json'

        data = {
            'city_code': city_code,
            'discovery_time': timestamp,
            'total_discovered': len(categories),
            'categories': categories
        }

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            # Bug fix: the log previously printed a literal "(unknown)"
            # instead of the actual output path.
            logger.info(f"[DISCOVERY] 发现结果已保存到: {filename}")
        except Exception as e:
            logger.error(f"[DISCOVERY] 保存发现结果失败: {e}")

    def get_category_structure(self, categories):
        """Build a two-level category hierarchy from validated categories.

        Returns:
            dict with a fixed primary category ('美食') and one secondary
            entry per discovered category.
        """
        category_structure = {
            'primary_category': '美食',
            'secondary_categories': []
        }

        for category in categories:
            secondary_category = {
                'name': category['name'],
                'id': category['id'],
                'url_suffix': f"/{category['id']}",
                'validation_info': category.get('validation_info', ''),
                'confidence': category.get('confidence', 1.0)
            }
            category_structure['secondary_categories'].append(secondary_category)

        return category_structure

def test_dynamic_discovery():
    """Smoke-test the dynamic category discovery flow against the live site.

    Launches a real (headful) browser session for the city of Shenzhen, so
    it requires network access and a valid logged-in cookie. Returns True
    when at least one validated category is discovered, False otherwise.
    """
    print("🔍 测试动态品类发现功能")
    
    # Most recent session cookie header.
    # NOTE(review): hard-coded credentials (dper/ctu session tokens) are
    # committed in source — they expire quickly and should be loaded from
    # an environment variable or config file instead.
    cookie_string = """_lxsdk_cuid=19808159371c8-05cd514b118b6d-4c657b58-1fa400-19808159371c8; _lxsdk=19808159371c8-05cd514b118b6d-4c657b58-1fa400-19808159371c8; fspop=test; _hc.v=58b26e64-727f-207a-d027-320332b3ce73.1752482289; s_ViewType=10; utm_source_rg=; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1752482343,1752649316; HMACCOUNT=F8DDF6B997D55E51; _lx_utm=utm_source%3Dbing%26utm_medium%3Dorganic; cy=1; cye=shanghai; WEBDFPID=z328wxz5vxuy5673z6z00xz9v19wyy9u8019184xuvw9795800z86877-1752741794392-1752482303408CSQGKMGfd79fef3d01d5e9aadc18ccd4d0c95075603; ua=%E5%95%A7%E5%95%A7%E3%80%82%E3%80%82%E3%80%82_9862; ctu=7449b161c0edb993f9eeccef559d43ecce5c02dccf52e7810396d4d01d8e6041; logan_session_token=pz9odrusrgwu0z8qkxds; qruuid=caeb1d2e-d18b-47d4-8232-1106c31b046e; dplet=f0cfe3d249fdcf61ec412d6e24f0a496; dper=020232203caaf9449f96d3e66223a52b7f8fcfc25389d464614ef838c54faa2f15bffc0a9e0aee344f56a26808f5c58f3fb0183ab242934193b5000000003a2b0000d93e435e3bd91a63cf49c2e6f53099e23f385c8ab301a60c3617cccf227345d9328c46a90a1980554529a0a485c8d9d5; ll=7fd06e815b796be3df069dec7836c3df; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1752713642; _lxsdk_s=19815de3c13-8a5-ff0-654%7C%7C115"""
    
    discoverer = DynamicCategoryDiscovery(cookie_string)
    
    # Run discovery for Shenzhen as the test city.
    print("\n" + "="*60)
    print("测试城市: 深圳")
    print("="*60)
    
    categories = discoverer.discover_city_categories('shenzhen')
    
    if categories:
        print(f"\n✅ 深圳品类发现成功，共发现 {len(categories)} 个有效品类:")
        
        for i, category in enumerate(categories):
            print(f"{i+1}. {category['name']} (ID: {category['id']})")
            print(f"   验证信息: {category.get('validation_info', '未验证')}")
            print(f"   置信度: {category.get('confidence', 'N/A')}")
        
        # Build and summarize the two-level category structure.
        structure = discoverer.get_category_structure(categories)
        print(f"\n📊 品类结构:")
        print(f"一级品类: {structure['primary_category']}")
        print(f"二级品类数量: {len(structure['secondary_categories'])}")
        
        return True
    else:
        print("❌ 深圳品类发现失败")
        return False

if __name__ == "__main__":
    # 配置日志
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    
    test_dynamic_discovery()
