#!/usr/bin/env python3
"""
定制化爬虫 - 针对特定需求：单城市多品类各20页
基于P3版本优化，专门用于：用一个cookie爬取单个城市的多个品类，每次取20页数据

使用方法：
1. 确保cookie.txt文件存在于同一目录下，格式如下：
   cookie2="你的cookie字符串"
   cookie3="另一个cookie字符串"
   ...

2. 运行爬虫：
   - 使用默认参数：python custom_crawler_for_specific_task.py
   - 指定cookie：python custom_crawler_for_specific_task.py --cookie cookie3
   - 指定城市和品类：python custom_crawler_for_specific_task.py --cookie cookie2 --city 杭州 --categories 粤菜 水果生鲜 --pages 10
   - 指定爬取页数：python custom_crawler_for_specific_task.py --pages 10
   - 多品类示例：python custom_crawler_for_specific_task.py --cookie cookie3 --city 苏州 --categories 江浙菜 火锅 小龙虾 --pages 15
   - 完整示例：python custom_crawler_for_specific_task.py --cookie cookie4 --city 北京 --categories 日式料理 小吃快餐 川菜 粤菜 --pages 15

3. 参数说明：
   --cookie：指定使用的cookie名称，默认为cookie2
   --city：指定爬取的城市名称，默认为苏州
   --categories：指定爬取的品类名称，支持多个品类，默认为粤菜和水果生鲜
   --pages：每个品类爬取的页数，默认为20
"""

import time
import csv
import random
import re
import json
import os
import winsound
import tkinter as tk
from tkinter import messagebox
from datetime import datetime
from playwright.sync_api import sync_playwright
import logging
import threading
from fake_useragent import UserAgent

# Logging configuration: INFO level, one line per event, mirrored to both
# custom_crawler.log (UTF-8, so Chinese messages survive) and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('custom_crawler.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class CustomCrawler:
    """定制化爬虫 - 专门用于单城市两品类各20页的需求"""
    
    def __init__(self, cookie_name='cookie4'):
        """Initialize crawler state: cookie string, UA generator, counters, lookup tables.

        Args:
            cookie_name: entry name to read from cookie.txt.
                NOTE(review): the module-level docs describe 'cookie2' as the
                default -- confirm that 'cookie4' here is intentional.
        """
        logger.info(f"[INIT] 初始化爬虫，使用cookie: {cookie_name}")

        # Pull the raw cookie string for the requested entry out of cookie.txt.
        self.cookie_string = self.load_cookie_from_file(cookie_name)

        if self.cookie_string:
            logger.info(f"[INIT] Cookie加载成功，长度: {len(self.cookie_string)} 字符")
        else:
            logger.error(f"[INIT] Cookie加载失败，请检查cookie.txt文件是否存在且包含{cookie_name}")

        # Random User-Agent source (fake_useragent).
        self.ua = UserAgent()

        # Anti-detection bookkeeping.
        self.captcha_count = 0              # CAPTCHAs encountered so far
        self.skipped_pages = 0              # pages skipped after failed retries
        self.daily_request_count = 0        # requests issued today
        self.max_daily_requests = 200       # daily request budget
        self.consecutive_failures = 0       # current failure streak
        self.max_consecutive_failures = 5   # failure-streak abort threshold

        # Supported cities: display name -> dianping URL slug.
        self.cities = {
            '长沙': 'changsha',
            '深圳': 'shenzhen',
            '苏州': 'suzhou',
            '南宁': 'nanning',
            '上海': 'shanghai',
            '广州': 'guangzhou',
            '杭州': 'hangzhou',
            '厦门': 'xiamen',
            '武汉': 'wuhan',
            '西安': 'xian',
        }

        # Supported categories: display name -> verified dianping category id.
        self.categories = {
            '烤肉': 'g34303',
            '面包蛋糕甜品': 'g117',
            '日式料理': 'g113',
            '川菜': 'g102',
            '水果生鲜': 'g2714',
            '江浙菜': 'g101',
            '小吃快餐': 'g112',
            '粤菜': 'g103',
            '火锅': 'g110',
            '烧烤烤串': 'g508',
            '小龙虾': 'g1204',
            '咖啡': 'g132',
            '饮品': 'g34236',
        }

        # Column order used when exporting rows.
        self.core_fields = [
            'city',
            'primary_category',
            'secondary_category',
            'shop_name',
            'avg_price',
        ]

        self.all_data = []

    def get_random_user_agent(self):
        """
        随机生成User-Agent（Chrome、Firefox、Edge）
        """
        try:
            browsers = ['chrome', 'firefox', 'edge']
            browser = random.choice(browsers)
            
            if browser == 'chrome':
                user_agent = self.ua.chrome
            elif browser == 'firefox':
                user_agent = self.ua.firefox
            else:  # edge
                user_agent = self.ua.edge
            
            logger.info(f"[UA] 随机选择浏览器: {browser}")
            logger.info(f"[UA] User-Agent: {user_agent[:100]}...")
            return user_agent
            
        except Exception as e:
            logger.warning(f"[UA] 生成随机User-Agent失败，使用默认: {e}")
            # 备用User-Agent列表
            fallback_uas = [
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0"
            ]
            return random.choice(fallback_uas)

    def get_random_viewport(self):
        """
        随机生成视窗大小
        """
        viewports = [
            {'width': 1920, 'height': 1080},
            {'width': 1366, 'height': 768},
            {'width': 1536, 'height': 864},
            {'width': 1440, 'height': 900},
            {'width': 1600, 'height': 900},
            {'width': 1280, 'height': 720}
        ]
        viewport = random.choice(viewports)
        logger.info(f"[VIEWPORT] 随机视窗: {viewport['width']}x{viewport['height']}")
        return viewport

    def get_browser_fingerprint_script(self):
        """Build a JS init script that spoofs common browser-fingerprint surfaces.

        Randomizes the reported screen size, timezone offset, language,
        platform, logical-CPU count and device memory; hides
        ``navigator.webdriver``; fakes the plugin list and notification
        permission query; and adds sub-pixel jitter to canvas text to
        perturb canvas fingerprinting. Intended to be passed to
        ``page.add_init_script`` so it runs before any page script.

        Returns:
            str: the JavaScript source to inject.
        """
        # Randomly pick the fingerprint parameters to report.
        screen_width = random.choice([1920, 1366, 1536, 1440, 1600, 1280])
        screen_height = random.choice([1080, 768, 864, 900, 720])
        color_depth = random.choice([24, 32])
        timezone_offset = random.choice([-480, -420, -360, -300, -240])  # assorted timezones
        language = random.choice(['zh-CN', 'zh-TW', 'en-US', 'en-GB'])
        platform = random.choice(['Win32', 'Win64'])
        
        # Random logical-CPU count.
        hardware_concurrency = random.choice([4, 6, 8, 12, 16])
        
        # Random device memory (GB).
        device_memory = random.choice([4, 8, 16, 32])
        
        script = f"""
        // 伪装屏幕信息
        Object.defineProperty(screen, 'width', {{
            get: () => {screen_width}
        }});
        Object.defineProperty(screen, 'height', {{
            get: () => {screen_height}
        }});
        Object.defineProperty(screen, 'colorDepth', {{
            get: () => {color_depth}
        }});
        
        // 伪装时区
        Date.prototype.getTimezoneOffset = function() {{
            return {timezone_offset};
        }};
        
        // 伪装语言
        Object.defineProperty(navigator, 'language', {{
            get: () => '{language}'
        }});
        Object.defineProperty(navigator, 'languages', {{
            get: () => ['{language}']
        }});
        
        // 伪装平台
        Object.defineProperty(navigator, 'platform', {{
            get: () => '{platform}'
        }});
        
        // 伪装硬件并发数
        Object.defineProperty(navigator, 'hardwareConcurrency', {{
            get: () => {hardware_concurrency}
        }});
        
        // 伪装设备内存
        Object.defineProperty(navigator, 'deviceMemory', {{
            get: () => {device_memory}
        }});
        
        // 移除webdriver标识
        Object.defineProperty(navigator, 'webdriver', {{
            get: () => undefined
        }});
        
        // 伪装插件信息
        Object.defineProperty(navigator, 'plugins', {{
            get: () => [
                {{
                    name: 'Chrome PDF Plugin',
                    filename: 'internal-pdf-viewer',
                    description: 'Portable Document Format'
                }},
                {{
                    name: 'Chrome PDF Viewer',
                    filename: 'mhjfbmdgcfjbbpaeojofohoefgiehjai',
                    description: ''
                }}
            ]
        }});
        
        // 伪装权限查询
        const originalQuery = window.navigator.permissions.query;
        window.navigator.permissions.query = (parameters) => (
            parameters.name === 'notifications' ?
                Promise.resolve({{ state: Notification.permission }}) :
                originalQuery(parameters)
        );
        
        // 随机化Canvas指纹
        const getContext = HTMLCanvasElement.prototype.getContext;
        HTMLCanvasElement.prototype.getContext = function(type) {{
            if (type === '2d') {{
                const context = getContext.call(this, type);
                const originalFillText = context.fillText;
                context.fillText = function(text, x, y, maxWidth) {{
                    // 添加微小的随机偏移
                    const offset = Math.random() * 0.1;
                    return originalFillText.call(this, text, x + offset, y + offset, maxWidth);
                }};
                return context;
            }}
            return getContext.call(this, type);
        }};
        
        console.log('[FINGERPRINT] 浏览器指纹伪装已加载');
        """
        
        logger.info(f"[FINGERPRINT] 生成指纹: {screen_width}x{screen_height}, {language}, {hardware_concurrency}核, {device_memory}GB")
        return script

    def create_browser_context(self, playwright_instance):
        """Launch Chromium with a randomized fingerprint and return (browser, context).

        Chromium is used for best compatibility. The User-Agent and viewport
        are randomized per call; the context is pinned to zh-CN / Shanghai.

        Args:
            playwright_instance: the object yielded by sync_playwright().

        Returns:
            tuple: (browser, context).

        Raises:
            Exception: re-raised after logging when the launch fails.
        """
        try:
            user_agent = self.get_random_user_agent()
            viewport = self.get_random_viewport()

            # Launch flags: disable automation tells and background throttling.
            launch_args = [
                '--no-sandbox',
                '--disable-blink-features=AutomationControlled',
                '--disable-web-security',
                '--disable-features=VizDisplayCompositor',
                '--disable-dev-shm-usage',
                '--disable-extensions',
                '--disable-plugins',
                '--disable-images',  # skip image loading for speed
                '--disable-javascript-harmony-shipping',
                '--disable-background-timer-throttling',
                '--disable-backgrounding-occluded-windows',
                '--disable-renderer-backgrounding',
                '--disable-field-trial-config',
                '--disable-ipc-flooding-protection',
                f'--user-agent={user_agent}',
            ]
            browser = playwright_instance.chromium.launch(
                headless=False,
                args=launch_args
            )

            # Default headers sent with every request from this context.
            default_headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Accept-Encoding': 'gzip, deflate, br',
                'DNT': str(random.choice([0, 1])),
                'Connection': 'keep-alive',
                'Upgrade-Insecure-Requests': '1',
            }
            context = browser.new_context(
                user_agent=user_agent,
                viewport=viewport,
                locale='zh-CN',
                timezone_id='Asia/Shanghai',
                extra_http_headers=default_headers,
            )

            logger.info(f"[BROWSER] 浏览器上下文创建成功")
            logger.info(f"[BROWSER] User-Agent: {user_agent[:50]}...")
            logger.info(f"[BROWSER] 视窗: {viewport}")
            return browser, context

        except Exception as e:
            logger.error(f"[BROWSER] 创建浏览器上下文失败: {e}")
            raise

    def load_cookie_from_file(self, cookie_name='cookie2'):
        """从cookie.txt文件中读取指定的cookie"""
        cookie_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cookie.txt')
        
        try:
            if not os.path.exists(cookie_file_path):
                logger.error(f"[COOKIE] cookie.txt文件不存在: {cookie_file_path}")
                return ""
                
            with open(cookie_file_path, 'r', encoding='utf-8') as f:
                cookie_content = f.read()
            
            # 使用正则表达式匹配指定的cookie
            pattern = rf'{cookie_name}="(.*?)"'
            match = re.search(pattern, cookie_content, re.DOTALL)
            
            if match:
                cookie_value = match.group(1)
                logger.info(f"[COOKIE] 成功从cookie.txt加载 {cookie_name}")
                return cookie_value
            else:
                logger.error(f"[COOKIE] 在cookie.txt中未找到 {cookie_name}")
                # 如果找不到指定的cookie，尝试使用cookie2作为备用
                if cookie_name != 'cookie2':
                    logger.warning(f"[COOKIE] 尝试使用cookie2作为备用")
                    return self.load_cookie_from_file('cookie2')
                return ""
                
        except Exception as e:
            logger.error(f"[COOKIE] 读取cookie文件失败: {e}")
            return ""
    
    def parse_cookies(self):
        """解析Cookie字符串"""
        cookies = []
        for cookie_pair in self.cookie_string.split('; '):
            if '=' in cookie_pair:
                name, value = cookie_pair.split('=', 1)
                cookies.append({
                    'name': name.strip(),
                    'value': value.strip(),
                    'domain': '.dianping.com',
                    'path': '/'
                })
        logger.info(f"[COOKIE] 解析了 {len(cookies)} 个Cookie")
        return cookies

    def extract_shop_data(self, page, city_name, category_name):
        """提取商铺数据"""
        logger.info(f"[EXTRACT] 开始提取数据: {city_name}-{category_name}")
        
        try:
            # 等待页面加载
            page.wait_for_load_state('networkidle', timeout=20000)
            
            # 检查是否被重定向到登录页面
            if 'login' in page.url.lower():
                logger.error("[EXTRACT] Cookie失效，被重定向到登录页面")
                return []
            
            content = page.content()
            shops = []
            
            # 提取商铺信息
            shop_blocks = re.findall(r'<li class="">(.*?)</li>', content, re.DOTALL)
            logger.info(f"[EXTRACT] 找到 {len(shop_blocks)} 个商铺块")
            
            for block in shop_blocks:
                try:
                    # 提取商铺名称
                    name_match = re.search(r'<h4>([^<]+)</h4>', block)
                    if not name_match:
                        continue
                    shop_name = name_match.group(1).strip()
                    
                    # 提取价格
                    price_match = re.search(r'<b>￥(\d+)</b>', block)
                    avg_price = price_match.group(1) if price_match else ""
                    
                    # 构建数据
                    shop = {
                        'city': city_name,
                        'primary_category': '美食',
                        'secondary_category': category_name,
                        'shop_name': shop_name,
                        'avg_price': avg_price
                    }
                    
                    shops.append(shop)
                    
                except Exception as e:
                    continue
            
            logger.info(f"[EXTRACT] {city_name}-{category_name} 提取完成，共 {len(shops)} 条数据")
            return shops
            
        except Exception as e:
            logger.error(f"[EXTRACT] {city_name}-{category_name} 数据提取失败: {e}")
            return []

    def detect_captcha(self, page):
        """检测验证码 - 优化版本，更加智能和宽容"""
        try:
            # 检测常见的验证码元素
            captcha_selectors = [
                '.captcha',
                '#captcha',
                '[class*="verify"]',
                '[id*="verify"]',
                '.verification',
                '[class*="captcha"]'
            ]

            # 只有当验证码元素可见时才认为是真正的验证码
            for selector in captcha_selectors:
                element = page.query_selector(selector)
                if element and element.is_visible():
                    return f"检测到可见验证码元素: {selector}"

            # 检测页面标题 - 更严格的判断
            title = page.title().lower()
            # 只有当标题明确包含验证相关词汇且页面内容确实是验证页面时才判断为验证码
            if ('验证中心' in title or 'verification center' in title or
                'captcha' in title or '人机验证' in title):
                # 进一步检查页面内容确认是验证页面
                page_content = page.content().lower()
                if ('验证码' in page_content or 'captcha' in page_content or
                    '人机验证' in page_content or 'verification' in page_content):
                    return f"页面标题和内容确认为验证页面: {title}"

            return None

        except Exception as e:
            logger.debug(f"验证码检测异常: {e}")  # 改为debug级别，减少日志噪音
            return None

    def simulate_user_behavior(self, page):
        """Simulate natural user activity on `page` (mouse, clicks, scrolling, dwell).

        Runs a randomized sequence of interactions to reduce the chance of
        bot detection. All timing uses wall-clock sleeps, so a single call
        can take tens of seconds. Best-effort: on any failure it falls back
        to a plain random delay instead of raising.
        """
        try:
            logger.info("[BEHAVIOR] 开始增强用户行为模拟...")

            # 1. Randomized initial dwell time.
            initial_wait = random.uniform(2, 8)  # 2-8 s, wide range on purpose
            logger.debug(f"[BEHAVIOR] 初始停留: {initial_wait:.1f}秒")
            time.sleep(initial_wait)

            # 2. Random mouse movement with a more natural trajectory.
            mouse_moves = random.randint(2, 8)  # 2-8 moves
            for i in range(mouse_moves):
                # Keep movements inside plausible page regions.
                if i == 0:
                    # First move lands near the page center.
                    x = random.randint(800, 1200)
                    y = random.randint(300, 600)
                else:
                    # Subsequent moves stay within a reasonable area.
                    x = random.randint(200, 1700)
                    y = random.randint(200, 900)

                page.mouse.move(x, y)
                time.sleep(random.uniform(0.8, 3.0))  # natural pause between moves

            # 3. Occasional random click.
            if random.random() < 0.6:  # 60% chance to click
                try:
                    # Click somewhere in the page body (blank / safe area).
                    click_x = random.randint(300, 1600)
                    click_y = random.randint(200, 800)
                    page.mouse.click(click_x, click_y)
                    logger.debug(f"[BEHAVIOR] 随机点击: ({click_x}, {click_y})")
                    time.sleep(random.uniform(1, 3))
                except Exception as e:
                    logger.debug(f"[BEHAVIOR] 点击失败: {e}")

            # 4. More natural scrolling: one of three patterns per call.
            scroll_type = random.choice(['smooth', 'step', 'reading'])

            if scroll_type == 'smooth':
                # Smooth scrolling to random positions.
                for i in range(random.randint(3, 6)):
                    scroll_pos = random.uniform(0.1, 0.9)
                    page.evaluate(f"window.scrollTo({{top: document.body.scrollHeight * {scroll_pos}, behavior: 'smooth'}})")
                    time.sleep(random.uniform(2, 5))

            elif scroll_type == 'step':
                # Stepwise scrolling (simulated reading).
                positions = [0.2, 0.4, 0.6, 0.8]
                random.shuffle(positions)
                for pos in positions[:random.randint(2, 4)]:
                    page.evaluate(f"window.scrollTo(0, document.body.scrollHeight * {pos})")
                    time.sleep(random.uniform(3, 6))

            else:  # reading
                # Reading-style scroll: down, further down, then back up a bit.
                page.evaluate("window.scrollTo(0, document.body.scrollHeight * 0.3)")
                time.sleep(random.uniform(2, 4))
                page.evaluate("window.scrollTo(0, document.body.scrollHeight * 0.7)")
                time.sleep(random.uniform(3, 5))
                page.evaluate("window.scrollTo(0, document.body.scrollHeight * 0.4)")
                time.sleep(random.uniform(2, 3))

            # 5. Occasional page refresh.
            if random.random() < 0.15:  # 15% chance to reload
                logger.info("[BEHAVIOR] 执行随机页面刷新...")
                try:
                    page.reload(timeout=30000)
                    time.sleep(random.uniform(3, 6))
                    logger.debug("[BEHAVIOR] 页面刷新完成")
                except Exception as e:
                    logger.debug(f"[BEHAVIOR] 页面刷新失败: {e}")

            # 6. Randomized final dwell time.
            stay_pattern = random.choice(['short', 'medium', 'long'])
            if stay_pattern == 'short':
                final_wait = random.uniform(2, 5)
            elif stay_pattern == 'medium':
                final_wait = random.uniform(4, 8)
            else:  # long
                final_wait = random.uniform(6, 12)

            logger.debug(f"[BEHAVIOR] 最终停留模式: {stay_pattern}, 时间: {final_wait:.1f}秒")
            time.sleep(final_wait)

            # 7. Occasional extra interaction.
            if random.random() < 0.3:  # 30% chance of extra interaction
                try:
                    # Peek at the page bottom...
                    page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                    time.sleep(random.uniform(1, 3))
                    # ...then jump back to the top.
                    page.evaluate("window.scrollTo(0, 0)")
                    time.sleep(random.uniform(1, 2))
                    logger.debug("[BEHAVIOR] 执行额外页面交互")
                except Exception as e:
                    logger.debug(f"[BEHAVIOR] 额外交互失败: {e}")

            logger.info("[BEHAVIOR] 增强用户行为模拟完成")

        except Exception as e:
            logger.error(f"用户行为模拟异常: {e}")
            # On failure, still wait a while before the caller proceeds.
            time.sleep(random.uniform(3, 6))

    def show_captcha_alert(self, city_name, category_name, page_num, retry_count, max_retries):
        """显示验证码弹窗提醒"""
        try:
            def show_alert():
                root = tk.Tk()
                root.withdraw()  # 隐藏主窗口

                # 设置弹窗在最前面
                root.attributes('-topmost', True)

                message = f"""🚨 验证码检测提醒

📍 城市：{city_name}
📂 品类：{category_name}
📄 页面：第{page_num}页
🔄 重试：{retry_count}/{max_retries}

⚠️ 系统检测到验证码，正在自动重试...
💡 如果频繁出现，建议稍后再试

⏰ 时间：{datetime.now().strftime('%H:%M:%S')}"""

                messagebox.showwarning("验证码提醒", message)
                root.destroy()

            # 在新线程中显示弹窗，避免阻塞爬虫
            alert_thread = threading.Thread(target=show_alert)
            alert_thread.daemon = True
            alert_thread.start()

            # 播放提醒音效
            try:
                winsound.Beep(1000, 300)  # 1000Hz，300ms
            except:
                pass

            logger.info(f"[ALERT] 验证码弹窗提醒已发送: {city_name}-{category_name}-第{page_num}页")

        except Exception as e:
            logger.error(f"[ALERT] 验证码弹窗提醒失败: {e}")

    def show_captcha_skip_alert(self, city_name, category_name, page_num):
        """显示验证码跳过页面的弹窗提醒"""
        try:
            def show_alert():
                root = tk.Tk()
                root.withdraw()  # 隐藏主窗口

                # 设置弹窗在最前面
                root.attributes('-topmost', True)

                message = f"""⚠️ 验证码跳过提醒

📍 城市：{city_name}
📂 品类：{category_name}
📄 页面：第{page_num}页

❌ 验证码重试失败，已跳过此页面
🔄 爬虫将继续下一页...

💡 建议：如果频繁跳过，考虑暂停爬取

⏰ 时间：{datetime.now().strftime('%H:%M:%S')}"""

                messagebox.showwarning("页面跳过提醒", message)
                root.destroy()

            # 在新线程中显示弹窗
            alert_thread = threading.Thread(target=show_alert)
            alert_thread.daemon = True
            alert_thread.start()

            # 播放不同的提醒音效
            try:
                for _ in range(2):
                    winsound.Beep(800, 200)
                    time.sleep(0.1)
            except:
                pass

            logger.info(f"[ALERT] 页面跳过弹窗提醒已发送: {city_name}-{category_name}-第{page_num}页")

        except Exception as e:
            logger.error(f"[ALERT] 页面跳过弹窗提醒失败: {e}")

    def crawl_specific_task(self, city_name, category_names, max_pages=20):
        """
        执行特定任务：爬取指定城市的指定品类，每个品类最多20页
        
        Args:
            city_name: 城市名称
            category_names: 品类名称列表（最多2个）
            max_pages: 每个品类的最大页数（默认20）
        """
        logger.info(f"[TASK] 开始执行特定任务: {city_name}, 品类: {category_names}, 每品类最多{max_pages}页")

        # 重置统计变量
        self.captcha_count = 0
        self.skipped_pages = 0
        self.page_refresh_count = 0  # 页面刷新次数
        self.ua_change_count = 0     # User-Agent更换次数

        # 检查cookie是否为空
        if not self.cookie_string:
            logger.error("[TASK] Cookie为空，无法执行爬取任务。请检查cookie.txt文件是否存在且格式正确。")
            self.play_error_sound()
            return False

        # 播放开始音效
        self.play_start_sound()
        
        # 验证输入
        if city_name not in self.cities:
            logger.error(f"[TASK] 不支持的城市: {city_name}")
            return False
        
        # 支持多品类爬取，不再限制数量
        logger.info(f"[TASK] 将爬取 {len(category_names)} 个品类: {category_names}")
        
        city_code = self.cities[city_name]
        task_start_time = datetime.now()
        
        all_task_data = []
        
        with sync_playwright() as p:
            try:
                # 使用新的随机化浏览器上下文创建方法
                browser, context = self.create_browser_context(p)
                
                # 添加Cookie
                cookies = self.parse_cookies()
                context.add_cookies(cookies)
                logger.info("✅ Cookie已添加")
                
                # 创建页面并添加指纹伪装脚本
                page = context.new_page()
                
                # 添加浏览器指纹伪装脚本
                fingerprint_script = self.get_browser_fingerprint_script()
                page.add_init_script(fingerprint_script)
                
                for i, category_name in enumerate(category_names):
                    if category_name not in self.categories:
                        logger.error(f"[TASK] 不支持的品类: {category_name}")
                        continue
                    
                    category_id = self.categories[category_name]
                    logger.info(f"[TASK] 开始处理品类 {i+1}/{len(category_names)}: {category_name}")
                    
                    category_data = []
                    consecutive_empty_pages = 0  # 连续无数据页面计数器
                    max_consecutive_empty = 2    # 连续无数据页面阈值

                    # 爬取指定页数
                    for page_num in range(1, max_pages + 1):
                        try:
                            # 构造URL
                            if page_num == 1:
                                url = f"https://www.dianping.com/{city_code}/ch10/{category_id}"
                            else:
                                url = f"https://www.dianping.com/{city_code}/ch10/{category_id}p{page_num}"
                            
                            logger.info(f"[TASK] 第{page_num}页: {url}")
                            
                            page.goto(url, timeout=30000)
                            
                            # 检查验证码 - 带重试机制
                            captcha_retry_count = 0
                            max_captcha_retries = 2

                            while captcha_retry_count < max_captcha_retries:
                                captcha = self.detect_captcha(page)
                                if captcha:
                                    self.captcha_count += 1  # 统计验证码次数
                                    captcha_retry_count += 1
                                    logger.warning(f"[TASK] 检测到验证码: {captcha}，重试 {captcha_retry_count}/{max_captcha_retries}")

                                    # 显示验证码弹窗提醒
                                    self.show_captcha_alert(city_name, category_name, page_num, captcha_retry_count, max_captcha_retries)

                                    if captcha_retry_count < max_captcha_retries:
                                        # 等待后重新加载页面（增加延迟）
                                        time.sleep(random.uniform(25, 40))
                                        try:
                                            page.reload(timeout=30000)
                                            time.sleep(random.uniform(5, 8))
                                        except Exception as e:
                                            logger.warning(f"[TASK] 页面重载失败: {e}")
                                            break
                                    else:
                                        # 最大重试次数后跳过当前页面（增加延迟）
                                        self.skipped_pages += 1  # 统计跳过页面数
                                        logger.warning(f"[TASK] 验证码重试失败，跳过第{page_num}页")

                                        # 显示验证码跳过弹窗提醒
                                        self.show_captcha_skip_alert(city_name, category_name, page_num)

                                        time.sleep(random.uniform(35, 50))
                                        break
                                else:
                                    # 没有验证码，跳出重试循环
                                    break

                            # 如果最终还是有验证码，跳过当前页面
                            if captcha_retry_count >= max_captcha_retries:
                                consecutive_empty_pages += 1  # 验证码失败也计入连续无数据
                                logger.warning(f"[TASK] 验证码重试失败，计入连续无数据页面: {consecutive_empty_pages}")

                                # 检查是否达到连续无数据阈值
                                if consecutive_empty_pages >= max_consecutive_empty:
                                    logger.warning(f"[TASK] 连续{consecutive_empty_pages}页无数据（含验证码失败），停止爬取品类: {category_name}")
                                    break
                                continue
                            
                            # 偶尔更换User-Agent（每15页左右）
                            if page_num % random.randint(12, 18) == 0:
                                try:
                                    new_ua = self.get_random_user_agent()
                                    page.set_extra_http_headers({'User-Agent': new_ua})
                                    self.ua_change_count += 1
                                    logger.debug(f"[TASK] 第{page_num}页更换User-Agent")
                                except Exception as e:
                                    logger.debug(f"[TASK] User-Agent更换失败: {e}")

                            # 模拟用户行为
                            self.simulate_user_behavior(page)
                            
                            # 提取数据
                            page_shops = self.extract_shop_data(page, city_name, category_name)

                            if page_shops:
                                category_data.extend(page_shops)
                                consecutive_empty_pages = 0  # 重置连续无数据计数器
                                logger.info(f"[TASK] 第{page_num}页成功: {len(page_shops)} 个商铺")
                            else:
                                consecutive_empty_pages += 1  # 增加连续无数据计数器
                                logger.warning(f"[TASK] 第{page_num}页无数据")

                                # 检查是否达到连续无数据阈值
                                if consecutive_empty_pages >= max_consecutive_empty:
                                    logger.warning(f"[TASK] 连续{consecutive_empty_pages}页无数据，停止爬取品类: {category_name}")
                                    break
                            
                            # 智能页面间延迟（更自然的延迟策略）
                            if page_num < max_pages:
                                # 根据页面数和验证码情况调整延迟
                                base_delay = random.uniform(20, 35)  # 基础延迟20-35秒

                                # 如果最近遇到验证码，增加延迟
                                if self.captcha_count > 0 and (page_num - 1) % 5 == 0:
                                    base_delay += random.uniform(10, 20)
                                    logger.debug("[TASK] 验证码保护延迟已激活")

                                # 每10页增加额外休息时间
                                if page_num % 10 == 0:
                                    base_delay += random.uniform(15, 30)
                                    logger.debug("[TASK] 长时间爬取保护延迟已激活")

                                # 随机延迟模式
                                delay_pattern = random.choice(['normal', 'careful', 'relaxed'])
                                if delay_pattern == 'careful':
                                    base_delay *= random.uniform(1.2, 1.5)  # 增加20-50%
                                elif delay_pattern == 'relaxed':
                                    base_delay *= random.uniform(0.8, 1.0)  # 减少0-20%

                                logger.info(f"[TASK] 页面延迟({delay_pattern}): {base_delay:.1f}秒")
                                time.sleep(base_delay)
                            
                        except Exception as e:
                            logger.error(f"[TASK] 第{page_num}页异常: {e}")
                            break
                    
                    # 品类完成统计
                    logger.info(f"[TASK] 品类 {category_name} 完成: {len(category_data)} 个商铺")
                    all_task_data.extend(category_data)
                    
                    # 品类间延迟（增加延迟）
                    if i < len(category_names) - 1:
                        delay = random.uniform(60, 90)  # 60-90秒延迟
                        logger.info(f"[TASK] 品类间延迟: {delay:.1f}秒")
                        time.sleep(delay)
                
                # 保存数据
                if all_task_data:
                    self.save_task_data(all_task_data, city_name, category_names)
                    
                    # 显示完成通知（弹窗 + 音效）
                    task_elapsed = (datetime.now() - task_start_time).total_seconds()
                    self.show_completion_notification(city_name, category_names, len(all_task_data), task_elapsed,
                                                    self.captcha_count, self.skipped_pages)
                else:
                    self.play_error_sound()
                
                # 任务完成统计
                task_elapsed = (datetime.now() - task_start_time).total_seconds()
                logger.info(f"[TASK] 任务完成!")
                logger.info(f"[TASK] 总耗时: {task_elapsed/60:.1f}分钟")
                logger.info(f"[TASK] 总商铺: {len(all_task_data)} 个")
                logger.info(f"[TASK] 品类数: {len(category_names)} 个")
                logger.info(f"[TASK] 🛡️ 反检测统计:")
                logger.info(f"[TASK]   验证码遇到: {self.captcha_count} 次")
                logger.info(f"[TASK]   跳过页面: {self.skipped_pages} 页")
                logger.info(f"[TASK]   页面刷新: {getattr(self, 'page_refresh_count', 0)} 次")
                logger.info(f"[TASK]   UA更换: {getattr(self, 'ua_change_count', 0)} 次")
                
                return True
                
            except Exception as e:
                logger.error(f"[TASK] 任务执行异常: {e}")
                self.play_error_sound()
                return False
            
            finally:
                if 'browser' in locals():
                    browser.close()

    def show_completion_notification(self, city_name, categories, total_shops, elapsed_seconds=0,
                                   captcha_count=0, skipped_pages=0):
        """
        显示爬取完成通知（弹窗 + 音效）
        """
        try:
            # 播放系统提示音
            winsound.MessageBeep(winsound.MB_OK)

            # 计算平均效率
            avg_per_minute = (total_shops / (elapsed_seconds / 60)) if elapsed_seconds > 0 else 0

            # 创建通知消息
            captcha_info = f"🛡️ 验证码遇到：{captcha_count}次" if captcha_count > 0 else "🛡️ 验证码遇到：0次"
            skip_info = f"⏭️ 跳过页面：{skipped_pages}页" if skipped_pages > 0 else "⏭️ 跳过页面：0页"

            # 新增反检测统计
            refresh_info = f"🔄 页面刷新：{getattr(self, 'page_refresh_count', 0)}次"
            ua_info = f"🎭 UA更换：{getattr(self, 'ua_change_count', 0)}次"

            message = f"""
🎉 爬取任务完成！

📍 城市：{city_name}
📂 品类：{', '.join(categories)} ({len(categories)}个)
🏪 总商铺数：{total_shops}
⏰ 总耗时：{elapsed_seconds/60:.1f}分钟
📊 平均效率：{avg_per_minute:.1f}个/分钟

🛡️ 反检测统计：
{captcha_info}
{skip_info}
{refresh_info}
{ua_info}

💾 完成时间：{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

✅ 数据已保存到CSV文件中！
            """
            
            # 在新线程中显示弹窗，避免阻塞主程序
            def show_popup():
                root = tk.Tk()
                root.withdraw()  # 隐藏主窗口
                messagebox.showinfo("爬取完成通知", message)
                root.destroy()
            
            # 启动通知线程
            notification_thread = threading.Thread(target=show_popup)
            notification_thread.daemon = True
            notification_thread.start()
            
            # 播放额外的成功音效（连续3声短促提示音）
            for _ in range(3):
                winsound.Beep(800, 200)  # 800Hz频率，200ms持续时间
                time.sleep(0.1)
                
            logging.info(f"✅ 通知已发送：{city_name} 爬取完成，共获取 {total_shops} 个商铺")
            
        except Exception as e:
            logging.error(f"❌ 发送通知失败：{e}")

    def play_error_sound(self):
        """
        Play the error notification sound (Windows-only via winsound).
        """
        try:
            winsound.MessageBeep(winsound.MB_ICONHAND)
            winsound.Beep(400, 500)  # low-frequency error tone, 500 ms
        except Exception as e:
            # Fix: log through the module-level `logger` for consistency with
            # the rest of the file instead of the root logger.
            logger.error(f"播放错误音效失败：{e}")

    def play_start_sound(self):
        """
        Play the crawl-start notification sound (Windows-only via winsound).
        """
        try:
            winsound.MessageBeep(winsound.MB_ICONASTERISK)
            winsound.Beep(1000, 300)  # high-frequency start tone, 300 ms
        except Exception as e:
            # Fix: log through the module-level `logger` for consistency with
            # the rest of the file instead of the root logger.
            logger.error(f"播放开始音效失败：{e}")

    def save_task_data(self, data, city_name, category_names):
        """
        Save crawled shop records to a timestamped CSV file and log
        basic data-quality statistics.

        Args:
            data: list of shop dicts whose keys match self.core_fields.
            city_name: city name embedded in the output filename.
            category_names: category names embedded in the output filename.
        """
        if not data:
            logger.warning("[SAVE] 没有数据需要保存")
            return
        
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        categories_str = "_".join(category_names)
        filename = f'custom_crawl_{city_name}_{categories_str}_{timestamp}.csv'
        
        try:
            with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=self.core_fields)
                writer.writeheader()
                # writerows is equivalent to the per-row loop but clearer.
                writer.writerows(data)
            
            # Fix: log the actual output filename (was a hard-coded
            # "(unknown)" placeholder).
            logger.info(f"[SAVE] 数据已保存到: {filename}")
            
            # Data quality: share of rows with a non-empty avg_price.
            # .get() keeps the stats robust if a row lacks the key.
            price_complete = sum(1 for shop in data if shop.get('avg_price'))
            price_rate = (price_complete / len(data)) * 100 if data else 0
            
            logger.info(f"[SAVE] 数据质量分析:")
            logger.info(f"  总商铺数: {len(data)}")
            logger.info(f"  价格完整率: {price_rate:.1f}% ({price_complete}/{len(data)})")
            
            # Per-category shop counts.
            category_stats = {}
            for shop in data:
                cat = shop.get('secondary_category')
                category_stats[cat] = category_stats.get(cat, 0) + 1
            
            logger.info(f"  品类分布:")
            for cat, count in category_stats.items():
                logger.info(f"    {cat}: {count} 个商铺")
            
        except Exception as e:
            logger.error(f"[SAVE] 保存失败: {e}")

def main():
    """Command-line entry point.

    Parses the CLI options, builds a CustomCrawler bound to the requested
    cookie, then runs the single-city / multi-category crawl task.
    """
    import argparse
    
    # Build the command-line interface.
    cli = argparse.ArgumentParser(description='定制化爬虫 - 单城市多品类各20页')
    cli.add_argument('--cookie', type=str, default='cookie2', help='指定使用的cookie名称，例如cookie2、cookie3等')
    cli.add_argument('--city', type=str, default='苏州', help='指定爬取的城市名称')
    cli.add_argument('--categories', type=str, nargs='+', default=['粤菜', '水果生鲜'], help='指定爬取的品类名称，支持多个品类')
    cli.add_argument('--pages', type=int, default=20, help='每个品类爬取的页数')
    opts = cli.parse_args()
    
    # Crawler bound to the requested cookie entry from cookie.txt.
    crawler = CustomCrawler(cookie_name=opts.cookie)
    
    logger.info(f"指定品类数量: {len(opts.categories)} 个")
    
    # Startup banner with the effective configuration.
    banner = "=" * 50
    logger.info(banner)
    logger.info("定制化爬虫启动 - 随机化User-Agent和浏览器指纹")
    logger.info(f"使用Cookie: {opts.cookie}")
    logger.info(f"目标城市: {opts.city}")
    logger.info(f"目标品类: {opts.categories}")
    logger.info(f"每品类页数: {opts.pages}")
    logger.info(banner)
    
    if crawler.crawl_specific_task(opts.city, opts.categories, opts.pages):
        logger.info("🎉 任务执行成功!")
    else:
        logger.error("❌ 任务执行失败!")

# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()