#!/usr/bin/env python3
"""
大众点评爬虫 P4 版本 - 爬虫核心模块
负责核心爬取逻辑和数据提取
"""

import logging
import time
import random
import re
import threading
from typing import Dict, List, Optional, Tuple, Any
from playwright.sync_api import sync_playwright, Page
from pathlib import Path
import sys

# 添加配置路径
sys.path.append(str(Path(__file__).parent.parent))
from config.settings import get_config, get_cookie_string

# Import the anti-spider enhancement modules; when they are missing, fall
# back to inert placeholder classes so the crawler still runs (degraded).
try:
    from .fingerprint_manager import FingerprintManager
    from .behavior_simulator import BehaviorSimulator
    ANTI_SPIDER_ENHANCED = True
    print("[CRAWLER_CORE] 反爬虫增强模块导入成功")
except ImportError as e:
    ANTI_SPIDER_ENHANCED = False
    print(f"[CRAWLER_CORE] 反爬虫增强模块导入失败: {e}")
    # Placeholder classes mirroring the real interfaces with no-op behavior.
    class FingerprintManager:
        def get_current_fingerprint(self): return {}  # no fingerprint data
        def get_stealth_script(self): return ""  # no stealth JS to inject
        def get_browser_args(self): return []  # no extra launch arguments

    class BehaviorSimulator:
        def simulate_realistic_browsing(self, page):
            # Minimal stand-in: random pause, then report a nominal duration.
            time.sleep(random.uniform(2, 5))
            return 5.0

# Import a desktop-notification backend: prefer plyer, fall back to
# win10toast; notifications are disabled when neither is installed.
try:
    from plyer import notification
    NOTIFICATION_AVAILABLE = True
except ImportError:
    try:
        from win10toast import ToastNotifier
        NOTIFICATION_AVAILABLE = True
    except ImportError:
        NOTIFICATION_AVAILABLE = False

logger = logging.getLogger(__name__)

class CrawlerCore:
    """爬虫核心 - 负责核心爬取逻辑"""
    
    def __init__(self):
        """Wire up configuration, cookies, stats, notifications and the
        optional anti-spider / phase-1.5 components.

        Optional components are imported lazily inside try/except so a
        missing module degrades the crawler instead of crashing it.
        """
        self.config = get_config()
        self.cookie_string = get_cookie_string()

        # Fall back to a placeholder cookie when none is configured.
        if not self.cookie_string or self.cookie_string.strip() == "":
            logger.warning("[CRAWLER_CORE] Cookie为空，将使用默认测试Cookie")
            self.cookie_string = "fspop=test; _lxsdk_cuid=test"

        # Output field definitions, driven by configuration.
        self.core_fields = self.config['data']['core_fields']

        # Run counters; start/end times are filled in by the session driver.
        self.stats = {
            'pages_crawled': 0,
            'shops_extracted': 0,
            'errors_encountered': 0,
            'start_time': None,
            'end_time': None
        }

        # Desktop notifications are best-effort and depend on optional libs.
        self.notification_enabled = NOTIFICATION_AVAILABLE
        if not self.notification_enabled:
            logger.warning("[CRAWLER_CORE] 通知库不可用，将跳过通知功能")

        # Anti-spider manager (used e.g. for 403 tracking); None on failure.
        try:
            from .anti_spider_manager import AntiSpiderManager
            self.anti_spider = AntiSpiderManager()
            logger.info("[CRAWLER_CORE] 反爬管理器初始化成功")
        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 反爬管理器初始化失败: {e}")
            self.anti_spider = None

        # Fingerprint/behavior components; when the enhanced modules are
        # absent, the module-level placeholder classes are used instead.
        if ANTI_SPIDER_ENHANCED:
            try:
                self.fingerprint_manager = FingerprintManager()
                self.behavior_simulator = BehaviorSimulator()
                logger.info("[CRAWLER_CORE] ✅ 反爬虫增强组件初始化成功")
            except Exception as e:
                logger.error(f"[CRAWLER_CORE] ❌ 反爬虫增强组件初始化失败: {e}")
                self.fingerprint_manager = FingerprintManager()
                self.behavior_simulator = BehaviorSimulator()
        else:
            self.fingerprint_manager = FingerprintManager()
            self.behavior_simulator = BehaviorSimulator()
            logger.warning("[CRAWLER_CORE] 使用基础反爬虫组件")

        # Phase-1.5 components (account pool / scheduler / monitor /
        # category discovery); all-or-nothing, gated by phase_1_5_enabled.
        try:
            from .account_pool_manager import AccountPoolManager
            from .network_scheduler import NetworkScheduler
            from .simple_monitor import SimpleMonitor
            from .category_discovery import CategoryDiscovery

            self.account_manager = AccountPoolManager()
            self.scheduler = NetworkScheduler()
            self.monitor = SimpleMonitor(self.account_manager, self.scheduler)
            self.category_discovery = CategoryDiscovery()

            logger.info("[CRAWLER_CORE] ✅ 第1.5阶段组件初始化成功")
            self.phase_1_5_enabled = True

        except Exception as e:
            logger.warning(f"[CRAWLER_CORE] ⚠️ 第1.5阶段组件初始化失败: {e}")
            self.phase_1_5_enabled = False
            # Placeholders so downstream attribute access stays safe.
            self.account_manager = None
            self.scheduler = None
            self.monitor = None
            self.category_discovery = None

    def send_notification(self, title: str, message: str):
        """Fire a desktop notification without blocking the caller.

        Falls back to a debug log entry when no notification backend is
        available; delivery otherwise happens on a daemon thread.

        Args:
            title: Notification headline.
            message: Notification body text.
        """
        if not self.notification_enabled:
            logger.debug(f"[NOTIFICATION] {title}: {message}")
            return

        def _deliver():
            try:
                if 'plyer' in sys.modules:
                    notification.notify(
                        title=title,
                        message=message,
                        app_name="大众点评爬虫P4",
                        timeout=10
                    )
                else:
                    # win10toast is the fallback backend.
                    ToastNotifier().show_toast(title, message, duration=10)
                logger.info(f"[NOTIFICATION] ✅ 通知已发送: {title}")
            except Exception as e:
                logger.error(f"[NOTIFICATION] ❌ 通知发送失败: {e}")

        # Daemon thread keeps the main crawl loop from blocking on delivery.
        worker = threading.Thread(target=_deliver, daemon=True)
        worker.start()

    def parse_cookies(self) -> List[Dict[str, str]]:
        """Parse the raw cookie header string into Playwright cookie dicts.

        Accepts both "; " and ";" as pair separators (the old code split on
        "; " only, silently dropping pairs when the space was missing) and
        skips malformed fragments without a "=".

        Returns:
            List of cookie dicts scoped to .dianping.com, path "/".
        """
        cookies = []
        # Split on ';' and strip each fragment so headers with or without a
        # space after the semicolon parse identically.
        for cookie_pair in self.cookie_string.split(';'):
            cookie_pair = cookie_pair.strip()
            if '=' in cookie_pair:
                name, value = cookie_pair.split('=', 1)
                cookies.append({
                    'name': name.strip(),
                    'value': value.strip(),
                    'domain': '.dianping.com',
                    'path': '/'
                })
        return cookies
    
    def create_browser_context(self):
        """Create an enhanced browser context integrated with the
        fingerprint manager.

        Launches Chromium with merged launch arguments, applies the current
        fingerprint (UA, viewport, locale, timezone), injects cookies and
        the stealth init script. On any failure the partially created
        Playwright resources are released (the old code leaked them) and
        the basic context is used instead.

        Returns:
            Tuple (playwright, browser, context, page); the caller owns
            these and must close/stop them.
        """
        logger.info("[CRAWLER_CORE] 创建增强浏览器上下文...")

        p = None
        browser = None
        try:
            # Current fingerprint profile (UA/viewport/locale/timezone).
            fingerprint = self.fingerprint_manager.get_current_fingerprint()

            p = sync_playwright().start()

            # Merge config launch args with fingerprint args, de-duplicated.
            browser_args = self.fingerprint_manager.get_browser_args()
            browser_config = self.config['browser']
            all_args = list(set(browser_config.get('args', []) + browser_args))

            browser = p.chromium.launch(
                headless=browser_config.get('headless', False),
                args=all_args
            )

            # Context configured from the fingerprint, with config fallbacks.
            context = browser.new_context(
                user_agent=fingerprint.get('user_agent', browser_config.get('user_agent', '')),
                viewport=fingerprint.get('viewport', browser_config.get('viewport', {'width': 1920, 'height': 1080})),
                locale=fingerprint.get('language', 'zh-CN'),
                timezone_id=fingerprint.get('timezone', 'Asia/Shanghai'),
                extra_http_headers={
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'DNT': str(random.choice([0, 1])),
                    'Connection': 'keep-alive',
                    'Upgrade-Insecure-Requests': '1',
                }
            )

            # Cookie injection is best-effort; failures are only logged.
            try:
                cookies = self.parse_cookies()
                if cookies:
                    context.add_cookies(cookies)
                    logger.info(f"[CRAWLER_CORE] 添加了 {len(cookies)} 个Cookie")
            except Exception as e:
                logger.warning(f"[CRAWLER_CORE] Cookie添加失败: {e}")

            # Stealth init script runs before any page script.
            page = context.new_page()
            stealth_script = self.fingerprint_manager.get_stealth_script()
            if stealth_script:
                page.add_init_script(stealth_script)
                logger.info("[CRAWLER_CORE] ✅ 反检测脚本已加载")

            logger.info(f"[CRAWLER_CORE] ✅ 增强浏览器上下文创建成功，指纹ID: {fingerprint.get('profile_id', 'unknown')}")
            return p, browser, context, page

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] ❌ 增强浏览器上下文创建失败: {e}")
            # Release anything partially created: the fallback starts its own
            # Playwright instance, so leaving these open would leak a browser
            # process and a Playwright driver.
            if browser is not None:
                try:
                    browser.close()
                except Exception:
                    pass
            if p is not None:
                try:
                    p.stop()
                except Exception:
                    pass
            # Fall back to the basic context.
            return self._create_basic_browser_context()

    def _create_basic_browser_context(self):
        """Create a basic browser context (fallback mode).

        Minimal Chromium launch driven only by the static browser config:
        no fingerprinting, no stealth script. Cookie injection is still
        attempted on a best-effort basis.

        Returns:
            Tuple (playwright, browser, context, page) owned by the caller.
        """
        logger.warning("[CRAWLER_CORE] 使用基础浏览器上下文模式")

        browser_config = self.config['browser']

        p = sync_playwright().start()
        browser = p.chromium.launch(
            headless=browser_config.get('headless', False),
            args=browser_config.get('args', [])
        )

        context = browser.new_context(
            user_agent=browser_config.get('user_agent', ''),
            viewport=browser_config.get('viewport', {'width': 1920, 'height': 1080})
        )

        # Best-effort cookie injection; failures are only logged.
        try:
            cookies = self.parse_cookies()
            if cookies:
                context.add_cookies(cookies)
        except Exception as e:
            logger.warning(f"[CRAWLER_CORE] 基础模式Cookie添加失败: {e}")

        page = context.new_page()
        return p, browser, context, page
    
    def extract_shop_data(self, page: Page, city_name: str, category_name: str) -> List[Dict[str, Any]]:
        """Extract shop records from the currently loaded listing page.

        Waits for the page to settle, verifies the session is still logged
        in, performs a lightweight human-like pause-and-scroll, then
        delegates to the selector-based extractor.

        Args:
            page: Playwright page already navigated to a listing URL.
            city_name: City label stamped onto every record.
            category_name: Secondary-category label for every record.

        Returns:
            List of shop dicts; empty on failure or when logged out.
        """
        logger.info(f"[CRAWLER_CORE] 开始提取数据: {city_name}-{category_name}")

        start_time = time.time()
        shops = []

        try:
            # Wait for network-idle so the list has fully rendered.
            page.wait_for_load_state('networkidle', timeout=25000)

            # A redirect to the login page means the cookie has expired.
            if 'login' in page.url.lower():
                logger.error("[CRAWLER_CORE] Cookie失效，被重定向到登录页面")
                return []

            # Lightweight single-threaded behavior simulation (waits and
            # scrolls); kept simple to avoid Playwright threading issues.
            # The redundant function-local time/random imports that shadowed
            # the module-level imports have been removed.
            logger.info("[CRAWLER_CORE] 🎭 开始执行简化用户行为模拟...")
            try:
                behavior_start = time.time()
                time.sleep(random.uniform(2, 4))  # random 2-4s dwell

                # Scroll a third of the way down, then back to the top.
                page.evaluate("window.scrollTo(0, document.body.scrollHeight/3)")
                time.sleep(random.uniform(1, 2))
                page.evaluate("window.scrollTo(0, 0)")
                time.sleep(random.uniform(1, 2))

                # Log the measured duration (the old code logged a random
                # number unrelated to the actual time spent).
                behavior_time = time.time() - behavior_start
                logger.info(f"[CRAWLER_CORE] ✅ 简化用户行为模拟完成，耗时: {behavior_time:.1f}秒")
            except Exception as e:
                logger.warning(f"[CRAWLER_CORE] 用户行为模拟失败，继续数据提取: {e}")
                behavior_time = 0

            # Selector-based extraction (regex fallback happens inside it).
            shops = self._extract_shops_with_selectors(page, city_name, category_name)

            extraction_time = time.time() - start_time
            self.stats['shops_extracted'] += len(shops)

            logger.info(f"[CRAWLER_CORE] {city_name}-{category_name} 提取完成，共 {len(shops)} 条数据，总耗时 {extraction_time:.1f}秒")

            return shops

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] {city_name}-{category_name} 数据提取失败: {e}")
            self.stats['errors_encountered'] += 1
            return []
    
    def _extract_shops_with_selectors(self, page: Page, city_name: str, category_name: str) -> List[Dict[str, Any]]:
        """Extract shop records via CSS selectors.

        Tries a prioritized list of selectors and uses the first one that
        matches at least 3 elements; falls back to the regex extractor when
        selector-based extraction fails entirely.

        Returns:
            List of shop dicts (possibly empty).
        """
        try:
            # Wait for the shop list container (based on the known xpath
            # //*[@id="shop-all-list"]/ul/li[1]).
            page.wait_for_selector('#shop-all-list ul li, #shop-all-list, .txt, h4', timeout=15000)

            # Candidate selectors, most specific Dianping markup first.
            candidate_selectors = [
                '#shop-all-list ul li',  # primary, mirrors the known xpath
                '#shop-all-list li',     # looser variant
                '.txt',                  # Dianping text container
                '.tit',                  # Dianping title container
                'li[class=""]',          # legacy selector
                '.shop-list li',
                '.shop-item',
                '.shop-wrap',
                'li:has(.shop-name)',
                'li:has(h4)',            # li containing an h4 title
                'li:has(a[title])',      # li containing a titled link
                '[data-click-name]',     # Dianping data attribute
                '.J_brief-cont'          # Dianping JS container
            ]

            shop_elements = []
            for selector in candidate_selectors:
                try:
                    matches = page.query_selector_all(selector)
                    logger.info(f"[CRAWLER_CORE] 选择器 '{selector}' 找到 {len(matches)} 个元素")

                    # Three or more matches is treated as a valid hit.
                    if matches and len(matches) >= 3:
                        shop_elements = matches
                        logger.info(f"[CRAWLER_CORE] ✅ 使用选择器 {selector} 找到 {len(matches)} 个商铺")
                        break
                except Exception as e:
                    logger.debug(f"[CRAWLER_CORE] 选择器 '{selector}' 失败: {e}")

            if not shop_elements:
                logger.warning("[CRAWLER_CORE] 未找到商铺元素")
                return []

            # Extract each matched element; individual failures are skipped.
            results = []
            for position, node in enumerate(shop_elements, start=1):
                try:
                    record = self._extract_single_shop(node, city_name, category_name)
                    if record:
                        results.append(record)
                except Exception as e:
                    logger.debug(f"[CRAWLER_CORE] 提取第{position}个商铺失败: {e}")

            return results

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 选择器提取失败: {e}")
            # Fall back to the regex-based extractor.
            return self._extract_shops_with_regex(page, city_name, category_name)
    
    def _extract_single_shop(self, element, city_name: str, category_name: str) -> Optional[Dict[str, Any]]:
        """Extract a single shop record from one list-item element.

        Args:
            element: Playwright element handle for one shop entry.
            city_name: City label for the record.
            category_name: Secondary-category label for the record.

        Returns:
            Shop dict, or None when no shop name could be extracted.
        """
        try:
            # Shop-name selectors, most specific Dianping markup first.
            name_selectors = [
                'h4',                 # main title
                'a[title]',           # link carrying a title attribute
                '.shop-name',
                '.title',
                '.tit',               # Dianping title class
                '.txt h4',            # h4 inside the txt container
                'a[data-click-name]', # Dianping data-attribute link
                '.J_brief-cont h4'    # h4 inside the JS container
            ]
            shop_name = ""

            for selector in name_selectors:
                try:
                    name_element = element.query_selector(selector)
                    if name_element:
                        # Prefer the title attribute when available.
                        if selector == 'a[title]':
                            shop_name = name_element.get_attribute('title')
                        else:
                            shop_name = name_element.inner_text().strip()

                        if shop_name and shop_name.strip():
                            shop_name = shop_name.strip()
                            logger.debug(f"[CRAWLER_CORE] 使用选择器 '{selector}' 提取到店名: {shop_name}")
                            break
                except Exception as e:
                    logger.debug(f"[CRAWLER_CORE] 选择器 '{selector}' 提取店名失败: {e}")
                    continue

            # A record without a name is useless.
            if not shop_name:
                return None

            # Price selectors, most specific first.
            price_selectors = [
                'b:has-text("￥")',
                '.price',
                '.avg-price',
                'span:has-text("￥")',
                '.txt b',                # bold text inside the txt container
                '.mean-price',           # average price
                'b',                     # any bold element
                'span[class*="price"]',  # class containing "price"
                '.J_brief-cont b'        # bold inside the JS container
            ]
            avg_price = ""

            for selector in price_selectors:
                try:
                    price_element = element.query_selector(selector)
                    if price_element:
                        price_text = price_element.inner_text()
                        # Accept both full-width ￥ and half-width ¥, matching
                        # the regex fallback extractor (the old pattern only
                        # matched the full-width sign).
                        price_match = re.search(r'[￥¥](\d+)', price_text)
                        if price_match:
                            avg_price = price_match.group(1)
                            break
                except Exception:  # narrowed from a bare except
                    continue

            # Assemble the output record.
            return {
                'city': city_name,
                'primary_category': '美食',
                'secondary_category': category_name,
                'shop_name': shop_name,
                'avg_price': avg_price
            }

        except Exception as e:
            logger.debug(f"[CRAWLER_CORE] 提取单个商铺失败: {e}")
            return None
    
    def _extract_shops_with_regex(self, page: Page, city_name: str, category_name: str) -> List[Dict[str, Any]]:
        """Regex-based fallback extraction from the raw page HTML.

        The block patterns overlap (e.g. '<li class="">…' blocks also match
        the generic li pattern), which made the old code emit duplicate
        records; results are now de-duplicated on (shop_name, avg_price).

        Returns:
            List of unique shop dicts; empty on failure.
        """
        shops = []

        try:
            content = page.content()

            # Candidate block patterns, from most to least specific.
            shop_patterns = [
                r'<li class="">(.*?)</li>',   # legacy pattern
                r'<li[^>]*>(.*?)</li>',       # any li element
                r'<div class="txt">(.*?)</div>',  # txt container
                r'<div class="tit">(.*?)</div>'   # tit container
            ]

            all_blocks = []
            for pattern in shop_patterns:
                blocks = re.findall(pattern, content, re.DOTALL)
                if blocks:
                    all_blocks.extend(blocks)
                    logger.info(f"[CRAWLER_CORE] 模式 '{pattern}' 找到 {len(blocks)} 个商铺块")

            logger.info(f"[CRAWLER_CORE] 正则表达式总共找到 {len(all_blocks)} 个商铺块")

            seen = set()  # (shop_name, avg_price) pairs already emitted

            for i, block in enumerate(all_blocks):
                try:
                    # Name patterns, tried in order.
                    name_patterns = [
                        r'<h4[^>]*>([^<]+)</h4>',
                        r'title="([^"]*[店铺][^"]*)"',
                        r'<a[^>]*title="([^"]*)"[^>]*>',
                        r'class="[^"]*tit[^"]*"[^>]*>([^<]+)<'
                    ]

                    shop_name = ""
                    for name_pattern in name_patterns:
                        name_match = re.search(name_pattern, block)
                        if name_match:
                            shop_name = name_match.group(1).strip()
                            break

                    if not shop_name:
                        continue

                    # Price patterns (full- and half-width currency signs).
                    price_patterns = [
                        r'<b>￥(\d+)</b>',
                        r'人均[￥¥](\d+)',
                        r'￥(\d+)',
                        r'¥(\d+)'
                    ]

                    avg_price = ""
                    for price_pattern in price_patterns:
                        price_match = re.search(price_pattern, block)
                        if price_match:
                            avg_price = price_match.group(1)
                            break

                    # Skip records produced twice by overlapping patterns.
                    key = (shop_name, avg_price)
                    if key in seen:
                        continue
                    seen.add(key)

                    shops.append({
                        'city': city_name,
                        'primary_category': '美食',
                        'secondary_category': category_name,
                        'shop_name': shop_name,
                        'avg_price': avg_price
                    })

                except Exception as e:
                    logger.debug(f"[CRAWLER_CORE] 正则提取第{i+1}个商铺失败: {e}")
                    continue

            return shops

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 正则表达式提取失败: {e}")
            return []
    
    def simulate_user_behavior(self, page: Page):
        """Deprecated user-behavior simulation.

        Delegates to the behavior simulator; when that call fails, falls
        back to a basic in-place simulation (dwell, random mouse movement,
        staged scrolling).

        Returns:
            Estimated simulation time in seconds.
        """
        logger.warning("[CRAWLER_CORE] ⚠️ 使用已弃用的用户行为模拟方法，建议使用 behavior_simulator")

        try:
            # Preferred path: the dedicated behavior simulator.
            return self.behavior_simulator.simulate_realistic_browsing(page)
        except Exception as e:
            logger.warning(f"[CRAWLER_CORE] 行为模拟器调用失败，使用基础模拟: {e}")

        # Fallback: basic behavior simulation.
        try:
            logger.debug("[CRAWLER_CORE] 开始基础用户行为模拟...")

            # Random dwell.
            time.sleep(random.uniform(2, 4))

            # A handful of random mouse movements.
            for _ in range(random.randint(3, 6)):
                page.mouse.move(random.randint(100, 1800), random.randint(100, 1000))
                time.sleep(random.uniform(0.5, 1.5))

            # Scroll through the page in three stages.
            for fraction in (0.3, 0.6, 0.9):
                page.evaluate(f"window.scrollTo(0, document.body.scrollHeight * {fraction})")
                time.sleep(random.uniform(1, 2))

            # Final pause before handing control back.
            time.sleep(random.uniform(1, 3))

            logger.debug("[CRAWLER_CORE] 基础用户行为模拟完成")
            return 10.0  # rough duration estimate
        except Exception as e:
            logger.warning(f"[CRAWLER_CORE] 基础用户行为模拟异常: {e}")
            return 5.0
    
    def detect_captcha(self, page: Page, city_name: str = "", category_name: str = "") -> Optional[str]:
        """Detect captcha / risk-control pages on the current page.

        Checks known captcha selectors, then the URL and page title, then
        403-Forbidden markers. When a visible captcha element is found the
        method blocks in wait_for_captcha_resolution() for manual handling.

        Args:
            page: Current Playwright page.
            city_name / category_name: Crawl target labels; when both are
                set a desktop notification is sent on detection.

        Returns:
            Description string of what was detected, or None.
        """
        def _notify(title: str, message: str) -> None:
            # Only notify when the caller identified the crawl target
            # (replaces three identical inline blocks in the old code).
            if city_name and category_name:
                self.send_notification(title, message)

        try:
            # Known captcha element selectors.
            captcha_selectors = [
                'img[src*="captcha"]',
                '.captcha',
                '.geetest_holder',
                'input[placeholder*="验证码"]'
            ]

            for selector in captcha_selectors:
                try:
                    element = page.query_selector(selector)
                    if element and element.is_visible():
                        logger.warning(f"[CRAWLER_CORE] ⚠️ 检测到验证码: {selector}")
                        _notify("验证码检测", f"{city_name}-{category_name} 需要人工处理")

                        # Block until the user resolves the captcha (or timeout).
                        self.wait_for_captcha_resolution(page, selector)
                        return f"验证码: {selector}"
                except Exception:  # narrowed from a bare except; probe next selector
                    continue

            # Check the URL and title for captcha markers.
            current_url = page.url
            page_title = page.title()

            if any(keyword in current_url.lower() for keyword in ['captcha', 'verify', 'challenge']):
                _notify("验证码检测", f"{city_name}-{category_name} 需要人工处理")
                return f"验证码URL: {current_url}"

            if any(keyword in page_title.lower() for keyword in ['验证码', 'captcha', 'verify']):
                _notify("验证码检测", f"{city_name}-{category_name} 需要人工处理")
                return f"验证码标题: {page_title}"

            # Check for a 403 Forbidden page.
            if "403" in page_title or "Forbidden" in page_title or "403" in current_url:
                logger.warning(f"[CRAWLER_CORE] ⚠️ 检测到403 Forbidden页面")

                # Record the 403 with the anti-spider manager, if present.
                if self.anti_spider:
                    self.anti_spider.record_403_error("current", current_url)

                _notify("风控检测", f"{city_name}-{category_name} 遇到403风控，需要处理")
                return f"403 Forbidden: {page_title}"

            return None

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 验证码检测异常: {e}")
            return None

    def wait_for_captcha_resolution(self, page: Page, captcha_selector: str):
        """Block until the user solves the captcha, or 5 minutes elapse.

        Polls every 5 seconds for the captcha element to disappear or for
        the page URL to leave the captcha/verify flow, printing console
        guidance for the operator throughout.

        Args:
            page: Page currently showing the captcha.
            captcha_selector: Selector that matched the captcha element.

        Returns:
            True when the captcha appears resolved, False on timeout/error.
        """
        logger.info(f"[CRAWLER_CORE] 🔄 等待用户处理验证码...")
        logger.info(f"[CRAWLER_CORE] 📋 验证码类型: {captcha_selector}")
        logger.info(f"[CRAWLER_CORE] ⏰ 等待时间: 最多5分钟")

        # Operator-facing console guidance.
        print("\n" + "="*80)
        print("🚨 检测到验证码，需要人工处理！")
        print("="*80)
        print(f"📍 当前页面: {page.url}")
        print(f"🔍 验证码元素: {captcha_selector}")
        print("📝 请在浏览器中完成验证码验证")
        print("⏰ 系统将等待5分钟，请尽快完成验证")
        print("💡 完成验证后，页面会自动继续")
        print("="*80)

        max_wait_time = 300  # 5 minutes
        check_interval = 5   # poll every 5 seconds
        waited_time = 0

        try:
            while waited_time < max_wait_time:
                # Has the captcha element disappeared?
                try:
                    captcha_element = page.query_selector(captcha_selector)
                    if not captcha_element or not captcha_element.is_visible():
                        logger.info(f"[CRAWLER_CORE] ✅ 验证码已处理完成")
                        print("\n✅ 验证码验证完成，继续爬取...")
                        return True
                except:
                    # A failed lookup likely means the element is gone.
                    logger.info(f"[CRAWLER_CORE] ✅ 验证码元素已消失，可能已完成验证")
                    print("\n✅ 验证码验证完成，继续爬取...")
                    return True

                # Has the page navigated away (a sign of success)?
                current_url = page.url
                if 'captcha' not in current_url.lower() and 'verify' not in current_url.lower():
                    logger.info(f"[CRAWLER_CORE] ✅ 页面已跳转，验证码可能已完成")
                    print("\n✅ 页面跳转成功，继续爬取...")
                    return True

                # Wait out the interval and show a countdown.
                time.sleep(check_interval)
                waited_time += check_interval

                remaining_time = max_wait_time - waited_time
                if remaining_time > 0:
                    print(f"\r⏳ 等待验证码处理... 剩余时间: {remaining_time}秒", end="", flush=True)

            # Timed out without resolution.
            logger.warning(f"[CRAWLER_CORE] ⚠️ 验证码等待超时（{max_wait_time}秒）")
            print(f"\n⚠️ 验证码等待超时，将继续尝试爬取...")
            return False

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 验证码等待异常: {e}")
            print(f"\n❌ 验证码等待异常: {e}")
            return False
    
    def check_anti_spider_status(self) -> Tuple[bool, str]:
        """Ask the anti-spider manager whether crawling should stop.

        Returns:
            (should_stop, reason); (False, "") when no manager is attached.
        """
        if not self.anti_spider:
            return False, ""
        return self.anti_spider.should_stop_crawling()

    def get_anti_spider_report(self) -> Dict[str, Any]:
        """Return the anti-spider status report, or {} without a manager."""
        return self.anti_spider.get_status_report() if self.anti_spider else {}

    def get_crawler_stats(self) -> Dict[str, Any]:
        """Snapshot of the crawl counters plus anti-spider status.

        Returns:
            Copy of self.stats, extended with the anti-spider report (when
            a manager is attached) and an 'anti_spider_enhanced' summary.
        """
        snapshot = dict(self.stats)
        if self.anti_spider:
            snapshot['anti_spider'] = self.get_anti_spider_report()

        # Enhancement-feature summary.
        has_fingerprint = hasattr(self, 'fingerprint_manager')
        snapshot['anti_spider_enhanced'] = {
            'enabled': ANTI_SPIDER_ENHANCED,
            'fingerprint_manager': has_fingerprint,
            'behavior_simulator': hasattr(self, 'behavior_simulator'),
            'current_fingerprint': (
                getattr(self.fingerprint_manager, 'current_fingerprint', {}).get('profile_id', 'unknown')
                if has_fingerprint else 'none'
            )
        }

        return snapshot

    def get_enhancement_status(self) -> Dict[str, Any]:
        """Report availability of the anti-spider enhancement components.

        Returns:
            Dict of feature flags plus the current fingerprint id and the
            behavior simulator's session stats.
        """
        current_fingerprint = None
        session_stats = {}

        if hasattr(self, 'fingerprint_manager') and self.fingerprint_manager:
            try:
                fingerprint = self.fingerprint_manager.get_current_fingerprint()
                current_fingerprint = fingerprint.get('profile_id', 'unknown') if fingerprint else 'none'
            except Exception:  # narrowed from a bare except
                current_fingerprint = 'error'

        if hasattr(self, 'behavior_simulator') and self.behavior_simulator:
            try:
                session_stats = getattr(self.behavior_simulator, 'current_session_data', {})
            except Exception:  # narrowed from a bare except
                session_stats = {}

        return {
            'anti_spider_enhanced': ANTI_SPIDER_ENHANCED,
            'fingerprint_manager_available': hasattr(self, 'fingerprint_manager'),
            'behavior_simulator_available': hasattr(self, 'behavior_simulator'),
            'current_fingerprint_id': current_fingerprint or 'none',
            'session_stats': session_stats,
            'phase_1_5_enabled': getattr(self, 'phase_1_5_enabled', False),
            'account_manager_available': hasattr(self, 'account_manager') and self.account_manager is not None,
            'scheduler_available': hasattr(self, 'scheduler') and self.scheduler is not None,
            'monitor_available': hasattr(self, 'monitor') and self.monitor is not None
        }

    # ==================== 第1.5阶段新增方法 ====================

    def start_intelligent_crawl_session(self, cities: List[str], categories: List[str]) -> bool:
        """Run a phase-1.5 "intelligent" crawl session (entry point).

        Pipeline: verify the time slot is suitable, build the session
        config, lease an account for the network type, start monitoring,
        crawl, then mark the account success/error and print the dashboard.
        Falls back to start_basic_crawl_session() when the phase-1.5
        components failed to initialize.

        Args:
            cities: City names to crawl.
            categories: Secondary categories to crawl per city.

        Returns:
            True when the session completed successfully.
        """
        if not self.phase_1_5_enabled:
            logger.error("[CRAWLER_CORE] 第1.5阶段组件未启用，回退到基础模式")
            return self.start_basic_crawl_session(cities, categories)

        logger.info("[CRAWLER_CORE] 🚀 启动第1.5阶段智能爬取会话")

        # 1. Check whether this time slot is suitable for crawling.
        if not self.scheduler.should_start_crawling():
            logger.warning("[CRAWLER_CORE] 当前时段不适合爬取，建议稍后再试")
            self.monitor.print_dashboard()
            return False

        # 2. Build the session configuration.
        session_config = self.scheduler.get_session_config()
        network_type = session_config['network_type']

        logger.info(f"[CRAWLER_CORE] 会话配置: {network_type} 网络, {session_config['time_slot']} 时段")

        # 3. Lease an available account for this network type.
        account = self.account_manager.get_available_account(network_type)
        if not account:
            logger.error(f"[CRAWLER_CORE] {network_type} 网络无可用账号")
            self.account_manager.log_pool_status(network_type)
            return False

        # 4. Start session monitoring.
        self.monitor.start_session(account['id'], session_config)

        # 5. Execute the crawl.
        try:
            success = self._execute_crawl_with_account(
                account, cities, categories, session_config
            )

            if success:
                data_count = self.monitor.session_stats['total_crawled']
                self.account_manager.mark_account_success(account['id'], data_count)
                logger.info(f"[CRAWLER_CORE] ✅ 会话成功完成: {data_count} 条数据")
            else:
                self.account_manager.mark_account_error(account['id'], 'general')
                logger.warning("[CRAWLER_CORE] ⚠️ 会话执行失败")

            return success

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 爬取会话异常: {e}")
            self.account_manager.mark_account_error(account['id'], 'exception')
            return False

        finally:
            # Always close out monitoring and show the dashboard.
            self.monitor.end_session("Session completed")
            self.monitor.print_dashboard()

    def _execute_crawl_with_account(self, account: Dict, cities: List[str],
                                   categories: List[str], config: Dict) -> bool:
        """Crawl the city/category grid with one account.

        Stops when the session data quota (or the switch threshold) is hit,
        or when too many errors accumulate. The nested loops now share a
        single stop flag: hitting the quota ends the whole session instead
        of only the current city (the old code's `break` exited just the
        inner category loop, so crawling continued with the next city).

        Args:
            account: Account record with at least an 'id'.
            cities: Cities to crawl (outer loop).
            categories: Categories to crawl per city (inner loop).
            config: Session config (max_data_per_session, thresholds).

        Returns:
            True when the success rate reaches 80%.
        """
        account_id = account['id']
        max_data = config['max_data_per_session']

        logger.info(f"[CRAWLER_CORE] 使用账号 {account_id} 开始爬取，目标: {max_data} 条")

        # 1. Install this account's cookies.
        self.set_account_cookie(account)

        # 2. Create the enhanced browser context.
        try:
            p, browser, context, page = self.create_browser_context()
        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 浏览器创建失败: {e}")
            return False

        total_crawled = 0
        success_count = 0
        error_count = 0
        stop_session = False  # set when a quota/threshold ends the session

        try:
            for city in cities:
                if stop_session:
                    break
                for category in categories:
                    if total_crawled >= max_data:
                        logger.info(f"[CRAWLER_CORE] 达到会话限额 {max_data} 条，停止爬取")
                        stop_session = True
                        break

                    # Build the URL and crawl one city/category page.
                    try:
                        page_data = self.crawl_city_category_page(page, city, category)

                        if page_data and len(page_data) > 0:
                            total_crawled += len(page_data)
                            success_count += 1
                            self.monitor.update_progress(len(page_data), True)

                            logger.info(f"[CRAWLER_CORE] {city}-{category}: {len(page_data)} 条数据")

                            # Wind down early when close to the account quota.
                            if total_crawled >= max_data * config.get('switch_threshold', 0.8):
                                logger.info("[CRAWLER_CORE] 接近账号限额，准备结束会话")
                                stop_session = True
                                break
                        else:
                            error_count += 1
                            error_info = f"{city}-{category} 数据提取失败"
                            self.monitor.update_progress(0, False, error_info)

                            # Abort this account when errors pile up.
                            if error_count >= config.get('error_switch_threshold', 3):
                                logger.warning("[CRAWLER_CORE] 连续错误过多，停止当前账号")
                                self.account_manager.mark_account_error(account_id, '403')
                                return False

                    except Exception as e:
                        error_count += 1
                        error_info = f"{city}-{category} 爬取异常: {str(e)}"
                        self.monitor.update_progress(0, False, error_info)
                        logger.error(f"[CRAWLER_CORE] {error_info}")

                    # Adaptive delay between requests.
                    delay_time = self.calculate_intelligent_delay(success_count, error_count)
                    logger.debug(f"[CRAWLER_CORE] 智能延迟: {delay_time:.1f}秒")
                    time.sleep(delay_time)

                    # Refresh the status line every 5 attempts.
                    if (success_count + error_count) % 5 == 0:
                        self.monitor.print_simple_status()

        finally:
            browser.close()
            p.stop()

        # Success rate across all attempts.
        total_attempts = success_count + error_count
        success_rate = success_count / total_attempts if total_attempts > 0 else 0

        logger.info(f"[CRAWLER_CORE] 账号 {account_id} 会话完成:")
        logger.info(f"  数据量: {total_crawled} 条")
        logger.info(f"  成功率: {success_rate:.1%} ({success_count}/{total_attempts})")

        return success_rate >= 0.8  # 80% success rate counts as a success

    def set_account_cookie(self, account: Dict):
        """Load the cookie string for *account* into ``self.cookie_string``.

        Reads the account's configured cookie file when present and
        readable; any missing or unreadable file falls through to a
        per-account default test cookie.
        """
        rel_path = account.get('cookie_file', '')

        if rel_path:
            cookie_path = Path(__file__).parent.parent / rel_path

            if not cookie_path.exists():
                logger.warning(f"[CRAWLER_CORE] Cookie文件不存在: {cookie_path}")
            else:
                try:
                    with open(cookie_path, 'r', encoding='utf-8') as f:
                        self.cookie_string = f.read().strip()
                        logger.info(f"[CRAWLER_CORE] ✅ 加载账号 {account['id']} 的Cookie")
                        return
                except Exception as e:
                    logger.warning(f"[CRAWLER_CORE] Cookie文件读取失败: {e}")

        # Fall back to the default per-account test cookie.
        logger.warning(f"[CRAWLER_CORE] 使用默认Cookie for {account['id']}")
        self.cookie_string = f"fspop=test; _lxsdk_cuid=test_{account['id']}"

    def calculate_intelligent_delay(self, success_count: int, error_count: int) -> float:
        """Compute an adaptive inter-request delay in seconds.

        Starts from a 90-second baseline, doubles it when the running
        success rate falls below 70%, trims it by 20% when the rate is
        above 90%, and finally applies a +/-20% random jitter.
        """
        delay = 90  # baseline delay in seconds

        attempts = success_count + error_count
        if attempts:
            rate = success_count / attempts
            if rate < 0.7:
                delay *= 2.0   # struggling: back off hard
            elif rate > 0.9:
                delay *= 0.8   # cruising: speed up slightly

        # Jitter so the request cadence never looks mechanical.
        return delay * random.uniform(0.8, 1.2)

    def crawl_city_category_page(self, page: Page, city: str, category: str) -> List[Dict]:
        """Crawl one city/category listing page and return the shop records.

        Thin wrapper over ``extract_shop_data``; any failure is logged and
        reported as an empty list instead of propagating.
        """
        try:
            shops = self.extract_shop_data(page, city, category)
        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 页面爬取失败 {city}-{category}: {e}")
            return []
        return shops

    def start_basic_crawl_session(self, cities: List[str], categories: List[str]) -> bool:
        """Fallback crawl session (basic mode).

        Placeholder: logs a warning and reports failure, since the
        basic-mode path is not implemented yet.
        """
        logger.info("[CRAWLER_CORE] 启动基础爬取会话（回退模式）")

        try:
            # The legacy crawl logic would be invoked here once implemented.
            logger.warning("[CRAWLER_CORE] 基础模式暂未实现，请使用原有的爬取方法")
            return False

        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 基础爬取会话失败: {e}")
            return False

    def get_phase_1_5_status(self) -> Dict[str, Any]:
        """Report the availability of the phase-1.5 components.

        Returns a dict describing whether the account manager, scheduler
        and monitor are initialized, with their own status snapshots when
        available; a short disabled-marker dict otherwise.
        """
        if not self.phase_1_5_enabled:
            return {'enabled': False, 'reason': 'Components not initialized'}

        account_manager = self.account_manager
        scheduler = self.scheduler
        monitor = self.monitor

        return {
            'enabled': True,
            'account_manager': {
                'available': account_manager is not None,
                'pool_status': account_manager.get_pool_status() if account_manager else {}
            },
            'scheduler': {
                'available': scheduler is not None,
                'status': scheduler.get_scheduler_status() if scheduler else {}
            },
            'monitor': {
                'available': monitor is not None,
                'session_active': monitor.session_stats['session_id'] is not None if monitor else False
            }
        }

    def discover_city_categories(self, city_name: str, city_code: str = None) -> List[Dict[str, Any]]:
        """Discover the categories available in the given city.

        Resolves the city code via ``get_city_code`` when not supplied,
        then delegates to the category-discovery component. Returns an
        empty list on any failure (feature disabled, unknown city, or a
        discovery error).
        """
        if not (self.phase_1_5_enabled and self.category_discovery):
            logger.error("[CRAWLER_CORE] 品类发现功能未启用")
            return []

        # Resolve the city code from the built-in mapping when missing.
        code = city_code or self.get_city_code(city_name)
        if not code:
            logger.error(f"[CRAWLER_CORE] 无法获取城市 {city_name} 的代码")
            return []

        logger.info(f"[CRAWLER_CORE] 🔍 开始发现城市 {city_name} 的可用品类...")

        try:
            found = self.category_discovery.discover_city_categories(city_name, code)
        except Exception as e:
            logger.error(f"[CRAWLER_CORE] 品类发现失败: {e}")
            return []

        logger.info(f"[CRAWLER_CORE] ✅ 发现 {len(found)} 个品类")
        return found

    def get_city_code(self, city_name: str) -> Optional[str]:
        """Map a Chinese city name to its pinyin URL code.

        Simplified lookup over a fixed table of supported cities; returns
        None for any city not in the table.
        """
        city_code_map = {
            '深圳': 'shenzhen', '苏州': 'suzhou', '杭州': 'hangzhou',
            '长沙': 'changsha', '南京': 'nanjing', '无锡': 'wuxi',
            '常州': 'changzhou', '镇江': 'zhenjiang', '上海': 'shanghai',
            '北京': 'beijing', '广州': 'guangzhou', '成都': 'chengdu',
            '重庆': 'chongqing', '西安': 'xian', '武汉': 'wuhan',
            '天津': 'tianjin',
            # recently added cities
            '南宁': 'nanning', '厦门': 'xiamen',
        }

        return city_code_map.get(city_name)

    def start_smart_crawl_with_discovery(self, city_name: str, selected_categories: List[str] = None,
                                       target_data_count: int = 600) -> bool:
        """Smart crawl pipeline: discover categories, select, then crawl.

        Steps: (1) discover the city's available categories, (2) keep the
        caller-selected categories that are actually available — or default
        to the first two discovered ones when no selection was given —
        (3) run an intelligent crawl session over the resulting
        city x category combinations.
        """
        logger.info(f"[CRAWLER_CORE] 🚀 开始智能爬取流程: {city_name}")

        if not self.phase_1_5_enabled:
            logger.error("[CRAWLER_CORE] 第1.5阶段功能未启用")
            return False

        # Step 1: discover what this city offers.
        logger.info(f"[CRAWLER_CORE] 📋 步骤1: 发现 {city_name} 的可用品类")
        available_categories = self.discover_city_categories(city_name)

        if not available_categories:
            logger.error(f"[CRAWLER_CORE] 无法发现城市 {city_name} 的品类")
            return False

        # Step 2: resolve the final category list.
        discovered_names = [entry['name'] for entry in available_categories]

        if selected_categories:
            # Keep only the requested categories that were discovered.
            final_categories = []
            for wanted in selected_categories:
                if wanted in discovered_names:
                    final_categories.append(wanted)
                else:
                    logger.warning(f"[CRAWLER_CORE] 品类 '{wanted}' 在 {city_name} 不可用")

            if not final_categories:
                logger.error("[CRAWLER_CORE] 没有有效的品类可以爬取")
                return False
        else:
            # No explicit selection: default to the first two discovered.
            final_categories = discovered_names[:2]

        logger.info(f"[CRAWLER_CORE] 📋 步骤2: 选择品类 {final_categories}")

        # Step 3: run the crawl session.
        logger.info(f"[CRAWLER_CORE] 📋 步骤3: 开始爬取 {city_name} × {final_categories}")
        logger.info(f"[CRAWLER_CORE] 🎯 目标数据量: {target_data_count} 条")

        success = self.start_intelligent_crawl_session([city_name], final_categories)

        if success:
            logger.info(f"[CRAWLER_CORE] ✅ 智能爬取完成: {city_name}")
        else:
            logger.warning(f"[CRAWLER_CORE] ❌ 智能爬取失败: {city_name}")

        return success
