#!/usr/bin/env python3
"""
Dianping multi-dimensional crawler - P4 refactored main file.
Implements the modular architecture and core feature improvements.
"""

import argparse
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any
from playwright.sync_api import sync_playwright

# Import the refactored core modules.
from core import CityManager, CategoryDiscovery, SmartPagination, DataManager, CrawlerCore
from utils import DelayManager, setup_logger
from config.settings import ConfigManager

# Module-wide logger; writes to logs/crawler_p4.log.
logger = setup_logger('crawler_p4', 'logs/crawler_p4.log')

class CrawlerP4:
    """Dianping crawler, P4 edition - main coordinator.

    Wires together the refactored core modules (city management, dynamic
    category discovery, smart pagination, data persistence, crawler core,
    delay management) and exposes single-city / all-city crawl modes plus
    test helpers and a combined statistics report.
    """

    def __init__(self):
        logger.info("[CRAWLER_P4] 初始化 P4 版本爬虫...")

        # Core collaborators - each owns one concern of the crawl pipeline.
        self.city_manager = CityManager()
        self.category_discovery = CategoryDiscovery()
        self.smart_pagination = SmartPagination()
        self.data_manager = DataManager()
        self.crawler_core = CrawlerCore()
        self.delay_manager = DelayManager()

        # Configuration management.
        self.config_manager = ConfigManager()

        logger.info("[CRAWLER_P4] ✅ P4 版本爬虫初始化完成")

    def crawl_single_city(self, city_name: str) -> bool:
        """Crawl every discovered category of a single city.

        Args:
            city_name: Human-readable city name known to CityManager.

        Returns:
            True when at least one new shop record was collected and saved,
            False on any early abort (anti-spider block, missing config,
            no categories, or no data).
        """
        logger.info(f"[CRAWLER_P4] 🏙️ 开始单城市爬行: {city_name}")

        # Abort early if the anti-spider monitor reports we are blocked.
        should_stop, reason = self.crawler_core.check_anti_spider_status()
        if should_stop:
            logger.error(f"[CRAWLER_P4] ❌ 反爬检查失败: {reason}")
            self.crawler_core.send_notification("反爬警告", f"爬取被阻止: {reason}")
            return False

        # Resolve the city's crawl configuration (contains its URL code).
        city_config = self.city_manager.get_single_city_config(city_name)
        if not city_config:
            logger.error(f"[CRAWLER_P4] ❌ 城市配置获取失败: {city_name}")
            return False

        city_code = city_config['code']

        # Discover this city's categories dynamically.
        categories = self.category_discovery.discover_city_categories(city_name, city_code)
        if not categories:
            logger.error(f"[CRAWLER_P4] ❌ 类别发现失败: {city_name}")
            return False

        logger.info(f"[CRAWLER_P4] 📂 {city_name} 发现 {len(categories)} 个类别")

        # Crawl each category; a failure in one never aborts the rest.
        total_shops = 0
        for i, category in enumerate(categories):
            category_name = category['name']
            category_id = category['id']

            logger.info(f"[CRAWLER_P4] 📂 处理类别 {i+1}/{len(categories)}: {category_name}")

            try:
                shops = self._crawl_category_with_smart_pagination(
                    city_name, city_code, category_name, category_id
                )

                if shops:
                    added_count, duplicate_count = self.data_manager.add_batch_data(shops, city_name, category_name)
                    total_shops += added_count

                    logger.info(f"[CRAWLER_P4] ✅ {category_name} 完成: 新增 {added_count}, 重复 {duplicate_count}")

                    # Notify: category finished successfully.
                    self.crawler_core.send_notification(
                        "品类完成",
                        f"{city_name}-{category_name} 已完成 (成功)"
                    )
                else:
                    logger.warning(f"[CRAWLER_P4] ❌ {category_name} 未获取到数据")

                    # Notify: category finished with no data.
                    self.crawler_core.send_notification(
                        "品类完成",
                        f"{city_name}-{category_name} 已完成 (失败)"
                    )

                # Pause between categories to look less like a bot.
                if i < len(categories) - 1:
                    self.delay_manager.category_switch_delay()

            except Exception as e:
                logger.error(f"[CRAWLER_P4] {category_name} 爬取异常: {e}")

                # Notify: category finished with an exception.
                self.crawler_core.send_notification(
                    "品类完成",
                    f"{city_name}-{category_name} 已完成 (异常)"
                )
                continue

        # Persist collected data and report the city-level outcome.
        if total_shops > 0:
            self.data_manager.save_data()
            logger.info(f"[CRAWLER_P4] 🎉 {city_name} 单城市爬行完成，共获取 {total_shops} 个商铺")

            # BUGFIX: report the real (dynamically discovered) category count
            # instead of a hard-coded "20".
            self.crawler_core.send_notification(
                "城市完成",
                f"{city_name} 全部{len(categories)}个品类已处理完成"
            )
            return True
        else:
            logger.warning(f"[CRAWLER_P4] ⚠️ {city_name} 单城市爬行未获取到数据")

            # City finished but produced no data (same count fix as above).
            self.crawler_core.send_notification(
                "城市完成",
                f"{city_name} 全部{len(categories)}个品类已处理完成（无数据）"
            )
            return False

    def crawl_all_cities(self) -> bool:
        """Crawl all configured cities in priority order.

        Returns:
            True when at least one city crawl succeeded.
        """
        logger.info("[CRAWLER_P4] 🌍 开始全城市爬行模式")

        cities = self.city_manager.get_city_priority_order()
        logger.info(f"[CRAWLER_P4] 将爬取 {len(cities)} 个城市")

        successful_cities = 0
        total_shops = 0

        for i, (city_name, city_code) in enumerate(cities):
            logger.info(f"[CRAWLER_P4] 🏙️ 处理城市 {i+1}/{len(cities)}: {city_name}")

            try:
                if self.crawl_single_city(city_name):
                    successful_cities += 1

                    # Refresh the running total from the data manager.
                    stats = self.data_manager.get_current_stats()
                    total_shops = stats['current_data_count']

                # Pause between cities.
                if i < len(cities) - 1:
                    self.delay_manager.city_switch_delay()

            except Exception as e:
                logger.error(f"[CRAWLER_P4] {city_name} 城市爬取异常: {e}")
                continue

        # Final save of anything still buffered.
        self.data_manager.save_data()

        logger.info(f"[CRAWLER_P4] 🎉 全城市爬行完成")
        logger.info(f"[CRAWLER_P4] 📊 成功城市: {successful_cities}/{len(cities)}")
        logger.info(f"[CRAWLER_P4] 📊 总商铺数: {total_shops}")

        return successful_cities > 0

    def _crawl_category_with_smart_pagination(self, city_name: str, city_code: str,
                                            category_name: str, category_id: str) -> List[Dict[str, Any]]:
        """Crawl one category using smart pagination.

        Loads the category's first listing page, lets SmartPagination decide
        a page budget, then walks the pages until the budget is exhausted, a
        captcha appears, the last page is detected, or three consecutive
        pages come back empty.

        Returns:
            All shop dicts extracted for this category (possibly empty).
        """
        logger.info(f"[CRAWLER_P4] 🔍 智能分页爬取: {city_name}-{category_name}")

        all_shops = []

        try:
            # Enhanced browser context (stealth setup lives in CrawlerCore).
            p, browser, context, page = self.crawler_core.create_browser_context()
            logger.info("[CRAWLER_P4] ✅ 使用增强浏览器上下文")

            try:
                # Visit page 1 first so pagination info can be detected.
                first_url = f"https://www.dianping.com/{city_code}/ch10/{category_id}"
                page.goto(first_url, timeout=30000)
                page.wait_for_load_state('networkidle', timeout=15000)

                # Decide the pagination strategy for this category.
                pagination_info = self.smart_pagination.detect_pagination_info(page)
                max_pages = self.smart_pagination.get_max_pages_for_crawling(pagination_info)
                use_fallback = self.smart_pagination.should_use_fallback(pagination_info)

                logger.info(f"[CRAWLER_P4] 📊 分页策略: 最大{max_pages}页, 后备方法: {use_fallback}")

                page_num = 1
                pages_visited = 0  # pages actually processed (for accurate reporting)
                consecutive_empty_pages = 0

                while page_num <= max_pages:
                    try:
                        # Page 1 was already loaded above; only navigate for 2+.
                        if page_num > 1:
                            url = f"https://www.dianping.com/{city_code}/ch10/{category_id}p{page_num}"
                            logger.info(f"[CRAWLER_P4] 📄 第{page_num}页: {url}")
                            page.goto(url, timeout=30000)
                        else:
                            logger.info(f"[CRAWLER_P4] 📄 第{page_num}页: {first_url}")

                        # Stop immediately on a captcha challenge.
                        captcha = self.crawler_core.detect_captcha(page, city_name, category_name)
                        if captcha:
                            logger.error(f"[CRAWLER_P4] ❌ 检测到验证码: {captcha}")
                            break

                        # Extraction (enhanced user-behavior simulation built in).
                        page_shops = self.crawler_core.extract_shop_data(page, city_name, category_name)
                        pages_visited += 1

                        if page_shops:
                            all_shops.extend(page_shops)
                            consecutive_empty_pages = 0
                            logger.info(f"[CRAWLER_P4] ✅ 第{page_num}页: {len(page_shops)} 个商铺")
                        else:
                            consecutive_empty_pages += 1
                            logger.warning(f"[CRAWLER_P4] ❌ 第{page_num}页: 无数据")

                        # Smart last-page detection (skipped in fallback mode).
                        if not use_fallback:
                            if self.smart_pagination.is_last_page(page, page_num, max_pages):
                                logger.info(f"[CRAWLER_P4] 🛑 智能检测到最后一页: {page_num}")
                                break

                        # Give up after three consecutive empty pages.
                        if consecutive_empty_pages >= 3:
                            logger.info(f"[CRAWLER_P4] 🛑 连续3页无数据，停止爬取")
                            break

                        page_num += 1

                        # BUGFIX: use truthiness instead of len(page_shops) == 0 so a
                        # None return from extract_shop_data cannot raise TypeError
                        # (which the except below would turn into a silent abort).
                        self.delay_manager.page_turn_delay(is_empty_page=not page_shops)

                    except Exception as e:
                        logger.error(f"[CRAWLER_P4] 第{page_num}页异常: {e}")
                        break

                # Log pagination statistics for this category.
                pagination_stats = self.smart_pagination.get_pagination_stats()
                logger.info(f"[CRAWLER_P4] 📊 分页统计: {pagination_stats}")
                # BUGFIX: report pages actually processed; "page_num - 1" undercounted
                # by one whenever the loop exited via break.
                logger.info(f"[CRAWLER_P4] 📊 {category_name} 智能分页完成: {len(all_shops)} 个商铺，{pages_visited} 页")
                return all_shops

            except Exception as e:
                logger.error(f"[CRAWLER_P4] 智能分页爬取异常: {e}")
                return all_shops

            finally:
                # Always release Playwright resources, even on failure paths.
                try:
                    browser.close()
                    p.stop()
                    logger.info("[CRAWLER_P4] 🔒 浏览器资源已释放")
                except Exception as e:
                    logger.warning(f"[CRAWLER_P4] 浏览器关闭异常: {e}")
        except Exception as e:
            logger.error(f"[CRAWLER_P4] 浏览器创建失败: {e}")
            return []

    def test_category_discovery(self, city_name: str) -> Dict[str, Any]:
        """Run the category-discovery self-test for one city.

        Returns:
            The discovery test report, or {'error': ...} for an unknown city.
        """
        logger.info(f"[CRAWLER_P4] 🧪 测试类别发现: {city_name}")

        city_info = self.city_manager.get_city_by_name(city_name)
        if not city_info:
            return {'error': f'城市不存在: {city_name}'}

        return self.category_discovery.test_category_discovery(city_name, city_info['code'])

    def test_pagination(self, city_name: str, category_name: str) -> Dict[str, Any]:
        """Run the pagination self-test for one city/category pair.

        Returns:
            The pagination test report, or {'error': ...} when the city or
            category cannot be resolved.
        """
        logger.info(f"[CRAWLER_P4] 🧪 测试分页功能: {city_name}-{category_name}")

        city_info = self.city_manager.get_city_by_name(city_name)
        if not city_info:
            return {'error': f'城市不存在: {city_name}'}

        # Find the requested category among the discovered ones.
        categories = self.category_discovery.discover_city_categories(city_name, city_info['code'])
        target_category = None

        for cat in categories:
            if cat['name'] == category_name:
                target_category = cat
                break

        if not target_category:
            return {'error': f'类别不存在: {category_name}'}

        base_url = target_category['url']
        return self.smart_pagination.create_pagination_test_report(city_name, category_name, base_url)

    def get_comprehensive_report(self) -> Dict[str, Any]:
        """Aggregate the statistics of every module into one report dict."""
        return {
            'city_summary': self.city_manager.get_crawl_summary(),
            'data_quality': self.data_manager.get_data_quality_report(),
            'pagination_stats': self.smart_pagination.get_pagination_stats(),
            'crawler_stats': self.crawler_core.get_crawler_stats(),
            'delay_stats': self.delay_manager.get_delay_stats(),
            'category_cache': self.category_discovery.get_category_cache_info()
        }

def main():
    """CLI entry point: parse arguments and dispatch to a crawl/test mode.

    Modes (mutually exclusive by precedence): --city (single-city crawl),
    --test-category, --test-pagination, --report; with no flag, the default
    is the all-cities crawl. Data is always saved on exit, including on
    Ctrl-C and unexpected errors.
    """
    parser = argparse.ArgumentParser(description='大众点评爬虫 P4 版本')
    parser.add_argument('--city', help='指定要爬取的城市名称（单城市模式）')
    parser.add_argument('--test-category', help='测试类别发现功能的城市名称')
    parser.add_argument('--test-pagination', nargs=2, metavar=('CITY', 'CATEGORY'), 
                       help='测试分页功能：城市名称 类别名称')
    parser.add_argument('--report', action='store_true', help='生成综合报告')

    args = parser.parse_args()

    # Build the crawler and all of its collaborators.
    crawler = CrawlerP4()

    try:
        if args.city:
            # Single-city crawl mode.
            success = crawler.crawl_single_city(args.city)
            logger.info(f"单城市爬行结果: {'成功' if success else '失败'}")

        elif args.test_category:
            # Category-discovery self-test.
            result = crawler.test_category_discovery(args.test_category)
            logger.info(f"类别发现测试结果: {result}")

        elif args.test_pagination:
            # Pagination self-test.
            city_name, category_name = args.test_pagination
            result = crawler.test_pagination(city_name, category_name)
            logger.info(f"分页测试结果: {result}")

        elif args.report:
            # Combined statistics report.
            report = crawler.get_comprehensive_report()
            logger.info(f"综合报告: {report}")

        else:
            # Default: all-cities crawl mode.
            success = crawler.crawl_all_cities()
            logger.info(f"全城市爬行结果: {'成功' if success else '失败'}")

    except KeyboardInterrupt:
        logger.info("用户中断爬取")
    except Exception as e:
        # BUGFIX: logger.exception preserves the traceback that logger.error
        # was silently discarding.
        logger.exception(f"爬取过程异常: {e}")
    finally:
        # Always persist whatever was collected before exiting.
        crawler.data_manager.save_data()
        logger.info("爬虫程序结束")

if __name__ == "__main__":
    main()
