#!/usr/bin/env python3
"""
批量爬取启动器 - 南宁厦门专项任务
基于第1.5阶段智能爬虫系统的批量爬取解决方案
"""

import sys
import logging
import time
import yaml
import random
import json
import signal
import csv
from pathlib import Path
from typing import Dict, List, Any, Optional
from datetime import datetime, timedelta

# Make the project root importable when this file is run as a script.
sys.path.append(str(Path(__file__).parent))

from core.crawler_core import CrawlerCore
from core.data_deduplicator import DataDeduplicator
from utils.logger import setup_logger

# Module-level logger; all [BATCH_CRAWL] messages go to logs/batch_crawl_main.log.
logger = setup_logger('batch_crawl', 'logs/batch_crawl_main.log')

class BatchCrawlLauncher:
    """Orchestrates a batch crawl run: config loading, per-city cookie
    binding, category crawling, interval pacing, and report generation."""
    
    def __init__(self, config_file: str = "config/batch_crawl_config.yaml"):
        """Load configuration, set up run-state tracking and interrupt handlers."""
        self.config_file = Path(config_file)
        self.config = self.load_config()
        self.crawler = None        # CrawlerCore, created in initialize_system()
        self.deduplicator = None   # DataDeduplicator, created in initialize_system()

        # Mutable snapshot of the run's progress, shared by every method.
        self.task_status = {
            'start_time': None,
            'current_city': None,
            'current_category': None,
            'total_crawled': 0,
            'city_results': {},
            'errors': [],
            'is_paused': False,
            'actual_crawled_data': [],  # rows actually scraped this session
            'session_data_count': 0,    # running row count for this session
        }

        # Save partial results gracefully on Ctrl+C / SIGTERM.
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.signal_handler)

        logger.info("[BATCH_CRAWL] 批量爬取启动器初始化完成")
    
    def load_config(self) -> Dict[str, Any]:
        """Read and parse the YAML configuration file; re-raise on any failure."""
        try:
            with open(self.config_file, 'r', encoding='utf-8') as f:
                parsed = yaml.safe_load(f)
        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ 配置文件加载失败: {e}")
            raise

        logger.info(f"[BATCH_CRAWL] ✅ 配置文件加载成功: {self.config_file}")
        return parsed
    
    def print_welcome_banner(self):
        """Print the startup banner summarizing this run's task configuration."""
        # Banner fields come from the 'task_info' section of the YAML config.
        task_info = self.config['task_info']
        
        banner = f"""
╔══════════════════════════════════════════════════════════════════════╗
║                    🚀 批量爬取任务启动器                            ║
║                                                                      ║
║  任务名称: {task_info['name']:<50} ║
║  任务描述: {task_info['description']:<50} ║
║  目标数据: {task_info['target_data_count']:<10} 条                                    ║
║  预估时间: {task_info['estimated_duration']:<50} ║
║                                                                      ║
║  🏙️  城市: 南宁、厦门                                               ║
║  🍽️  品类: 烤肉、日式料理、面包蛋糕甜品                            ║
║  📊 深度: 每品类15页                                                ║
║  🔒 安全: 城市级Cookie绑定，智能间隔控制                           ║
╚══════════════════════════════════════════════════════════════════════╝
        """
        print(banner)
    
    def initialize_system(self) -> bool:
        """Bring up the crawler core, deduplicator, cookies and output dirs.

        Returns True when every component is ready, False otherwise.
        """
        logger.info("[BATCH_CRAWL] 🔧 初始化批量爬取系统...")

        try:
            # The crawler core must have the phase-1.5 feature set enabled.
            self.crawler = CrawlerCore()
            if not self.crawler.phase_1_5_enabled:
                logger.error("[BATCH_CRAWL] ❌ 第1.5阶段功能未启用")
                return False
            logger.info("[BATCH_CRAWL] ✅ 爬虫核心初始化成功")

            # The deduplicator is seeded from the historical data directory.
            self.deduplicator = DataDeduplicator(
                self.config['data_management']['history_data_dir'])
            logger.info("[BATCH_CRAWL] ✅ 数据去重器初始化成功")

            # Cookie files must all exist before any crawling starts.
            if not self.check_cookie_files():
                return False

            self.create_output_directories()

            logger.info("[BATCH_CRAWL] ✅ 系统初始化完成")
            return True

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ 系统初始化失败: {e}")
            return False
    
    def check_cookie_files(self) -> bool:
        """Verify every configured cookie file exists; warn on suspiciously small ones."""
        logger.info("[BATCH_CRAWL] 🍪 检查Cookie文件...")

        for cookie_id, cookie_info in self.config['cookie_config'].items():
            cookie_path = Path(cookie_info['file_path'])

            if not cookie_path.exists():
                logger.error(f"[BATCH_CRAWL] ❌ Cookie文件不存在: {cookie_path}")
                return False

            # A cookie file under 100 bytes is almost certainly invalid.
            file_size = cookie_path.stat().st_size
            if file_size < 100:
                logger.warning(f"[BATCH_CRAWL] ⚠️ Cookie文件可能无效 {cookie_path}: {file_size} bytes")

            logger.info(f"[BATCH_CRAWL] ✅ {cookie_info['description']}: {cookie_path}")

        return True
    
    def create_output_directories(self):
        """Create the output directory tree (data/, logs/, reports/)."""
        output_dir = Path(self.config['data_management']['output_data_dir'])
        output_dir.mkdir(parents=True, exist_ok=True)

        # One sub-directory each for scraped data, logs and run reports.
        for sub in ("data", "logs", "reports"):
            (output_dir / sub).mkdir(exist_ok=True)

        logger.info(f"[BATCH_CRAWL] ✅ 输出目录创建完成: {output_dir}")
    
    def show_pre_crawl_analysis(self):
        """Log historical data volumes per city and category before crawling."""
        logger.info("[BATCH_CRAWL] 📊 爬取前数据分析")

        # Overall dedup statistics across all historical data.
        dedup_report = self.deduplicator.get_deduplication_report()
        logger.info(f"[BATCH_CRAWL] 历史数据总量: {dedup_report['total_existing_data']} 条")

        # Existing counts broken down by city, then by category.
        for task in self.config['city_tasks']:
            city_name = task['city_name']
            existing_count = self.deduplicator.get_city_existing_count(city_name)
            logger.info(f"[BATCH_CRAWL] {city_name} 现有数据: {existing_count} 条")

            for category in task['target_categories']:
                cat_count = self.deduplicator.get_existing_count(city_name, category)
                logger.info(f"[BATCH_CRAWL]   {category}: {cat_count} 条")
    
    def confirm_execution_plan(self) -> bool:
        """Show the planned run and interactively ask the operator to confirm.

        Returns True on a yes answer, False on a no answer or Ctrl+C.
        """
        print("\n📋 执行计划确认:")

        total_expected = 0
        for i, task in enumerate(self.config['city_tasks'], 1):
            print(f"  {i}. {task['city_name']} ({task['execution_time_slot']})")
            print(f"     品类: {', '.join(task['target_categories'])}")
            print(f"     预期数据: {task['expected_data_count']} 条")
            print(f"     Cookie: {task['cookie_file']}")
            total_expected += task['expected_data_count']

        print(f"\n📊 总预期数据量: {total_expected} 条")
        print(f"⏱️  预估执行时间: {self.config['task_info']['estimated_duration']}")

        # Safety checklist shown before the operator commits.
        print("\n⚠️  安全提醒:")
        print("  • 请确保网络环境稳定")
        print("  • 准备手动处理验证码")
        print("  • 监控系统资源使用")
        print("  • 遇到异常立即停止")

        # Re-prompt until we get an unambiguous yes/no; Ctrl+C cancels.
        while True:
            try:
                confirm = input("\n确认开始批量爬取? (y/n): ").strip().lower()
            except KeyboardInterrupt:
                print("\n👋 用户取消操作")
                return False

            if confirm in ('y', 'yes', '是', '确认'):
                return True
            if confirm in ('n', 'no', '否', '取消'):
                return False
            print("❌ 请输入 y(是) 或 n(否)")
    
    def execute_batch_crawl(self) -> bool:
        """Run every configured city task in order; always emit a final report.

        Returns True when all cities finished (or failures were skipped by
        the operator), False on abort, interrupt or unexpected error.
        """
        logger.info("[BATCH_CRAWL] 🚀 开始执行批量爬取任务")

        self.task_status['start_time'] = time.time()
        city_tasks = self.config['city_tasks']
        total = len(city_tasks)

        try:
            for idx, task in enumerate(city_tasks, 1):
                city_name = task['city_name']
                self.task_status['current_city'] = city_name

                logger.info(f"[BATCH_CRAWL] 📍 开始爬取城市 {idx}/{total}: {city_name}")

                if not self.execute_city_task(task):
                    logger.error(f"[BATCH_CRAWL] ❌ 城市 {city_name} 爬取失败")
                    # The operator decides whether one failed city aborts the run.
                    if not self.ask_continue_on_error(city_name):
                        return False

                # Pause between cities (skipped after the last one).
                if idx < total:
                    self.city_interval_wait()

            logger.info("[BATCH_CRAWL] 🎉 批量爬取任务完成")
            return True

        except KeyboardInterrupt:
            logger.info("[BATCH_CRAWL] 👋 用户中断任务")
            return False
        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ 批量爬取异常: {e}")
            return False
        finally:
            # Report generation runs on success, failure and interruption alike.
            self.generate_final_report()
    
    def execute_city_task(self, task: Dict[str, Any]) -> bool:
        """Crawl every target category for one city using its dedicated cookie.

        Stores the per-city summary in task_status['city_results'] and
        returns True on success, False when any category raised.
        """
        city_name = task['city_name']
        categories = task['target_categories']

        logger.info(f"[BATCH_CRAWL] 🏙️ 开始爬取 {city_name}")
        logger.info(f"[BATCH_CRAWL] 🍪 使用Cookie: {task['cookie_file']}")

        # Bind the city-specific cookie before any request is made.
        self.set_city_cookie(task['cookie_file'])

        city_results = {
            'city_name': city_name,
            'start_time': time.time(),
            'categories': {},
            'total_crawled': 0,
            'success': False,
        }

        try:
            total = len(categories)
            for idx, category in enumerate(categories, 1):
                self.task_status['current_category'] = category
                logger.info(f"[BATCH_CRAWL] 🍽️ 爬取品类 {idx}/{total}: {category}")

                outcome = self.execute_category_crawl(
                    city_name, category, task['max_pages_per_category'])
                city_results['categories'][category] = outcome
                city_results['total_crawled'] += outcome.get('crawled_count', 0)

                # Pause between categories (no wait needed after the last one).
                if idx < total:
                    logger.info(f"[BATCH_CRAWL] 📊 {category} 完成，准备下一个品类...")
                    self.category_interval_wait()

            city_results['success'] = True
            city_results['end_time'] = time.time()
            self.task_status['city_results'][city_name] = city_results

            logger.info(f"[BATCH_CRAWL] ✅ {city_name} 爬取完成: {city_results['total_crawled']} 条")
            return True

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ {city_name} 爬取异常: {e}")
            city_results['error'] = str(e)
            city_results['end_time'] = time.time()
            self.task_status['city_results'][city_name] = city_results
            return False
    
    def set_city_cookie(self, cookie_file: str):
        """Load the city-specific cookie into the crawler core.

        Fallback chain: the given cookie file -> the matching entry in the
        main cookie file (config/cookie.txt) -> a default test cookie.
        """
        cookie_path = Path(cookie_file)
        file_missing = not cookie_path.exists()

        if not file_missing:
            try:
                with open(cookie_path, 'r', encoding='utf-8') as f:
                    cookie_content = f.read().strip()

                # Normalize: drop wrapping quotes and embedded line breaks.
                self.crawler.cookie_string = self.clean_cookie_content(cookie_content)
                logger.info(f"[BATCH_CRAWL] ✅ Cookie设置成功: {cookie_file}")
                return
            except Exception as e:
                logger.error(f"[BATCH_CRAWL] ❌ Cookie设置失败: {e}")

        # BUGFIX: only report "file missing" when the file is actually absent;
        # previously a read failure fell through to the same misleading warning.
        if file_missing:
            logger.warning(f"[BATCH_CRAWL] Cookie文件不存在: {cookie_file}")
        logger.info(f"[BATCH_CRAWL] 尝试从主Cookie文件加载...")

        main_cookie_file = Path("config/cookie.txt")
        if main_cookie_file.exists():
            try:
                # Extract the entry matching this cookie file's name.
                cookie_content = self.extract_cookie_from_main_file(cookie_file)
                if cookie_content:
                    self.crawler.cookie_string = cookie_content
                    logger.info(f"[BATCH_CRAWL] ✅ 从主Cookie文件加载成功")
                    return
            except Exception as e:
                logger.error(f"[BATCH_CRAWL] 从主Cookie文件加载失败: {e}")

        # Last resort: a placeholder cookie so the crawler can still start.
        logger.warning(f"[BATCH_CRAWL] 使用默认测试Cookie")
        self.crawler.cookie_string = "fspop=test; _lxsdk_cuid=test_batch_crawl"

    def clean_cookie_content(self, cookie_content: str) -> str:
        """Normalize a raw cookie string read from a file.

        Strips surrounding whitespace, removes a wrapping pair of double
        quotes, and drops embedded newlines/carriage returns.

        BUGFIX: whitespace is now stripped *before* the quote check, so a
        quoted cookie with trailing spaces/newlines is unwrapped correctly
        (previously the quotes were only detected at the exact string ends).
        """
        cookie_content = cookie_content.strip()

        # Remove a wrapping pair of double quotes, if present.
        if cookie_content.startswith('"') and cookie_content.endswith('"'):
            cookie_content = cookie_content[1:-1]

        # Remove embedded line breaks and any remaining edge whitespace.
        cookie_content = cookie_content.replace('\n', '').replace('\r', '').strip()

        return cookie_content

    def extract_cookie_from_main_file(self, cookie_file: str) -> str:
        """Pull the cookie matching *cookie_file* out of config/cookie.txt.

        Returns the cleaned cookie value, or "" when the file cannot be
        read or the named entry is not found.
        """
        import re

        main_cookie_file = Path("config/cookie.txt")

        try:
            content = main_cookie_file.read_text(encoding='utf-8')
        except Exception as e:
            logger.error(f"[BATCH_CRAWL] 读取主Cookie文件失败: {e}")
            return ""

        # The requested path decides which named entry to extract;
        # anything unrecognized falls back to cookie1.
        cookie_name = 'cookie1'
        if 'cookie2' in cookie_file and 'cookie1' not in cookie_file:
            cookie_name = 'cookie2'

        # Entries look like: cookie1="..." and may span multiple lines.
        match = re.search(rf'{cookie_name}="(.*?)"', content, re.DOTALL)
        if match is None:
            logger.error(f"[BATCH_CRAWL] 在主Cookie文件中未找到 {cookie_name}")
            return ""

        return self.clean_cookie_content(match.group(1))
    
    def execute_category_crawl(self, city_name: str, category: str, max_pages: int) -> Dict[str, Any]:
        """Crawl one category for one city and return a result summary dict.

        Result keys: category, start_time, crawled_count, duplicate_count,
        error_count, pages_crawled, success, actual_data, plus end_time and
        error when applicable.
        """
        result = {
            'category': category,
            'start_time': time.time(),
            'crawled_count': 0,
            'duplicate_count': 0,
            'error_count': 0,
            'pages_crawled': 0,
            'success': False,
            'actual_data': []  # the rows actually scraped for this category
        }

        try:
            logger.info(f"[BATCH_CRAWL] 🔄 开始爬取 {city_name} - {category} (最多{max_pages}页)")

            crawled_data = self.crawl_city_category_with_pages(city_name, category, max_pages)

            if crawled_data:
                result.update({
                    'crawled_count': len(crawled_data),
                    'pages_crawled': max_pages,
                    'success': True,
                    'end_time': time.time(),
                    'actual_data': crawled_data
                })

                # BUGFIX: only bump the aggregate counter here. The direct
                # crawler already appends rows to actual_crawled_data and
                # increments session_data_count; repeating both here used to
                # duplicate every row and double-count the session totals.
                self.task_status['total_crawled'] += len(crawled_data)

                logger.info(f"[BATCH_CRAWL] ✅ {category} 完成: {len(crawled_data)} 条")
            else:
                logger.warning(f"[BATCH_CRAWL] ⚠️ {category} 未获取到数据")
                result['end_time'] = time.time()

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ {category} 爬取失败: {e}")
            result['error'] = str(e)
            result['end_time'] = time.time()

        return result

    def crawl_city_category_with_pages(self, city_name: str, category: str, max_pages: int) -> List[Dict]:
        """Crawl one city/category and return ONLY the rows scraped by this call.

        BUGFIX: the previous version returned the whole accumulated
        task_status['actual_crawled_data'] list, so every category crawl
        re-reported all rows from earlier categories as its own. We now
        snapshot the buffer length before crawling and return only the
        newly appended slice.
        """
        try:
            logger.info(f"[BATCH_CRAWL] 🔍 开始爬取 {city_name} - {category}")

            # Roast-meat is not available in every city; swap in the BBQ
            # category when the availability check rejects it.
            if '烤肉' in category:
                logger.info(f"[BATCH_CRAWL] 🥩 检测到烤肉品类，进行专门检查...")
                city_code = self.get_city_code(city_name)
                if city_code and hasattr(self.crawler, 'category_discovery'):
                    roast_check = self.crawler.category_discovery.check_roast_meat_availability(city_name, city_code)
                    if not roast_check['has_roast_meat']:
                        logger.warning(f"[BATCH_CRAWL] ⚠️ {city_name} 不支持烤肉品类，尝试使用烧烤烤串替代")
                        category = '烧烤烤串'  # substitute category

            logger.info(f"[BATCH_CRAWL] 🔍 开始直接爬取模式: {city_name} - {category}")

            # Snapshot the shared buffer so we can slice out just the rows
            # this call adds (the direct crawler appends to it).
            already_collected = len(self.task_status.get('actual_crawled_data', []))

            # Direct crawl bypasses the smart system's time-slot restrictions.
            success = self.crawl_city_category_directly(city_name, category, max_pages)

            if success:
                new_rows = self.task_status.get('actual_crawled_data', [])[already_collected:]
                logger.info(f"[BATCH_CRAWL] 📊 返回 {len(new_rows)} 条真实数据")
                return new_rows

            logger.warning(f"[BATCH_CRAWL] 直接爬取失败，可能是品类不可用")
            return []

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] 分页爬取异常: {e}")
            return []

    def crawl_city_category_directly(self, city_name: str, category: str, max_pages: int) -> bool:
        """Crawl a city/category with a dedicated Playwright browser, bypassing
        the smart-crawl system's time-slot restrictions.

        Appends every extracted shop row to task_status['actual_crawled_data']
        and increments task_status['session_data_count']. Returns True when the
        session completed (even with zero rows), False on any exception.
        """
        logger.info(f"[BATCH_CRAWL] 🚀 直接爬取模式: {city_name} - {category}")

        try:
            from playwright.sync_api import sync_playwright

            # Launch a standalone (headed) browser rather than going through
            # the smart-crawl system.
            with sync_playwright() as p:
                browser = p.chromium.launch(headless=False)
                context = browser.new_context(
                    user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
                    viewport={'width': 1920, 'height': 1080}
                )

                # Attach the city-bound cookies, if any were loaded.
                try:
                    if hasattr(self.crawler, 'cookie_string') and self.crawler.cookie_string:
                        cookies = self.parse_cookie_string(self.crawler.cookie_string)
                        if cookies:
                            context.add_cookies(cookies)
                            logger.info(f"[BATCH_CRAWL] ✅ 添加了 {len(cookies)} 个Cookie")
                except Exception as e:
                    logger.warning(f"[BATCH_CRAWL] Cookie添加失败: {e}")

                page = context.new_page()

                # Build the category-specific listing URL; fall back to the
                # generic food channel when the category ID is unknown.
                city_code = self.get_city_code(city_name)
                category_id = self.get_category_id(category)

                if category_id:
                    search_url = f"https://www.dianping.com/{city_code}/ch10/{category_id}"
                    logger.info(f"[BATCH_CRAWL] 🌐 访问品类页面: {search_url}")
                    logger.info(f"[BATCH_CRAWL] 🎯 品类: {category} (ID: {category_id})")
                else:
                    search_url = f"https://www.dianping.com/{city_code}/ch10"
                    logger.warning(f"[BATCH_CRAWL] ⚠️ 未找到品类ID，使用通用页面: {search_url}")

                page.goto(search_url, timeout=30000)

                # Wait for the page to settle before scraping.
                page.wait_for_load_state('networkidle', timeout=15000)

                # Captcha detection (resolution is handled by the crawler core /
                # operator; here we only log the outcome).
                captcha_result = self.crawler.detect_captcha(page, city_name, category)
                if captcha_result:
                    logger.info(f"[BATCH_CRAWL] 🔍 验证码处理结果: {captcha_result}")

                # Extract real shop data page by page.
                logger.info(f"[BATCH_CRAWL] 🔍 开始提取真实数据: {city_name} - {category}")

                # Delegate the per-page extraction to the crawler core.
                real_data = []
                page_num = 1
                max_pages_to_crawl = min(max_pages, 5)  # hard cap: at most 5 pages per category

                while page_num <= max_pages_to_crawl:
                    try:
                        logger.info(f"[BATCH_CRAWL] 📄 正在提取第 {page_num} 页数据...")

                        # Use the crawler core's extract_shop_data method.
                        page_shops = self.crawler.extract_shop_data(page, city_name, category)

                        if page_shops:
                            real_data.extend(page_shops)
                            logger.info(f"[BATCH_CRAWL] ✅ 第{page_num}页: {len(page_shops)} 个商铺")
                        else:
                            logger.warning(f"[BATCH_CRAWL] ⚠️ 第{page_num}页未提取到数据")
                            break

                        # Try to advance to the next page.
                        if page_num < max_pages_to_crawl:
                            try:
                                # Look for a "next page" control.
                                next_button = page.query_selector('a.next, .next-page, a:has-text("下一页")')
                                if next_button and next_button.is_visible():
                                    next_button.click()
                                    page.wait_for_load_state('networkidle', timeout=10000)
                                    page_num += 1
                                else:
                                    logger.info(f"[BATCH_CRAWL] 📄 未找到下一页按钮，停止翻页")
                                    break
                            except Exception as e:
                                logger.warning(f"[BATCH_CRAWL] 翻页失败: {e}")
                                break
                        else:
                            break

                    except Exception as e:
                        logger.error(f"[BATCH_CRAWL] 第{page_num}页数据提取失败: {e}")
                        break

                # Record the rows in the shared task state (keys are created
                # defensively in case task_status was replaced externally).
                if 'actual_crawled_data' not in self.task_status:
                    self.task_status['actual_crawled_data'] = []
                if 'session_data_count' not in self.task_status:
                    self.task_status['session_data_count'] = 0

                self.task_status['actual_crawled_data'].extend(real_data)
                self.task_status['session_data_count'] += len(real_data)

                logger.info(f"[BATCH_CRAWL] 📊 提取 {len(real_data)} 条真实 {category} 数据")
                logger.info(f"[BATCH_CRAWL] 🎯 访问的URL: {search_url}")
                return True

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] 直接爬取异常: {e}")
            return False

    def parse_cookie_string(self, cookie_string: str) -> list:
        """Convert a raw 'name=value; ...' cookie header into Playwright cookie dicts.

        Each entry is given the .dianping.com domain and root path; pairs
        without an '=' are skipped.

        BUGFIX: pairs are now split on ';' and stripped individually, so
        headers without a space after the semicolon ('a=1;b=2') parse into
        separate cookies instead of one malformed entry.
        """
        cookies = []
        try:
            for cookie_pair in cookie_string.split(';'):
                cookie_pair = cookie_pair.strip()
                if '=' in cookie_pair:
                    name, value = cookie_pair.split('=', 1)
                    cookies.append({
                        'name': name.strip(),
                        'value': value.strip(),
                        'domain': '.dianping.com',
                        'path': '/'
                    })
        except Exception as e:
            logger.error(f"[BATCH_CRAWL] Cookie解析失败: {e}")

        return cookies

    def get_city_code(self, city_name: str) -> str:
        """Map a Chinese city name to its dianping.com URL slug.

        Unknown names fall back to the lower-cased input.
        """
        known_codes = {
            '南宁': 'nanning',
            '厦门': 'xiamen',
            '深圳': 'shenzhen',
            '上海': 'shanghai',
            '广州': 'guangzhou',
            '杭州': 'hangzhou',
            '苏州': 'suzhou',
            '成都': 'chengdu',
            '重庆': 'chongqing',
            '武汉': 'wuhan',
            '长沙': 'changsha',
            '西安': 'xian',
        }
        try:
            return known_codes[city_name]
        except KeyError:
            return city_name.lower()

    def get_category_id(self, category_name: str) -> Optional[str]:
        """Map a category name to its dianping.com channel ID (e.g. 'g113').

        Returns None when the category is unknown — callers test the result
        for truthiness. BUGFIX: the annotation previously claimed str even
        though the miss path has always returned None.
        """
        # The three core categories for this task come first.
        category_id_mapping = {
            '烤肉': 'g34303',
            '日式料理': 'g113',
            '面包蛋糕甜品': 'g117',

            # Other common categories
            '火锅': 'g110',
            '川菜': 'g102',
            '粤菜': 'g103',
            '西餐': 'g116',
            '湘菜': 'g104',
            '韩式料理': 'g114',
            '烧烤烤串': 'g508',
            '小吃快餐': 'g112',
            '自助餐': 'g111',
            '咖啡': 'g132',
            '小龙虾': 'g219',
            '鱼鲜海鲜': 'g251'
        }

        category_id = category_id_mapping.get(category_name)
        if category_id:
            logger.info(f"[BATCH_CRAWL] 🎯 品类映射: {category_name} -> {category_id}")
        else:
            logger.warning(f"[BATCH_CRAWL] ⚠️ 未找到品类ID: {category_name}")

        return category_id

    def signal_handler(self, signum, frame):
        """Handle SIGINT/SIGTERM: persist partial results, then exit cleanly.

        Registered in __init__. Saves everything scraped so far, writes an
        interrupt report, and terminates the process with exit code 0.
        """
        logger.info(f"[BATCH_CRAWL] 🛑 接收到中断信号 {signum}")
        logger.info("[BATCH_CRAWL] 正在保存已爬取的数据...")

        # Persist whatever has been scraped so far.
        self.save_current_data()

        # Record how far the run got before the interruption.
        self.generate_interrupt_report()

        logger.info("[BATCH_CRAWL] 👋 数据已保存，程序即将退出")
        sys.exit(0)

    def save_current_data(self):
        """Persist everything scraped so far to timestamped CSV + JSON files."""
        try:
            rows = self.task_status['actual_crawled_data']
            if not rows:
                logger.info("[BATCH_CRAWL] 没有数据需要保存")
                return

            # All data files live under <output_data_dir>/data.
            data_dir = Path(self.config['data_management']['output_data_dir']) / "data"
            data_dir.mkdir(parents=True, exist_ok=True)

            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

            # Primary output: CSV in the project's historical column layout.
            csv_file = data_dir / f"batch_crawl_data_{timestamp}.csv"
            self.save_data_as_csv(csv_file, rows)

            # Secondary output: JSON with crawl metadata, useful for debugging.
            started = self.task_status['start_time']
            json_file = data_dir / f"batch_crawl_data_{timestamp}.json"
            payload = {
                'crawl_info': {
                    'start_time': datetime.fromtimestamp(started).isoformat() if started else None,
                    'save_time': datetime.now().isoformat(),
                    'total_count': len(rows),
                    'current_city': self.task_status['current_city'],
                    'current_category': self.task_status['current_category']
                },
                'data': rows
            }
            with open(json_file, 'w', encoding='utf-8') as f:
                json.dump(payload, f, ensure_ascii=False, indent=2)

            logger.info(f"[BATCH_CRAWL] ✅ 数据已保存:")
            logger.info(f"[BATCH_CRAWL]   CSV格式: {csv_file}")
            logger.info(f"[BATCH_CRAWL]   JSON格式: {json_file}")
            logger.info(f"[BATCH_CRAWL] 📊 保存数据量: {len(rows)} 条")

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ 数据保存失败: {e}")

    def save_data_as_csv(self, csv_file: Path, data: List[Dict]):
        """Write scraped rows to *csv_file* in the project's 5-column layout
        (city, primary_category, secondary_category, shop_name, avg_price)."""
        try:
            if not data:
                logger.info("[BATCH_CRAWL] 没有数据需要保存为CSV")
                return

            # Column order mirrors the existing historical CSV files.
            csv_fields = ['city', 'primary_category', 'secondary_category',
                          'shop_name', 'avg_price']

            with open(csv_file, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=csv_fields)
                writer.writeheader()

                for item in data:
                    # Map incoming keys onto the canonical columns, falling
                    # back to legacy key names where needed.
                    row = {
                        'city': item.get('city', ''),
                        'primary_category': '美食',  # fixed value, matches existing files
                        'secondary_category': item.get('category', item.get('secondary_category', '')),
                        'shop_name': item.get('shop_name', item.get('name', '')),
                        'avg_price': item.get('avg_price', item.get('price', ''))
                    }
                    # Normalize None to the empty string for every column.
                    writer.writerow({k: ('' if v is None else v) for k, v in row.items()})

            logger.info(f"[BATCH_CRAWL] ✅ CSV文件保存成功: {csv_file}")
            logger.info(f"[BATCH_CRAWL] 📊 CSV格式: {len(data)} 条数据，5个字段")

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ CSV保存失败: {e}")
            import traceback
            traceback.print_exc()

    def generate_interrupt_report(self):
        """Write a JSON snapshot of progress at the moment of interruption."""
        try:
            reports_dir = Path(self.config['data_management']['output_data_dir']) / "reports"
            reports_dir.mkdir(parents=True, exist_ok=True)

            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            report_file = reports_dir / f"interrupt_report_{timestamp}.json"

            now = time.time()
            started = self.task_status['start_time']
            elapsed = now - started if started else 0

            report = {
                'interrupt_info': {
                    'interrupt_time': datetime.fromtimestamp(now).isoformat(),
                    'start_time': datetime.fromtimestamp(started).isoformat() if started else None,
                    'duration_seconds': elapsed,
                    'duration_formatted': f"{elapsed/60:.1f}分钟"
                },
                'progress': {
                    'current_city': self.task_status['current_city'],
                    'current_category': self.task_status['current_category'],
                    'total_crawled': len(self.task_status['actual_crawled_data']),
                    'city_results': self.task_status['city_results']
                },
                'task_config': self.config['task_info']
            }

            with open(report_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            logger.info(f"[BATCH_CRAWL] 📋 中断报告已保存: {report_file}")

        except Exception as e:
            logger.error(f"[BATCH_CRAWL] ❌ 中断报告生成失败: {e}")

    def category_interval_wait(self):
        """Sleep a randomized, clamped interval between categories, in ~30s chunks."""
        cfg = self.config['safety_config']['category_intervals']

        # Jitter around the base delay, clamped to [min_delay, max_delay].
        jitter = random.uniform(1 - cfg['random_factor'], 1 + cfg['random_factor'])
        delay = max(cfg['min_delay'], min(cfg['max_delay'], cfg['base_delay'] * jitter))

        logger.info(f"[BATCH_CRAWL] ⏰ 品类间间隔等待: {delay:.0f}秒 ({delay/60:.1f}分钟)")

        # Sleep in ~30-second segments so a Ctrl+C is handled promptly.
        segments = max(1, int(delay / 30))
        step = delay / segments
        for done in range(1, segments + 1):
            time.sleep(step)
            remaining = delay - done * step
            if remaining > 30:
                logger.debug(f"[BATCH_CRAWL] 品类间隔剩余: {remaining:.0f}秒")

    def city_interval_wait(self):
        """Sleep a randomized, clamped interval between cities, in interruptible chunks."""
        intervals = self.config['safety_config']['city_intervals']
        base_delay = intervals['base_delay']
        random_factor = intervals['random_factor']

        # Jitter around the base delay, clamped to [min_delay, max_delay].
        delay = base_delay * random.uniform(1 - random_factor, 1 + random_factor)
        delay = max(intervals['min_delay'], min(intervals['max_delay'], delay))

        logger.info(f"[BATCH_CRAWL] ⏰ 城市间间隔等待: {delay:.0f}秒 ({delay/60:.1f}分钟)")

        # Sleep in ~5-minute segments so a Ctrl+C is handled promptly.
        # CONSISTENCY FIX: segments are derived from the actual delay (like
        # category_interval_wait) instead of a hard-coded 12, whose
        # "5 minutes per segment" assumption only held for a 1-hour delay.
        wait_segments = max(1, int(delay / 300))
        segment_time = delay / wait_segments

        for i in range(wait_segments):
            time.sleep(segment_time)
            remaining = delay - (i + 1) * segment_time
            if remaining > 60:
                logger.debug(f"[BATCH_CRAWL] 剩余等待时间: {remaining/60:.1f}分钟")

    def ask_continue_on_error(self, city_name: str) -> bool:
        """Ask the operator whether to keep going after a city-level failure.

        Returns True to continue with the next city, False to stop
        (a Ctrl+C also counts as stop).
        """
        print(f"\n⚠️ 城市 {city_name} 爬取失败")

        # Re-prompt until we get an unambiguous answer.
        while True:
            try:
                answer = input("是否继续下一个城市? (y/n): ").strip().lower()
            except KeyboardInterrupt:
                return False

            if answer in ('y', 'yes', '是', '继续'):
                return True
            if answer in ('n', 'no', '否', '停止'):
                return False
            print("❌ 请输入 y(继续) 或 n(停止)")

    def generate_final_report(self):
        """Build and persist the final run report, then print a summary.

        Writes a JSON report (task info, timing, per-city results, and the
        deduplicator's report) plus — when any data was crawled this
        session — a CSV dump of the crawled records. Both files land under
        <output_data_dir>/reports/ with a shared timestamp. Save failures
        are logged but not raised; the stdout summary is printed either way.
        """
        logger.info("[BATCH_CRAWL] 📊 生成最终报告")

        end_time = time.time()
        # NOTE(review): assumes task_status['start_time'] was set when the run
        # began — a None here would raise TypeError. Confirm against the caller.
        total_duration = end_time - self.task_status['start_time']

        report = {
            'task_info': self.config['task_info'],
            'execution_summary': {
                # Naive local timestamps, matching how start_time was recorded.
                'start_time': datetime.fromtimestamp(self.task_status['start_time']).isoformat(),
                'end_time': datetime.fromtimestamp(end_time).isoformat(),
                'total_duration_seconds': total_duration,
                'total_duration_formatted': f"{total_duration/3600:.1f}小时",
                'total_crawled': self.task_status['total_crawled']
            },
            'city_results': self.task_status['city_results'],
            'deduplication_report': self.deduplicator.get_deduplication_report()
        }

        # Make sure the reports directory exists before writing anything.
        output_dir = Path(self.config['data_management']['output_data_dir'])
        reports_dir = output_dir / "reports"
        reports_dir.mkdir(parents=True, exist_ok=True)

        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        # JSON report path (always written).
        json_report_file = reports_dir / f"batch_crawl_report_{timestamp}.json"

        # CSV data path (only written if data was crawled this session).
        csv_data_file = reports_dir / f"batch_crawl_data_{timestamp}.csv"

        try:
            # Persist the JSON report; keep non-ASCII (Chinese) text readable.
            with open(json_report_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            # Persist this session's crawled records as CSV, if any.
            if self.task_status['actual_crawled_data']:
                self.save_data_as_csv(csv_data_file, self.task_status['actual_crawled_data'])

            logger.info(f"[BATCH_CRAWL] ✅ 最终报告已保存:")
            logger.info(f"[BATCH_CRAWL]   JSON报告: {json_report_file}")
            if self.task_status['actual_crawled_data']:
                logger.info(f"[BATCH_CRAWL]   CSV数据: {csv_data_file}")

        except Exception as e:
            # Best-effort persistence: log and continue to the summary.
            logger.error(f"[BATCH_CRAWL] ❌ 保存最终报告失败: {e}")

        # Print a human-readable summary to stdout regardless of save outcome.
        self.print_execution_summary(report)

    def print_execution_summary(self, report: Dict[str, Any]):
        """Print a human-readable summary of the batch-crawl run.

        Shows total duration and total crawl count, then a per-city
        breakdown (success flag + count) with per-category counts.
        """
        summary = report['execution_summary']

        print("\n" + "="*80)
        print("📊 批量爬取任务执行摘要")
        print("="*80)

        print(f"⏱️  执行时间: {summary['total_duration_formatted']}")
        print(f"📊 总爬取量: {summary['total_crawled']} 条")

        print(f"\n🏙️ 城市结果:")
        for city_name, city_result in report['city_results'].items():
            outcome = "✅ 成功" if city_result.get('success', False) else "❌ 失败"
            print(f"  {city_name}: {outcome} - {city_result.get('total_crawled', 0)} 条")

            # Per-category breakdown nested under each city.
            for category, cat_result in city_result.get('categories', {}).items():
                print(f"    {category}: {cat_result.get('crawled_count', 0)} 条")

        print("="*80)

    def run(self):
        """Top-level entry point for the batch-crawl task.

        Walks through banner → system init → pre-crawl analysis →
        operator confirmation → crawl, and converts the outcome into a
        process exit code (0 = success or user cancel, 1 = failure).
        """
        try:
            self.print_welcome_banner()

            # Bail out early when the crawler subsystems fail to come up.
            if not self.initialize_system():
                print("❌ 系统初始化失败，退出程序")
                return 1

            self.show_pre_crawl_analysis()

            # Give the operator a last chance to back out before crawling.
            if not self.confirm_execution_plan():
                print("👋 用户取消任务，退出程序")
                return 0

            succeeded = self.execute_batch_crawl()
            if succeeded:
                print("\n🎉 批量爬取任务成功完成！")
            else:
                print("\n😞 批量爬取任务失败，请检查日志")
            return 0 if succeeded else 1

        except KeyboardInterrupt:
            # Ctrl-C is a deliberate stop, not an error.
            print("\n👋 用户中断程序，再见！")
            return 0
        except Exception as e:
            print(f"\n❌ 程序异常: {e}")
            logger.error(f"程序异常: {e}")
            return 1

def main():
    """Construct the launcher and run the batch crawl.

    Returns the launcher's exit code (0 on success/cancel, 1 on failure).
    """
    return BatchCrawlLauncher().run()

# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
