#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
主爬虫执行脚本
"""

import sys
import asyncio
import logging
from pathlib import Path
from datetime import datetime
from typing import Dict, Any

# Add the project root directory to the Python import path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from src.crawler.crawler_engine import CrawlerEngine
from src.data.data_processor import DataProcessor
from src.data.data_storage import DataStorageManager
from src.data.data_cleaner import DataCleaner
from src.data.data_deduplicator import DataDeduplicator
from src.utils.excel_exporter import ExcelExporter
from src.utils.csv_exporter import CSVExporter  # CSV export support
from config.settings import LOGGING_CONFIG


class MainCrawler:
    """Orchestrates crawl runs: browser-engine lifecycle, data processing,
    storage (which performs cleaning/deduplication), and Excel export.
    """
    
    def __init__(self):
        # Configure logging before the first getLogger() call so handlers
        # from LOGGING_CONFIG are already in effect.
        self.setup_logging()
        self.logger = logging.getLogger(__name__)
        
        self.crawler_engine = CrawlerEngine()
        self.data_processor = DataProcessor()
        self.data_storage = DataStorageManager()
        self.data_cleaner = DataCleaner()
        self.data_deduplicator = DataDeduplicator()
        self.excel_exporter = ExcelExporter()
        self.csv_exporter = CSVExporter()  # NOTE(review): not used by any method visible in this class
    
    def setup_logging(self):
        """Apply the project-wide logging configuration (dictConfig)."""
        import logging.config
        logging.config.dictConfig(LOGGING_CONFIG)
    
    async def crawl_city_business(self, city: str = "深圳", business_type: str = "餐饮",
                                 max_pages: int = None, headless: bool = True) -> Dict[str, Any]:
        """Crawl leads for a single city / business-type combination.

        Creates a task record, starts the browser engine, crawls listing
        data, persists it, exports an Excel file, and updates the task
        status. The engine is always stopped in the `finally` clause.

        Args:
            city: Target city name.
            business_type: Business category to crawl.
            max_pages: Maximum number of pages to crawl; None means no limit.
            headless: Run the browser without a visible window.

        Returns:
            Dict with 'success', 'message', 'task_id', 'raw_count', and —
            on success — 'save_stats' and 'excel_file'.
        """
        task_name = f"{city}{business_type}数据爬取_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        task_id = 0  # 0 means "no task record created yet"; checked in the except branch

        try:
            # Create a task record so progress/failure can be tracked in storage
            task_id = self.data_storage.create_crawl_task(task_name, city, business_type)

            self.logger.info(f"开始爬取任务: {task_name}")

            # Launch the browser
            await self.crawler_engine.start(headless=headless)

            # Crawl the raw listing data
            raw_data = await self.crawler_engine.crawl_city_business(
                city=city,
                business_type=business_type,
                max_pages=max_pages
            )

            if not raw_data:
                self.logger.warning("未获取到任何数据")
                return {
                    'success': False,
                    'message': '未获取到任何数据',
                    'task_id': task_id,
                    'raw_count': 0
                }

            self.logger.info(f"原始数据获取完成，共 {len(raw_data)} 条")

            # Persist the data (storage performs cleaning and deduplication)
            save_stats = self.data_storage.save_leads_data(raw_data, task_id)

            # Export to Excel when any rows survived cleaning/deduplication
            excel_file = ""
            if save_stats['final_count'] > 0:
                # Re-run cleaning/dedup on the raw data to obtain the
                # processed rows for export
                processed_data = self.data_cleaner.clean_data_batch(raw_data)
                deduplicated_data = self.data_deduplicator.deduplicate_data(processed_data)

                timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
                filename = f"{city}_{business_type}_leads_{timestamp}.xlsx"
                excel_file = await self.excel_exporter.export_to_excel(deduplicated_data, filename)

            # Mark the task as completed
            self.data_storage.update_crawl_task_status(task_id, 'completed',
                                                     f"成功获取 {save_stats['final_count']} 条有效数据")

            return {
                'success': True,
                'message': f'爬取完成，获取 {save_stats["final_count"]} 条有效数据',
                'task_id': task_id,
                'raw_count': len(raw_data),
                'save_stats': save_stats,
                'excel_file': excel_file
            }

        except Exception as e:
            error_msg = f"爬取失败: {str(e)}"
            self.logger.error(error_msg)

            # Only mark the task failed if a task record was actually created
            if task_id:
                self.data_storage.update_crawl_task_status(task_id, 'failed', error_msg)

            return {
                'success': False,
                'message': error_msg,
                'task_id': task_id,
                'raw_count': 0
            }
        finally:
            # Always shut the browser engine down, on success or failure
            await self.crawler_engine.stop()

    async def crawl_shenzhen_catering(self, max_pages: int = None, headless: bool = True) -> Dict[str, Any]:
        """Crawl Shenzhen catering data (kept for backward compatibility)."""
        return await self.crawl_city_business("深圳", "餐饮", max_pages, headless)

    async def crawl_multiple_cities_and_business_types(self, cities: list = None, business_types: list = None,
                                                      max_pages: int = None, headless: bool = True) -> Dict[str, Any]:
        """Crawl several city / business-type combinations in one run.

        Defaults to Shenzhen + Hangzhou and catering + retail. Delegates
        entirely to the crawler engine.

        NOTE(review): `headless` is accepted but not forwarded to the
        engine here — confirm whether the engine handles it internally.
        """
        if cities is None:
            cities = ['深圳', '杭州']
        if business_types is None:
            business_types = ['餐饮', '零售']

        return await self.crawler_engine.crawl_multiple_cities_and_business_types(
            cities=cities,
            business_types=business_types,
            max_pages=max_pages
        )


    async def test_crawler_basic(self) -> bool:
        """Smoke-test the pipeline: navigation, extraction, processing.

        Returns True when every step succeeds, False otherwise. The
        browser engine is always stopped afterwards.
        """
        try:
            self.logger.info("开始测试爬虫基本功能...")
            
            # Start the crawler engine (always headless for the test)
            await self.crawler_engine.start(headless=True)
            
            # Check that the target listing page is reachable
            target_url = "https://sz.pupuwang.com/siting/list?industryCode=11"
            success = await self.crawler_engine.browser_manager.navigate_to(target_url)
            
            if not success:
                self.logger.error("页面访问测试失败")
                return False
            
            # Check that list data can be extracted from the page
            page_data = await self.crawler_engine.page_parser.extract_list_data(
                self.crawler_engine.browser_manager.page
            )
            
            if not page_data:
                self.logger.error("数据提取测试失败")
                return False
            
            self.logger.info(f"测试成功，提取到 {len(page_data)} 条数据")
            
            # Check the processing step on a single sample record
            processed_data = self.data_processor.process_crawled_data(page_data[:1])
            
            if not processed_data:
                self.logger.error("数据处理测试失败")
                return False
            
            self.logger.info("爬虫基本功能测试通过")
            return True
            
        except Exception as e:
            self.logger.error(f"爬虫测试失败: {e}")
            return False
            
        finally:
            await self.crawler_engine.stop()
    
    def get_database_summary(self) -> Dict[str, Any]:
        """Return database statistics plus a human-readable summary line.

        Returns:
            Dict with 'database_stats', 'latest_shenzhen_catering', and
            'summary_text'; an empty dict when the lookup fails.
        """
        try:
            stats = self.data_storage.get_database_stats()
            
            # Fetch the latest Shenzhen catering leads as a sample
            latest_data = self.data_storage.get_leads_data(
                city="深圳", 
                business_type="餐饮", 
                limit=10
            )
            
            summary = {
                'database_stats': stats,
                'latest_shenzhen_catering': latest_data,
                'summary_text': f"数据库中共有 {stats.get('total_leads', 0)} 条线索数据，"
                               f"其中活跃数据 {stats.get('active_leads', 0)} 条，"
                               f"涵盖 {stats.get('cities_count', 0)} 个城市，"
                               f"{stats.get('business_types_count', 0)} 种业态。"
            }
            
            return summary
            
        except Exception as e:
            self.logger.error(f"获取数据库摘要失败: {e}")
            return {}


async def main():
    """CLI entry point for the crawler system.

    Parses command-line arguments and dispatches to one of four actions:
    crawl (single city/business type), multi (multiple cities/types),
    test (basic smoke test), or summary (database statistics).

    Returns:
        Process exit code: 0 on success, 1 on failure or interruption.
    """
    import argparse
    
    parser = argparse.ArgumentParser(description='商业地产爬虫系统 - 已集成数据质量修复功能')
    parser.add_argument('--action', choices=['crawl', 'multi', 'test', 'summary'],
                       default='crawl', help='执行动作: crawl=单城市爬取, multi=多城市爬取, test=测试, summary=摘要')
    parser.add_argument('--city', default='深圳', help='城市名称 (默认: 深圳)')
    parser.add_argument('--business-type', default='餐饮', help='业态类型 (默认: 餐饮)')
    parser.add_argument('--cities', nargs='+', default=['深圳', '杭州'], help='多城市列表 (默认: 深圳 杭州)')
    parser.add_argument('--business-types', nargs='+', default=['餐饮', '零售'], help='多业态列表 (默认: 餐饮 零售)')
    parser.add_argument('--max-pages', type=int, default=None,
                       help='最大爬取页数')
    parser.add_argument('--headless', action='store_true', default=True,
                       help='无头模式运行')
    parser.add_argument('--show-browser', action='store_true',
                       help='显示浏览器窗口')
    
    args = parser.parse_args()

    # Effective headless mode, computed once instead of per-branch.
    # NOTE(review): --headless is store_true with default=True, so it can
    # never be switched off by itself; --show-browser is the real toggle.
    headless = args.headless and not args.show_browser

    main_crawler = MainCrawler()
    
    try:
        if args.action == 'test':
            # Smoke-test mode
            success = await main_crawler.test_crawler_basic()
            if success:
                print("✅ 爬虫功能测试通过")
                return 0
            else:
                print("❌ 爬虫功能测试失败")
                return 1
                
        elif args.action == 'summary':
            # Database summary mode
            summary = main_crawler.get_database_summary()
            print("📊 数据库摘要:")
            print(summary.get('summary_text', '无数据'))
            return 0
            
        elif args.action == 'multi':
            # Multi-city / multi-business-type crawl mode
            print(f"🚀 开始多城市多业态爬取:")
            print(f"   城市: {', '.join(args.cities)}")
            print(f"   业态: {', '.join(args.business_types)}")
            print(f"   最大页数: {args.max_pages or '不限制'}")

            # NOTE(review): headless is not forwarded here (same as the
            # original code) — the engine-level call takes no such argument.
            result = await main_crawler.crawl_multiple_cities_and_business_types(
                cities=args.cities,
                business_types=args.business_types,
                max_pages=args.max_pages
            )

            if result.get('success', False):
                print("🎉 多城市爬取任务完成!")
                print(f"📊 总体成功率: {result.get('overall_success_rate', 0):.1f}%")
                return 0
            else:
                print(f"❌ 多城市爬取任务失败: {result.get('message', '未知错误')}")
                return 1

        else:
            # Single city / single business-type crawl mode (default)
            print(f"🚀 开始爬取: {args.city} - {args.business_type}")
            print(f"   最大页数: {args.max_pages or '不限制'}")
            print(f"   数据质量修复: ✅ 已启用")

            result = await main_crawler.crawl_city_business(
                city=args.city,
                business_type=args.business_type,
                max_pages=args.max_pages,
                headless=headless
            )

            if result['success']:
                print("🎉 爬取任务完成!")
                print(f"📁 Excel文件: {result.get('excel_file', '未生成')}")
                print(f"📊 数据统计: 新增 {result['save_stats']['new']} 条，更新 {result['save_stats']['updated']} 条")
                print(f"📋 原始数据: {result['raw_count']} 条")
                return 0
            else:
                print(f"❌ 爬取任务失败: {result.get('message')}")
                return 1
                
    except KeyboardInterrupt:
        print("\n⚠️ 用户中断操作")
        return 1
    except Exception as e:
        print(f"❌ 执行失败: {e}")
        return 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
