# -*- coding: utf-8 -*-
"""
爬虫引擎核心模块
"""

import asyncio
import logging
from typing import List, Dict, Any, Optional
from urllib.parse import urljoin

from .browser_manager import BrowserManager
from .page_parser import PageParser
from .pagination_handler import PaginationHandler
from .business_type_manager import BusinessTypeManager, BusinessTypeStatus
from .city_manager import CityManager, CityStatus
from src.utils.date_filter import DateFilter, SmartPaginationController
from src.utils.csv_exporter import CSVExporter, DataDisplayManager
from config.settings import TARGET_SITES, TARGET_BUSINESS_TYPES


class CrawlerEngine:
    """Core crawler engine.

    Orchestrates the browser, list/detail page parsing, pagination,
    city and business-type management, 30-day date filtering, and CSV
    export for scraping listing data from the pupuwang site.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.browser_manager = BrowserManager()
        self.page_parser = PageParser()
        self.pagination_handler = PaginationHandler()
        self.business_type_manager = BusinessTypeManager()
        self.city_manager = CityManager()
        self.date_filter = DateFilter(days_limit=30)  # keep only records from the last 30 days
        self.pagination_controller = SmartPaginationController(self.date_filter)
        self.csv_exporter = CSVExporter()  # CSV exporter
        self.display_manager = DataDisplayManager()  # data display manager
        self.base_url = TARGET_SITES['pupuwang']['base_url']

    async def start(self, headless: bool = True) -> None:
        """Start the crawler engine (launches the underlying browser).

        Args:
            headless: run the browser without a visible window.

        Raises:
            Exception: re-raised if the browser fails to start.
        """
        try:
            await self.browser_manager.start(headless)
            self.logger.info("爬虫引擎启动成功")
        except Exception as e:
            self.logger.error(f"爬虫引擎启动失败: {e}")
            raise

    async def stop(self) -> None:
        """Stop the crawler engine and close the browser (best-effort, never raises)."""
        try:
            await self.browser_manager.close()
            self.logger.info("爬虫引擎已停止")
        except Exception as e:
            self.logger.error(f"停止爬虫引擎时出错: {e}")

    async def crawl_city_business(self, city: str, business_type: str, max_pages: int = None) -> List[Dict[str, Any]]:
        """Crawl listing data for one city / business-type combination.

        Switches the managers to the target city and business type, navigates
        to the built URL, crawls all pages, enriches with detail-page data,
        applies city/business post-processing and exports the result to CSV.

        Args:
            city: city name (e.g. "深圳").
            business_type: business type name (e.g. "餐饮").
            max_pages: optional cap on the number of list pages to crawl.

        Returns:
            The processed records; an empty list on any failure.
        """
        try:
            self.logger.info(f"开始爬取 {city} - {business_type} 数据")

            # Switch to the target city.
            if not self.city_manager.switch_city(city):
                self.logger.error(f"城市切换失败: {city}")
                return []

            # Switch to the target business type.
            if not self.business_type_manager.switch_business_type(business_type):
                self.logger.error(f"业态切换失败: {business_type}")
                return []

            # Mark city and business type as in-progress.
            self.city_manager.set_city_status(city, CityStatus.PROCESSING)
            self.business_type_manager.set_business_type_status(business_type, BusinessTypeStatus.PROCESSING)

            # Build the target URL.
            target_url = self._build_target_url(city, business_type)

            # Navigate to the target page.
            success = await self.browser_manager.navigate_to(target_url)
            if not success:
                self.logger.error("页面导航失败")
                self.city_manager.set_city_status(city, CityStatus.FAILED)
                self.business_type_manager.set_business_type_status(business_type, BusinessTypeStatus.FAILED)
                self.city_manager.increment_city_error_count(city)
                self.business_type_manager.increment_error_count(business_type)
                return []

            # Apply on-page filter controls.
            await self._apply_filters(business_type)

            # Crawl every list page (date-filtered).
            all_data = await self._crawl_all_pages(max_pages)

            # Record the raw data in both managers.
            self.business_type_manager.add_business_type_data(business_type, all_data, 'raw')
            self.city_manager.add_city_business_data(city, business_type, all_data)

            # Enrich each record with detail-page data.
            enriched_data = await self._enrich_with_detail_data(all_data)

            # Business-type specific post-processing.
            business_processed_data = self.business_type_manager.apply_business_type_specific_processing(business_type, enriched_data)

            # City specific post-processing.
            city_processed_data = self.city_manager.apply_city_specific_processing(city, business_processed_data)

            # Record the processed data.
            self.business_type_manager.add_business_type_data(business_type, city_processed_data, 'processed')

            # Export CSV and show a sample / summary when anything was collected.
            if city_processed_data:
                # Write the CSV file.
                csv_file = self.csv_exporter.export_to_csv(
                    city_processed_data, city, business_type, append_mode=False
                )

                # Print a data sample.
                self.csv_exporter.print_data_sample(city_processed_data, sample_size=10)

                # Date-filter statistics (raw vs. kept).
                filter_stats = self.date_filter.get_filter_stats(all_data, city_processed_data)

                # Completion summary.
                self.display_manager.print_completion_summary(
                    city, business_type, len(city_processed_data), csv_file, filter_stats
                )

            # Mark both as completed.
            self.business_type_manager.set_business_type_status(business_type, BusinessTypeStatus.COMPLETED)
            self.city_manager.set_city_status(city, CityStatus.COMPLETED)

            self.logger.info(f"爬取完成，共获取 {len(city_processed_data)} 条数据")
            return city_processed_data

        except Exception as e:
            self.logger.error(f"爬取数据失败: {e}")
            self.city_manager.set_city_status(city, CityStatus.FAILED)
            self.business_type_manager.set_business_type_status(business_type, BusinessTypeStatus.FAILED)
            self.city_manager.increment_city_error_count(city)
            self.business_type_manager.increment_error_count(business_type)
            return []

    async def crawl_shenzhen_catering(self, max_pages: int = None) -> List[Dict[str, Any]]:
        """Crawl Shenzhen catering data (convenience wrapper)."""
        return await self.crawl_city_business("深圳", "餐饮", max_pages)

    async def crawl_multiple_business_types(self, city: str, business_types: List[str], max_pages: int = None) -> Dict[str, Any]:
        """Sequentially crawl several business types for one city.

        Returns a summary dict with per-business-type results, aggregate
        statistics and a success rate (0 when *business_types* is empty).
        """
        try:
            self.logger.info(f"开始连续爬取 {city} 的 {len(business_types)} 种业态数据")

            # Reset the business-type manager session.
            self.business_type_manager.reset_session()

            results = {}
            total_data = []

            for business_type in business_types:
                self.logger.info(f"开始爬取业态: {business_type}")

                try:
                    # Crawl a single business type.
                    business_data = await self.crawl_city_business(city, business_type, max_pages)

                    # Validate the data for this business type.
                    validation_result = self.business_type_manager.validate_business_type_data(business_type, business_data)

                    results[business_type] = {
                        'success': True,
                        'data_count': len(business_data),
                        'data': business_data,
                        'validation': validation_result
                    }

                    total_data.extend(business_data)
                    self.logger.info(f"业态 {business_type} 爬取完成，获取 {len(business_data)} 条数据")

                except Exception as e:
                    self.logger.error(f"业态 {business_type} 爬取失败: {e}")
                    results[business_type] = {
                        'success': False,
                        'error': str(e),
                        'data_count': 0,
                        'data': []
                    }

                    # Bump the error counter.
                    self.business_type_manager.increment_error_count(business_type)

            # Aggregate statistics.
            stats = self.business_type_manager.generate_business_type_stats()

            # Bug fix: guard against ZeroDivisionError when business_types is empty.
            success_rate = (
                len([r for r in results.values() if r['success']]) / len(business_types) * 100
                if business_types else 0
            )

            summary = {
                'city': city,
                'business_types': business_types,
                'total_data_count': len(total_data),
                'results': results,
                'stats': stats,
                'success_rate': success_rate
            }

            self.logger.info(f"多业态爬取完成，总计获取 {len(total_data)} 条数据")
            return summary

        except Exception as e:
            self.logger.error(f"多业态爬取失败: {e}")
            return {
                'city': city,
                'business_types': business_types,
                'total_data_count': 0,
                'results': {},
                'error': str(e),
                'success_rate': 0
            }

    async def crawl_multiple_cities(self, cities: List[str], business_type: str, max_pages: int = None) -> Dict[str, Any]:
        """Sequentially crawl several cities for one business type.

        Returns a summary dict with per-city results, aggregate statistics
        and a success rate (0 when *cities* is empty).
        """
        try:
            self.logger.info(f"开始连续爬取 {len(cities)} 个城市的 {business_type} 业态数据")

            # Reset the city manager session.
            self.city_manager.reset_session()

            results = {}
            total_data = []

            for city in cities:
                self.logger.info(f"开始爬取城市: {city}")

                try:
                    # Crawl a single city.
                    city_data = await self.crawl_city_business(city, business_type, max_pages)

                    # Validate the data for this city.
                    validation_result = self.city_manager.validate_city_data(city, city_data)

                    results[city] = {
                        'success': True,
                        'data_count': len(city_data),
                        'data': city_data,
                        'validation': validation_result
                    }

                    total_data.extend(city_data)
                    self.logger.info(f"城市 {city} 爬取完成，获取 {len(city_data)} 条数据")

                except Exception as e:
                    self.logger.error(f"城市 {city} 爬取失败: {e}")
                    results[city] = {
                        'success': False,
                        'error': str(e),
                        'data_count': 0,
                        'data': []
                    }

                    # Bump the error counter.
                    self.city_manager.increment_city_error_count(city)

            # Aggregate statistics.
            city_stats = self.city_manager.generate_city_stats()
            business_stats = self.business_type_manager.generate_business_type_stats()

            # Bug fix: guard against ZeroDivisionError when cities is empty.
            success_rate = (
                len([r for r in results.values() if r['success']]) / len(cities) * 100
                if cities else 0
            )

            summary = {
                'business_type': business_type,
                'cities': cities,
                'total_data_count': len(total_data),
                'results': results,
                'city_stats': city_stats,
                'business_stats': business_stats,
                'success_rate': success_rate
            }

            self.logger.info(f"多城市爬取完成，总计获取 {len(total_data)} 条数据")
            return summary

        except Exception as e:
            self.logger.error(f"多城市爬取失败: {e}")
            return {
                'business_type': business_type,
                'cities': cities,
                'total_data_count': 0,
                'results': {},
                'error': str(e),
                'success_rate': 0
            }

    async def crawl_multiple_cities_and_business_types(self, cities: List[str], business_types: List[str], max_pages: int = None) -> Dict[str, Any]:
        """Crawl the full cross product of *cities* x *business_types*.

        Starts the browser itself and guarantees it is closed afterwards
        (via ``finally``). Success rates are 0 when the input lists are empty.
        """
        try:
            self.logger.info(f"开始连续爬取 {len(cities)} 个城市的 {len(business_types)} 种业态数据")

            # Start the browser for the whole batch.
            await self.start(headless=True)

            # Reset both manager sessions.
            self.city_manager.reset_session()
            self.business_type_manager.reset_session()

            results = {}
            total_data = []

            for city in cities:
                self.logger.info(f"开始爬取城市: {city}")
                city_results = {}
                city_total_data = []

                for business_type in business_types:
                    self.logger.info(f"开始爬取 {city} - {business_type}")

                    try:
                        # Crawl one city / business-type pair.
                        data = await self.crawl_city_business(city, business_type, max_pages)

                        # Validate from both perspectives.
                        city_validation = self.city_manager.validate_city_data(city, data)
                        business_validation = self.business_type_manager.validate_business_type_data(business_type, data)

                        city_results[business_type] = {
                            'success': True,
                            'data_count': len(data),
                            'data': data,
                            'city_validation': city_validation,
                            'business_validation': business_validation
                        }

                        city_total_data.extend(data)
                        self.logger.info(f"{city} - {business_type} 爬取完成，获取 {len(data)} 条数据")

                    except Exception as e:
                        self.logger.error(f"{city} - {business_type} 爬取失败: {e}")
                        city_results[business_type] = {
                            'success': False,
                            'error': str(e),
                            'data_count': 0,
                            'data': []
                        }

                # Bug fix: guard against ZeroDivisionError when business_types is empty.
                city_success_rate = (
                    len([r for r in city_results.values() if r['success']]) / len(business_types) * 100
                    if business_types else 0
                )

                results[city] = {
                    'business_results': city_results,
                    'total_data_count': len(city_total_data),
                    'success_rate': city_success_rate
                }

                total_data.extend(city_total_data)

            # Aggregate statistics.
            city_stats = self.city_manager.generate_city_stats()
            business_stats = self.business_type_manager.generate_business_type_stats()

            # Bug fix: guard against ZeroDivisionError when cities is empty.
            overall_success_rate = (
                len([city_result for city_result in results.values()
                     if city_result['success_rate'] > 0]) / len(cities) * 100
                if cities else 0
            )

            summary = {
                'cities': cities,
                'business_types': business_types,
                'total_data_count': len(total_data),
                'results': results,
                'city_stats': city_stats,
                'business_stats': business_stats,
                'overall_success_rate': overall_success_rate
            }

            self.logger.info(f"多城市多业态爬取完成，总计获取 {len(total_data)} 条数据")
            return summary

        except Exception as e:
            self.logger.error(f"多城市多业态爬取失败: {e}")
            return {
                'cities': cities,
                'business_types': business_types,
                'total_data_count': 0,
                'results': {},
                'error': str(e),
                'overall_success_rate': 0
            }
        finally:
            # Always release the browser, even on failure.
            await self.stop()

    def _build_target_url(self, city: str, business_type: str) -> str:
        """Build the listing-page URL for a city / business-type pair."""
        # City code and per-city domain come from the city manager.
        city_code = self.city_manager.get_city_code(city)
        city_domain = self.city_manager.get_city_domain(city)

        # Business-type code comes from the business-type manager.
        business_code = self.business_type_manager.get_business_type_code(business_type)

        # Assemble the URL.
        base_url = f"https://{city_domain}/siting/list"
        target_url = f"{base_url}?industryCode={business_code}"

        self.logger.info(f"目标URL: {target_url}")
        return target_url

    async def _apply_filters(self, business_type: str) -> None:
        """Click the on-page business-type filter radio button (best-effort).

        Failures are logged and swallowed so the crawl continues unfiltered.
        """
        try:
            # Give the page time to finish rendering its filter controls.
            await asyncio.sleep(3)

            # Display name of the filter option for this business type.
            filter_name = self.business_type_manager.get_business_type_display_name(business_type)

            page = self.browser_manager.page

            try:
                await page.wait_for_selector('radio', timeout=5000)

                # Locate the business-type radio option by its accessible name.
                business_option = page.get_by_role('radio', name=filter_name)
                if await business_option.count() > 0:
                    await business_option.click()
                    await page.wait_for_load_state('networkidle')
                    self.logger.info(f"成功应用 {filter_name} 筛选")
                else:
                    self.logger.warning(f"未找到 {filter_name} 筛选选项")

            except Exception as e:
                self.logger.warning(f"应用筛选失败，继续执行: {e}")

        except Exception as e:
            self.logger.error(f"应用筛选条件失败: {e}")

    async def _crawl_all_pages(self, max_pages: int = None) -> List[Dict[str, Any]]:
        """Crawl every list page with smart, date-filtered pagination.

        Stops when: the date filter says older pages are pointless, the
        max-page cap is hit, there is no next page, or repeated per-page
        errors occur.

        Args:
            max_pages: optional cap on pages to crawl (None = unlimited).

        Returns:
            All records that passed the date filter, across all pages.
        """
        all_data = []

        try:
            # Reset pagination state and the smart controller.
            self.pagination_handler.reset_pagination_state()
            self.pagination_controller.reset()

            # Detect the site's pagination mechanism.
            pagination_info = await self.pagination_handler.analyze_pagination(self.browser_manager.page)

            if pagination_info.get('has_pagination'):
                self.logger.info(f"检测到分页: {pagination_info.get('pagination_type')}")
                if pagination_info.get('total_pages'):
                    self.logger.info(f"总页数: {pagination_info['total_pages']}")
            else:
                self.logger.info("未检测到分页，只处理当前页")

            # Bug fix: current_page is defined before the loop so the except
            # clause can never hit an unbound local.
            current_page = 1
            # Bug fix: the original retried a failing page unconditionally,
            # which loops forever on a persistent error. Cap the retries.
            consecutive_failures = 0

            # Crawl page by page.
            while True:
                try:
                    current_page = self.pagination_handler.current_page
                    self.logger.info(f"爬取第 {current_page} 页")

                    # Let the page settle.
                    await asyncio.sleep(2)

                    # Extract the current page's list data.
                    page_data = await self.page_parser.extract_list_data(self.browser_manager.page)
                    consecutive_failures = 0  # extraction succeeded; reset the retry budget

                    if not page_data:
                        self.logger.warning(f"第 {current_page} 页无数据")
                        if current_page == 1:
                            # Nothing on the very first page: give up entirely.
                            self.logger.warning("第一页无数据，停止爬取")
                            break
                        else:
                            # Past the first page an empty page likely means the end.
                            self.logger.info("可能已到最后一页，停止爬取")
                            break

                    # Date-filter gate: should we keep paginating at all?
                    should_continue = self.pagination_controller.should_continue_pagination(page_data)
                    if not should_continue:
                        self.logger.info("根据时间过滤策略，停止分页爬取")
                        # Still keep the valid records from this final page.
                        valid_data = self.date_filter.filter_data_by_date(page_data)
                        all_data.extend(valid_data)
                        self.logger.info(f"第 {current_page} 页过滤后获取 {len(valid_data)} 条有效数据")
                        break

                    # Date-filter this page's records.
                    valid_data = self.date_filter.filter_data_by_date(page_data)
                    all_data.extend(valid_data)

                    self.logger.info(f"第 {current_page} 页获取 {len(page_data)} 条数据，过滤后 {len(valid_data)} 条有效，累计 {len(all_data)} 条")

                    # Update per-business-type and per-city page counters.
                    current_business_type = self.business_type_manager.get_current_business_type()
                    current_city = self.city_manager.get_current_city()
                    if current_business_type:
                        self.business_type_manager.increment_page_count(current_business_type)
                    if current_city:
                        self.city_manager.increment_city_page_count(current_city)

                    # Honor the max-page cap.
                    if max_pages and current_page >= max_pages:
                        self.logger.info(f"达到最大页数限制 {max_pages}，停止爬取")
                        break

                    # No pagination mechanism: only the first page exists.
                    if not pagination_info.get('has_pagination'):
                        self.logger.info("无分页机制，只处理第一页")
                        break

                    # Advance to the next page.
                    has_next = await self.pagination_handler.go_to_next_page(self.browser_manager.page)
                    if not has_next:
                        self.logger.info("没有更多页面，爬取完成")
                        break

                    # Random delay to avoid hammering the site.
                    await self.browser_manager.random_delay()

                except Exception as e:
                    self.logger.error(f"爬取第 {current_page} 页失败: {e}")
                    # A failure on the first page is fatal for this crawl.
                    if current_page == 1:
                        break
                    consecutive_failures += 1
                    if consecutive_failures >= 3:
                        # Persistent failure on the same page: stop instead of looping forever.
                        break
                    # Otherwise retry this page.
                    continue

            # Emit pagination / filter statistics.
            pagination_status = self.pagination_handler.get_pagination_status()
            filter_stats = self.date_filter.get_filter_stats([], all_data)  # simplified stats

            self.logger.info(f"分页爬取完成: {pagination_status}")
            self.logger.info(f"时间过滤统计: 最终获取 {len(all_data)} 条有效数据 (截止日期: {filter_stats['cutoff_date']})")

        except Exception as e:
            self.logger.error(f"分页爬取过程失败: {e}")

        return all_data

    async def _go_to_next_page(self) -> bool:
        """Go to the next page — deprecated; use PaginationHandler instead.

        Returns True if a next page was reached, False otherwise.
        """
        try:
            page = self.browser_manager.page

            # Candidate "next page" buttons/links.
            next_selectors = [
                'button:has-text("下一页")',
                'a:has-text("下一页")',
                'button:has-text(">")',
                'a:has-text(">")',
                '.next',
                '.page-next'
            ]

            for selector in next_selectors:
                try:
                    next_button = page.locator(selector)
                    if await next_button.count() > 0:
                        # Only click if the button is enabled.
                        is_disabled = await next_button.get_attribute('disabled')
                        if not is_disabled:
                            await next_button.click()
                            await page.wait_for_load_state('networkidle')
                            return True
                except Exception:
                    continue

            # Fallback: look for numeric page links.
            # NOTE(review): ':has-text(regex="...")' is not valid Playwright
            # selector syntax (has-text takes a quoted string or /regex/);
            # this likely always raises and falls into the except below —
            # confirm before relying on this deprecated path.
            page_numbers = page.locator('button:has-text(regex="\\d+"), a:has-text(regex="\\d+")')
            count = await page_numbers.count()

            if count > 1:
                # Click the last page number (usually "next").
                last_page = page_numbers.nth(count - 1)
                await last_page.click()
                await page.wait_for_load_state('networkidle')
                return True

            return False

        except Exception as e:
            self.logger.error(f"翻页失败: {e}")
            return False

    async def _enrich_with_detail_data(self, list_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Visit each record's detail page and merge in extra fields.

        Detail-page values win over list-page values, but only when non-empty.
        A failed detail fetch keeps the (cleaned) list-page record.
        """
        enriched_data = []

        for i, item in enumerate(list_data):
            try:
                self.logger.info(f"处理第 {i+1}/{len(list_data)} 条数据")

                # Start from a copy of the list-page record.
                enriched_item = item.copy()

                # Fetch the detail page when a URL is available.
                detail_url = item.get('detail_url')
                if detail_url:
                    detail_data = await self._get_detail_data(detail_url)

                    # Merge: detail-page values take precedence when non-empty.
                    for key, value in detail_data.items():
                        if value:
                            enriched_item[key] = value

                # Normalize field names and fill required defaults.
                enriched_item = self._clean_data(enriched_item)

                enriched_data.append(enriched_item)

                # Random delay to avoid rapid-fire requests.
                await self.browser_manager.random_delay()

            except Exception as e:
                self.logger.error(f"处理第 {i+1} 条数据失败: {e}")
                # Keep the list-page record even if the detail step failed.
                enriched_data.append(self._clean_data(item.copy()))

        return enriched_data

    async def _get_detail_data(self, detail_url: str) -> Dict[str, Any]:
        """Open a detail page in a fresh tab and extract its data.

        Returns an empty dict on any failure; the tab is always closed.
        """
        try:
            # Resolve relative URLs against the configured base URL.
            if detail_url.startswith('/'):
                full_url = urljoin(self.base_url, detail_url)
            else:
                full_url = detail_url

            # Use a new tab so the list page keeps its state.
            page = self.browser_manager.page
            detail_page = await page.context.new_page()

            try:
                # Generous timeouts (90s nav / 60s idle) for slow detail pages.
                await detail_page.goto(full_url, timeout=90000)
                await detail_page.wait_for_load_state('networkidle', timeout=60000)

                # Parse the detail page.
                detail_data = await self.page_parser.extract_detail_data(detail_page)

                return detail_data

            finally:
                await detail_page.close()

        except Exception as e:
            self.logger.error(f"获取详情页数据失败 {detail_url}: {e}")
            return {}

    def _clean_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize field names, strip strings, and fill required defaults.

        Maps the legacy 'publish_time' key onto the standard 'publish_date'
        (when both are present, 'publish_date' wins because it is processed
        last), then guarantees every required field exists.
        """
        cleaned = {}

        # Field-name normalization (mostly identity, plus legacy aliases).
        field_mapping = {
            'title': 'title',
            'contact_person': 'contact_person',
            'contact_info': 'contact_info',
            'business_type': 'business_type',
            'area_info': 'area_info',
            'building_area': 'building_area',
            'acceptable_rent': 'acceptable_rent',
            'publish_time': 'publish_date',  # legacy field name
            'publish_date': 'publish_date',  # standard field name (wins over legacy)
            'detail_url': 'detail_url',
            'city': 'city',
            'data_source': 'data_source',
            'description': 'description',
            'crawl_time': 'crawl_time'  # crawl timestamp
        }

        for old_key, new_key in field_mapping.items():
            if old_key in data:
                value = data[old_key]
                if isinstance(value, str):
                    value = value.strip()
                cleaned[new_key] = value

        # Defaults for fields that downstream consumers require.
        required_fields = {
            'city': '深圳',
            'contact_person': '',
            'contact_info': '',
            'business_type': '餐饮',
            'acceptable_rent': '面议',
            'building_area': '',
            'publish_date': None,
            'detail_url': '',
            'data_source': 'pupuwang'
        }

        for field, default_value in required_fields.items():
            if field not in cleaned:
                cleaned[field] = default_value

        return cleaned
