"""
爬虫控制器核心逻辑
Scraper Controller Core Logic

协调所有模块，控制整个爬取流程
Coordinates all modules and controls the entire scraping workflow
"""

import time
import logging
from typing import Dict, Any, List, Optional
from datetime import datetime, timedelta

from .config_manager import ConfigManager
from .http_client import HttpClient, HttpClientError
from .data_parser import DataParser
from .data_storage import DataStorage
from .models import PropertyListing


class ScraperControllerError(Exception):
    """Raised when the scraper controller hits an unrecoverable failure."""


class ScraperController:
    """
    Scraper controller.

    Coordinates the ConfigManager, HttpClient, DataParser and DataStorage
    modules to run the multi-city, multi-business-type scraping workflow
    and aggregate run statistics.
    """

    def __init__(self, config_file: Optional[str] = None):
        """
        Initialize the scraper controller and its collaborating components.

        Args:
            config_file: Path to the configuration file; ``None`` uses the
                default configuration.
        """
        # Configuration must be loaded first -- every other component
        # receives it through its constructor.
        self.config = ConfigManager(config_file)

        self.http_client = HttpClient(self.config)
        self.data_parser = DataParser()
        self.data_storage = DataStorage(self.config)

        # Temporary logging setup until a dedicated LogManager is implemented.
        self._setup_logging()

        # Aggregated statistics for the whole run; mutated in place while
        # scraping and summarized by get_statistics()/_log_final_summary().
        self.statistics = {
            'start_time': None,
            'end_time': None,
            'total_combinations': 0,
            'successful_combinations': 0,
            'failed_combinations': 0,
            'total_pages_processed': 0,
            'total_items_found': 0,
            'total_items_saved': 0,
            'total_errors': {
                'network_errors': 0,
                'parse_errors': 0,
                'storage_errors': 0
            },
            'city_business_stats': {},
            'processing_time': 0
        }

        self.logger.info("ScraperController initialized successfully")

    def _setup_logging(self):
        """Configure temporary logging (console + scraper.log).

        NOTE(review): logging.basicConfig() is a no-op when the root logger
        already has handlers, so repeated controller construction does not
        stack handlers -- but a component configuring the root logger is
        still an interim measure, to be replaced by LogManager.
        """
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.StreamHandler(),
                logging.FileHandler('scraper.log', encoding='utf-8')
            ]
        )
        self.logger = logging.getLogger(__name__)

    def run(self) -> Dict[str, Any]:
        """
        Run the main scraping workflow.

        Iterates every (city, business_type) combination from the
        configuration, scrapes each one, and aggregates per-combination
        and global statistics.

        Returns:
            Dict[str, Any]: The accumulated run statistics.

        Raises:
            ScraperControllerError: On an unrecoverable failure outside the
                per-combination loop (individual combination failures are
                recorded in the statistics, not raised).
        """
        self.logger.info("Starting scraper controller...")
        self.statistics['start_time'] = datetime.now()

        try:
            # Targets to scrape come straight from the configuration.
            cities = self.config.get_cities()
            business_types = self.config.get_business_types()

            self.logger.info(f"Target cities: {cities}")
            self.logger.info(f"Target business types: {business_types}")

            total_combinations = len(cities) * len(business_types)
            self.statistics['total_combinations'] = total_combinations

            self.logger.info(f"Total combinations to process: {total_combinations}")

            # Process every (city, business_type) combination.
            for city in cities:
                for business_type in business_types:
                    try:
                        self.logger.info(f"Processing {city} - {business_type}")
                        combination_stats = self._scrape_city_business_type(city, business_type)

                        key = f"{city}_{business_type}"
                        self.statistics['city_business_stats'][key] = combination_stats

                        if combination_stats['success']:
                            self.statistics['successful_combinations'] += 1
                        else:
                            self.statistics['failed_combinations'] += 1

                    except Exception as e:
                        # One failed combination must not kill the run;
                        # record it and continue with the next one.
                        self.logger.error(f"Failed to process {city} - {business_type}: {e}")
                        self.statistics['failed_combinations'] += 1

                        key = f"{city}_{business_type}"
                        self.statistics['city_business_stats'][key] = {
                            'success': False,
                            'error': str(e),
                            'pages_processed': 0,
                            'items_found': 0,
                            'items_saved': 0
                        }

            self.statistics['end_time'] = datetime.now()
            self.statistics['processing_time'] = (
                self.statistics['end_time'] - self.statistics['start_time']
            ).total_seconds()

            self._log_final_summary()
            return self.statistics

        except Exception as e:
            self.logger.error(f"Critical error in scraper controller: {e}")
            # Chain the original exception so the traceback keeps its cause.
            raise ScraperControllerError(f"Scraper controller failed: {e}") from e

    def _scrape_city_business_type(self, city: str, business_type: str) -> Dict[str, Any]:
        """
        Scrape all data for one (city, business_type) combination.

        Args:
            city: City code.
            business_type: Business type.

        Returns:
            Dict[str, Any]: Statistics for this combination ('success',
            'total_pages', 'pages_processed', 'items_found', 'items_saved',
            'errors').
        """
        combination_stats = {
            'success': False,
            'total_pages': 0,
            'pages_processed': 0,
            'items_found': 0,
            'items_saved': 0,
            'errors': []
        }

        try:
            # Determine how many pages exist before scraping them.
            total_pages = self._get_total_pages(city, business_type)
            if total_pages <= 0:
                self.logger.warning(f"No pages found for {city} - {business_type}")
                return combination_stats

            combination_stats['total_pages'] = total_pages
            self.logger.info(f"Found {total_pages} pages for {city} - {business_type}")

            # Scrape every page and merge the per-page statistics.
            pages_stats = self._scrape_pages(city, business_type, total_pages)

            combination_stats.update(pages_stats)
            combination_stats['success'] = True

            self.logger.info(
                f"Completed {city} - {business_type}: "
                f"{combination_stats['pages_processed']}/{total_pages} pages, "
                f"{combination_stats['items_saved']} items saved"
            )

        except Exception as e:
            self.logger.error(f"Error processing {city} - {business_type}: {e}")
            combination_stats['errors'].append(str(e))

        return combination_stats

    def _get_total_pages(self, city: str, business_type: str) -> int:
        """
        Determine the total number of result pages for a combination.

        Resolution order: the 'total_page' field of the first-page JSON
        response, then pagination links in the embedded HTML, then a probe
        parse of the first page (1 if it yields items, else 0).

        Args:
            city: City code.
            business_type: Business type.

        Returns:
            int: Total number of pages, or 0 when it cannot be determined.
        """
        try:
            # Request the first page; it carries the pagination metadata.
            response_data = self.http_client.get_page_data(city, business_type, page=1)

            # 1) Prefer the explicit 'total_page' field of the JSON response.
            total_pages = response_data.get('total_page', 0)
            if isinstance(total_pages, str):
                # A malformed string (e.g. "N/A") must not abort the whole
                # lookup with a ValueError -- treat it as "unknown" and fall
                # through to the HTML and probe-parse fallbacks instead.
                total_pages = int(total_pages) if total_pages.isdigit() else 0
            if isinstance(total_pages, int) and total_pages > 0:
                return total_pages

            # 2) Fall back to parsing pagination links out of the HTML.
            html_content = response_data.get('html', '')
            if html_content:
                total_pages = self._extract_total_pages_from_html(html_content)
                if total_pages > 0:
                    return total_pages

            # 3) Last resort: parse the first page itself -- any items imply
            # at least one page exists.
            parsed_listings = self.data_parser.parse_response(response_data, city, business_type)
            if parsed_listings:
                self.logger.info(f"Found {len(parsed_listings)} items on first page for {city} - {business_type}")
                # Report a single page for now; probing further pages could
                # refine this in a later iteration.
                return 1
            else:
                self.logger.warning(f"No data found on first page for {city} - {business_type}")
                return 0

        except HttpClientError as e:
            self.logger.error(f"Network error getting total pages for {city} - {business_type}: {e}")
            self.statistics['total_errors']['network_errors'] += 1
            return 0
        except (ValueError, KeyError) as e:
            self.logger.error(f"Parse error getting total pages for {city} - {business_type}: {e}")
            self.statistics['total_errors']['parse_errors'] += 1
            return 0
        except Exception as e:
            self.logger.error(f"Unexpected error getting total pages for {city} - {business_type}: {e}")
            return 0

    def _extract_total_pages_from_html(self, html_content: str) -> int:
        """
        Extract the total page count from HTML pagination markup.

        Args:
            html_content: Raw HTML of a listing page.

        Returns:
            int: Total number of pages, or 0 when none could be extracted.
        """
        try:
            # Hoisted out of the loop: both imports are loop-invariant.
            import re
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(html_content, 'html.parser')

            # Selectors for common pagination widgets; the "last" link
            # normally points at the final page.
            pagination_selectors = [
                '.pagination .page-item:last-child a',  # Bootstrap pagination
                '.pagination a:last-child',
                '.page-nav a:last-child',
                '.pager a:last-child',
                'a[title="末页"]',
                'a[title="最后一页"]',
                '.page-link:last-child'
            ]

            for selector in pagination_selectors:
                elements = soup.select(selector)
                for element in elements:
                    # Try the page number embedded in the link target first.
                    href = element.get('href', '')
                    if 'page=' in href:
                        match = re.search(r'page=(\d+)', href)
                        if match:
                            return int(match.group(1))

                    # Otherwise fall back to the link's visible text.
                    text = element.get_text().strip()
                    if text.isdigit():
                        return int(text)

            # No recognizable pagination markup found.
            return 0

        except Exception as e:
            # Best-effort helper: failures (including a missing bs4) just
            # mean "could not determine", never an error for the caller.
            self.logger.debug(f"Failed to extract total pages from HTML: {e}")
            return 0

    def _scrape_pages(self, city: str, business_type: str, total_pages: int) -> Dict[str, Any]:
        """
        Scrape every page of a combination, continuing past page failures.

        Args:
            city: City code.
            business_type: Business type.
            total_pages: Number of pages to fetch.

        Returns:
            Dict[str, Any]: Aggregated page statistics ('pages_processed',
            'items_found', 'items_saved', 'errors').
        """
        pages_stats = {
            'pages_processed': 0,
            'items_found': 0,
            'items_saved': 0,
            'errors': []
        }

        # The inter-request delay is configuration-driven.
        request_delay = self.config.get_request_delay()

        for page in range(1, total_pages + 1):
            try:
                self.logger.debug(f"Processing page {page}/{total_pages} for {city} - {business_type}")

                page_stats = self._scrape_single_page(city, business_type, page)

                # Accumulate per-combination totals.
                pages_stats['pages_processed'] += 1
                pages_stats['items_found'] += page_stats['items_found']
                pages_stats['items_saved'] += page_stats['items_saved']

                if page_stats['errors']:
                    pages_stats['errors'].extend(page_stats['errors'])

                # Mirror the counts into the global run statistics.
                self.statistics['total_pages_processed'] += 1
                self.statistics['total_items_found'] += page_stats['items_found']
                self.statistics['total_items_saved'] += page_stats['items_saved']

                # Throttle between requests (skip after the last page).
                if page < total_pages:
                    self.logger.debug(f"Waiting {request_delay} seconds before next request...")
                    time.sleep(request_delay)

            except Exception as e:
                self.logger.error(f"Error processing page {page} for {city} - {business_type}: {e}")
                pages_stats['errors'].append(f"Page {page}: {str(e)}")

                # A single failed page must not stop the remaining pages.
                continue

        return pages_stats

    def _scrape_single_page(self, city: str, business_type: str, page: int) -> Dict[str, Any]:
        """
        Fetch, parse and store a single page of listings.

        Args:
            city: City code.
            business_type: Business type.
            page: 1-based page number.

        Returns:
            Dict[str, Any]: Page statistics ('items_found', 'items_saved',
            'errors'). Errors are recorded here and in the global error
            counters rather than raised.
        """
        page_stats = {
            'items_found': 0,
            'items_saved': 0,
            'errors': []
        }

        try:
            # 1. Fetch the page.
            response_data = self.http_client.get_page_data(city, business_type, page)

            # 2. Parse it into listing objects.
            try:
                parsed_listings = self.data_parser.parse_response(response_data, city, business_type)
                page_stats['items_found'] = len(parsed_listings)

                if not parsed_listings:
                    self.logger.debug(f"No items found on page {page} for {city} - {business_type}")
                    return page_stats

            except Exception as e:
                self.logger.error(f"Parse error on page {page} for {city} - {business_type}: {e}")
                self.statistics['total_errors']['parse_errors'] += 1
                page_stats['errors'].append(f"Parse error: {str(e)}")
                return page_stats

            # 3. Persist the parsed listings.
            try:
                # Storage consumes plain dicts, not listing objects.
                data_dicts = [listing.to_dict() for listing in parsed_listings]
                saved_count = self.data_storage.save_data(data_dicts, city, business_type)
                page_stats['items_saved'] = saved_count

                self.logger.debug(
                    f"Page {page} for {city} - {business_type}: "
                    f"found {page_stats['items_found']}, saved {saved_count}"
                )

            except Exception as e:
                self.logger.error(f"Storage error on page {page} for {city} - {business_type}: {e}")
                self.statistics['total_errors']['storage_errors'] += 1
                page_stats['errors'].append(f"Storage error: {str(e)}")

        except HttpClientError as e:
            self.logger.error(f"Network error on page {page} for {city} - {business_type}: {e}")
            self.statistics['total_errors']['network_errors'] += 1
            page_stats['errors'].append(f"Network error: {str(e)}")

        except Exception as e:
            self.logger.error(f"Unexpected error on page {page} for {city} - {business_type}: {e}")
            page_stats['errors'].append(f"Unexpected error: {str(e)}")

        return page_stats

    def get_statistics(self) -> Dict[str, Any]:
        """
        Build a detailed statistics snapshot with derived metrics.

        Returns:
            Dict[str, Any]: Copy of the run statistics plus derived fields:
            'success_rate', 'pages_per_second', 'items_per_second',
            'dedup_rate', and optional component statistics.
        """
        # Copy the nested structures too, so callers mutating the snapshot
        # cannot corrupt the controller's live counters (a plain .copy()
        # would share the nested dicts).
        stats = dict(self.statistics)
        stats['total_errors'] = dict(self.statistics['total_errors'])
        stats['city_business_stats'] = {
            key: dict(value)
            for key, value in self.statistics['city_business_stats'].items()
        }

        # Success rate over all combinations.
        if stats['total_combinations'] > 0:
            stats['success_rate'] = stats['successful_combinations'] / stats['total_combinations']
        else:
            stats['success_rate'] = 0.0

        # Average processing throughput.
        if stats['processing_time'] > 0:
            stats['pages_per_second'] = stats['total_pages_processed'] / stats['processing_time']
            stats['items_per_second'] = stats['total_items_found'] / stats['processing_time']
        else:
            stats['pages_per_second'] = 0.0
            stats['items_per_second'] = 0.0

        # Deduplication rate, clamped at 0 in case storage ever reports
        # more items saved than were found.
        if stats['total_items_found'] > 0:
            stats['dedup_rate'] = max(
                0.0, 1 - (stats['total_items_saved'] / stats['total_items_found'])
            )
        else:
            stats['dedup_rate'] = 0.0

        # Attach component statistics when the components expose them.
        if hasattr(self.http_client, 'stats'):
            stats['http_stats'] = self.http_client.stats.copy()

        if hasattr(self.data_parser, 'error_count'):
            stats['parser_error_count'] = self.data_parser.error_count

        return stats

    def _log_final_summary(self):
        """Log the end-of-run summary (timings, counts, errors, details)."""
        stats = self.get_statistics()

        self.logger.info("=" * 60)
        self.logger.info("SCRAPING COMPLETED - FINAL SUMMARY")
        self.logger.info("=" * 60)

        # Run-level statistics.
        self.logger.info(f"Processing time: {stats['processing_time']:.2f} seconds")
        self.logger.info(f"Total combinations: {stats['total_combinations']}")
        self.logger.info(f"Successful combinations: {stats['successful_combinations']}")
        self.logger.info(f"Failed combinations: {stats['failed_combinations']}")
        self.logger.info(f"Success rate: {stats['success_rate']:.2%}")

        # Data volume statistics.
        self.logger.info(f"Total pages processed: {stats['total_pages_processed']}")
        self.logger.info(f"Total items found: {stats['total_items_found']}")
        self.logger.info(f"Total items saved: {stats['total_items_saved']}")
        self.logger.info(f"Deduplication rate: {stats['dedup_rate']:.2%}")

        # Throughput statistics.
        self.logger.info(f"Pages per second: {stats['pages_per_second']:.2f}")
        self.logger.info(f"Items per second: {stats['items_per_second']:.2f}")

        # Error statistics.
        total_errors = sum(stats['total_errors'].values())
        self.logger.info(f"Total errors: {total_errors}")
        if total_errors > 0:
            for error_type, count in stats['total_errors'].items():
                if count > 0:
                    self.logger.info(f"  {error_type}: {count}")

        # Per-combination breakdown.
        self.logger.info("\nDetailed statistics by city and business type:")
        for key, combination_stats in stats['city_business_stats'].items():
            # NOTE(review): keys are built as f"{city}_{business_type}", so a
            # city code containing '_' would be mis-split here -- confirm city
            # codes never contain underscores.
            city, business_type = key.split('_', 1)
            status = "✓" if combination_stats['success'] else "✗"
            self.logger.info(
                f"  {status} {city}-{business_type}: "
                f"{combination_stats.get('pages_processed', 0)} pages, "
                f"{combination_stats.get('items_saved', 0)} items saved"
            )

            if not combination_stats['success'] and 'error' in combination_stats:
                self.logger.info(f"    Error: {combination_stats['error']}")

        self.logger.info("=" * 60)

    def stop(self):
        """
        Request the scraper to stop (reserved for future graceful shutdown).

        The current workflow is synchronous, so this only logs the request;
        a stop flag can be wired in here later.
        """
        self.logger.info("Scraper controller stop requested")

    def is_running(self) -> bool:
        """
        Check whether a scraping run is currently in progress.

        Returns:
            bool: True when a run has started but not yet finished.
        """
        return (
            self.statistics['start_time'] is not None and
            self.statistics['end_time'] is None
        )

    def get_progress(self) -> Dict[str, Any]:
        """
        Report the current run progress.

        Returns:
            Dict[str, Any]: {'status': 'not_running', 'progress': 0.0} when
            idle; otherwise status, fractional progress over combinations,
            and the running item/page counters.
        """
        if not self.is_running():
            return {'status': 'not_running', 'progress': 0.0}

        total_combinations = self.statistics['total_combinations']
        completed_combinations = (
            self.statistics['successful_combinations'] +
            self.statistics['failed_combinations']
        )

        if total_combinations > 0:
            progress = completed_combinations / total_combinations
        else:
            progress = 0.0

        return {
            'status': 'running',
            'progress': progress,
            'completed_combinations': completed_combinations,
            'total_combinations': total_combinations,
            'pages_processed': self.statistics['total_pages_processed'],
            'items_found': self.statistics['total_items_found'],
            'items_saved': self.statistics['total_items_saved']
        }