"""
爬虫管理器
协调所有爬虫的工作，管理爬取流程
"""
import logging
from typing import Optional, List
from concurrent.futures import ThreadPoolExecutor, as_completed
from django.db import close_old_connections
from django.db import transaction

from spider.core.list_page_spider import ListPageSpider
from spider.core.detail_page_spider import DetailPageSpider
from spider.core.sub_detail_page_spider import SubDetailPageSpider
from spider.core.rate_limiter import RateLimiter
from spider.core.proxy_manager import ProxyManager
from spider.data.storage import StorageManager
from spider.models import CrawlTask, ListItem, DetailItem
from spider.config.settings import LIST_API_URL, MAX_CONCURRENT_REQUESTS

logger = logging.getLogger(__name__)


class SpiderManager:
    """Spider manager.

    Coordinates the three crawl stages (list pages -> detail pages -> sub
    detail pages) and owns the shared infrastructure: rate limiter, proxy
    manager and storage manager.
    """

    def __init__(self):
        # Shared infrastructure, handed to every spider so that rate
        # limiting, proxy rotation and persistence are coordinated globally.
        self.rate_limiter = RateLimiter()
        self.proxy_manager = ProxyManager()
        self.storage_manager = StorageManager()

        # One long-lived spider per pipeline stage; released in close().
        self.list_spider = ListPageSpider(
            rate_limiter=self.rate_limiter,
            proxy_manager=self.proxy_manager,
            storage_manager=self.storage_manager,
        )
        self.detail_spider = DetailPageSpider(
            rate_limiter=self.rate_limiter,
            proxy_manager=self.proxy_manager,
            storage_manager=self.storage_manager,
        )
        self.sub_detail_spider = SubDetailPageSpider(
            rate_limiter=self.rate_limiter,
            proxy_manager=self.proxy_manager,
            storage_manager=self.storage_manager,
        )

    def crawl_list_page(
        self,
        url: Optional[str] = None,
        page_number: int = 1,
        max_pages: Optional[int] = None,
        time_begin: Optional[str] = None,
        time_end: Optional[str] = None,
        **api_params
    ) -> int:
        """
        Crawl the list page (via the API endpoint).

        Args:
            url: List page URL (deprecated; kept for interface compatibility,
                the API URL is used instead).
            page_number: Starting page number, defaults to 1.
            max_pages: Maximum number of pages to crawl; None means all pages.
            time_begin: Start time, format YYYY-MM-DD.
            time_end: End time, format YYYY-MM-DD.
            **api_params: Additional API parameters.

        Returns:
            Number of list items saved.
        """
        # The API URL is used instead of the display-page URL.
        logger.info(f"Starting to crawl list page via API: {LIST_API_URL}")

        count = self.list_spider.crawl_and_save(
            url=None,  # URL argument is ignored; the spider talks to the API directly
            page_number=page_number,
            max_pages=max_pages,
            time_begin=time_begin,
            time_end=time_end,
            **api_params
        )
        logger.info(f"Crawled {count} items from list page")

        return count

    def _run_task_batch(self, tasks, process_one, workers, progress_cb):
        """
        Run ``process_one`` over every task, sequentially or in a thread pool.

        Args:
            tasks: Materialized list of CrawlTask instances.
            process_one: Callable taking a task and returning an
                ``(ok, reason, url)`` triple where ``ok`` is 1 on success and
                0 on failure. It is expected to catch its own exceptions.
            workers: Desired worker count; falls back to
                MAX_CONCURRENT_REQUESTS, then 1 (sequential).
            progress_cb: Optional callback invoked after each task with
                ``(processed, succeeded, failed, total)``; its exceptions are
                swallowed so progress reporting can never abort the crawl.

        Returns:
            ``(success_count, failure_count, failures)`` where ``failures``
            is a list of ``(url, reason)`` tuples.
        """
        max_workers = workers or MAX_CONCURRENT_REQUESTS or 1
        total = len(tasks)
        success_count = 0
        processed_count = 0
        failures = []

        def record(result):
            # Fold one worker result into the running totals and report progress.
            nonlocal success_count, processed_count
            if result is not None:
                ok, reason, url = result
                if ok:
                    success_count += 1
                else:
                    failures.append((url, reason))
            processed_count += 1
            if progress_cb:
                try:
                    progress_cb(processed_count, success_count, processed_count - success_count, total)
                except Exception:
                    pass  # progress reporting is best-effort

        if max_workers <= 1:
            for task in tasks:
                record(process_one(task))
        else:
            def worker(task):
                # Worker threads get their own DB connections; drop stale ones
                # before touching the ORM.
                close_old_connections()
                return process_one(task)

            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                futures = [executor.submit(worker, task) for task in tasks]
                for future in as_completed(futures):
                    try:
                        result = future.result()
                    except Exception:
                        # process_one catches its own errors; this is a safety net.
                        result = None
                    record(result)

        return success_count, total - success_count, failures

    def crawl_pending_details(self, limit: Optional[int] = None, workers: Optional[int] = None, quiet: bool = False, progress_cb=None):
        """
        Crawl pending detail-page tasks.

        Args:
            limit: Maximum number of tasks to process; None means all.
            workers: Thread-pool size; defaults to MAX_CONCURRENT_REQUESTS.
            quiet: Suppress informational logging when True.
            progress_cb: Optional ``(processed, succeeded, failed, total)``
                callback invoked after each task.

        Returns:
            ``(success_count, failure_count, failures)`` where ``failures``
            is a list of ``(url, reason)`` tuples.
        """
        tasks = list(self.storage_manager.get_pending_tasks('detail', limit))
        if not quiet:
            logger.info(f"Found {len(tasks)} pending detail tasks")

        def process_one(task):
            # Crawl a single detail task; returns (ok, reason, url).
            try:
                if task.status == 'failed':
                    if not quiet:
                        logger.info(f"Retrying failed task: {task.url}")
                    self.storage_manager.update_task_status(task, 'crawling')
                # One spider instance per task: resolves the list item and crawls.
                spider = DetailPageSpider(storage_manager=self.storage_manager)
                list_item = spider._find_list_item_by_url(task.url)
                if not list_item and task.parent_task:
                    list_item = ListItem.objects.filter(task=task.parent_task).first()
                if spider.crawl_and_save(task.url, list_item):
                    return (1, None, task.url)
                task.refresh_from_db()
                reason = getattr(task, 'error_message', '') or 'List item not found or parse failed'
                # If the task is still 'crawling', the spider did not record the
                # failure itself, so persist a failure reason here.
                if getattr(task, 'status', None) == 'crawling':
                    self.storage_manager.update_task_status(task, 'failed', reason)
                return (0, reason, task.url)
            except Exception as e:
                if not quiet:
                    logger.error(f"Failed to crawl detail page {task.url}: {e}")
                return (0, str(e), task.url)

        success_count, failure_count, failures = self._run_task_batch(tasks, process_one, workers, progress_cb)
        if not quiet:
            logger.info(f"Crawled {success_count} detail pages")
        return success_count, failure_count, failures

    @staticmethod
    def _find_detail_item_for_url(url):
        """
        Locate the DetailItem whose ``sub_detail_urls`` list contains ``url``.

        Tries a JSON containment query first; if the DB backend rejects it,
        falls back to a Python scan, then to a normalized-URL comparison.
        Returns None when no match is found.
        """
        try:
            item = DetailItem.objects.filter(sub_detail_urls__contains=url).first()
        except Exception:
            # Backend lacks JSON containment support; scan in Python instead.
            item = None
            for candidate in DetailItem.objects.all():
                if url in (candidate.sub_detail_urls or []):
                    item = candidate
                    break
        if item:
            return item
        # Last resort: compare normalized URLs to tolerate formatting drift.
        try:
            normalized = DetailItem._normalize_url(url)
            for candidate in DetailItem.objects.all():
                candidate_urls = [DetailItem._normalize_url(u) for u in (candidate.sub_detail_urls or [])]
                if normalized in candidate_urls:
                    return candidate
        except Exception:
            pass
        return None

    def crawl_pending_sub_details(self, limit: Optional[int] = None, workers: Optional[int] = None, quiet: bool = False, progress_cb=None):
        """
        Crawl pending sub-detail-page tasks.

        Args:
            limit: Maximum number of tasks to process; None means all.
            workers: Thread-pool size; defaults to MAX_CONCURRENT_REQUESTS.
            quiet: Suppress informational logging when True.
            progress_cb: Optional ``(processed, succeeded, failed, total)``
                callback invoked after each task.

        Returns:
            ``(success_count, failure_count, failures)`` where ``failures``
            is a list of ``(url, reason)`` tuples.
        """
        tasks = list(self.storage_manager.get_pending_tasks('sub_detail', limit))
        if not quiet:
            logger.info(f"Found {len(tasks)} pending sub detail tasks")

        def process_one(task):
            # Crawl a single sub-detail task; returns (ok, reason, url).
            try:
                if task.status == 'failed':
                    if not quiet:
                        logger.info(f"Retrying failed task: {task.url}")
                    self.storage_manager.update_task_status(task, 'crawling')
                # Prefer the parent-task link; fall back to URL matching.
                detail_item = None
                if task.parent_task:
                    detail_item = DetailItem.objects.filter(task=task.parent_task).first()
                if not detail_item:
                    detail_item = self._find_detail_item_for_url(task.url)
                spider = SubDetailPageSpider(storage_manager=self.storage_manager)
                if spider.crawl_and_save(task.url, detail_item):
                    return (1, None, task.url)
                task.refresh_from_db()
                return (0, getattr(task, 'error_message', '') or 'Unknown failure', task.url)
            except Exception as e:
                if not quiet:
                    logger.error(f"Failed to crawl sub detail page {task.url}: {e}")
                return (0, str(e), task.url)

        success_count, failure_count, failures = self._run_task_batch(tasks, process_one, workers, progress_cb)
        if not quiet:
            logger.info(f"Crawled {success_count} sub detail pages")
        return success_count, failure_count, failures

    def crawl_all(
        self,
        list_url: Optional[str] = None,
        page_number: int = 1,
        max_pages: Optional[int] = None,
        time_begin: Optional[str] = None,
        time_end: Optional[str] = None,
        **api_params
    ):
        """
        Run the full crawl pipeline: list pages, detail pages, sub detail pages.

        Args:
            list_url: List page URL (deprecated; kept for interface compatibility).
            page_number: Starting page number, defaults to 1.
            max_pages: Maximum number of pages to crawl; None means all pages.
            time_begin: Start time, format YYYY-MM-DD.
            time_end: End time, format YYYY-MM-DD.
            **api_params: Additional API parameters.

        Raises:
            Exception: Re-raised after logging if any stage fails.
        """
        logger.info("Starting full crawl process")

        try:
            # 1. List pages.
            self.crawl_list_page(
                url=list_url,
                page_number=page_number,
                max_pages=max_pages,
                time_begin=time_begin,
                time_end=time_end,
                **api_params
            )

            # 2. Detail pages.
            self.crawl_pending_details()

            # 3. Sub detail pages.
            self.crawl_pending_sub_details()

            logger.info("Full crawl process completed")

        except Exception as e:
            logger.error(f"Error in full crawl process: {e}")
            raise

    def close(self, quiet: bool = False):
        """Close all spiders and release their resources."""
        self.list_spider.close()
        self.detail_spider.close()
        self.sub_detail_spider.close()
        if not quiet:
            logger.info("All spiders closed")

