#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/1/25 22:40
# @Author  : Ramsey
# @Site    : zh
# @File    : engine.py
# @Software: PyCharm
import asyncio
from spider_framework.items.items import Item
from typing import Optional, Generator, Callable
from inspect import iscoroutine, isgenerator, isasyncgen
from spider_framework.spider import Spider
from spider_framework.http.request import Request
from spider_framework.utils.error import OutputError
from spider_framework.core.scheduler import Scheduler
from spider_framework.core.downloader import HttpxDownloader
from spider_framework.utils.transform import transform_outputs
from spider_framework.utils.task_manager import TaskManager
from spider_framework.core.processor import Processor
from spider_framework.utils.log import spider_logger


class Engine:
    """Core crawl loop of the framework.

    Pulls requests from the scheduler, dispatches them to the downloader
    under a concurrency limit, and routes spider outputs (``Request`` /
    ``Item``) to the processor. Shuts down once the seed iterator is
    exhausted and every component reports idle.
    """

    def __init__(self, crawler):
        """
        :param crawler: owning crawler; provides ``settings`` and ``close()``.
        """
        # All runtime components are created lazily in start_spider(), not here.
        self.downloader: Optional[HttpxDownloader] = None
        self.scheduler: Optional[Scheduler] = None
        self.processor: Optional[Processor] = None
        self.start_url: Optional[Generator] = None  # seed-request iterator
        self.spider: Optional[Spider] = None
        self.task_manager: Optional[TaskManager] = None
        self.running: bool = False
        self.crawler = crawler
        self.settings = crawler.settings
        self.spider_logger = spider_logger(self.__class__.__name__, log_level=self.settings.get("LOG_LEVEL"))
        # fix: replaced a leftover bare `print(self.settings)` debug statement
        # with a proper debug-level log entry.
        self.spider_logger.debug(f"settings: {self.settings}")

    async def start_spider(self, spider_instance):
        """
        Start the spider: build all runtime components, seed the request
        iterator and enter the crawl loop (returns when crawling finishes).

        :param spider_instance: spider whose ``start_request()`` seeds the run
        :return: None
        """
        self.running = True
        self.spider_logger.info(f"爬虫启动, 项目名: {self.crawler.settings.get('SPIDER_NAME')}")
        self.scheduler = Scheduler(crawler=self.crawler)
        self.processor = Processor(crawler=self.crawler)
        self.task_manager = TaskManager(max_semaphore=self.settings.get("CONCURRENCY", 10))
        self.spider = spider_instance
        # Components expose an optional open() hook for their own setup.
        if hasattr(self.scheduler, "open"):
            self.scheduler.open()
        if hasattr(self.processor, "open"):
            self.processor.open()
        self.downloader = HttpxDownloader(crawler=self.crawler)
        if hasattr(self.downloader, "open"):
            self.downloader.open()
        self.start_url = iter(spider_instance.start_request())
        await self._open_spider()

    async def _open_spider(self):
        # Run the crawl loop as a task and wait for it to complete.
        crawling = asyncio.create_task(self.crawl())
        await crawling

    async def crawl(self):
        """Main loop: drain scheduled requests first, then feed seed requests.

        Exits when the seed iterator is consumed and all components are idle.
        """
        while self.running:
            if (request := await self._next_request()) is not None:
                await self._crawl(request=request)
                continue
            if self.start_url is None:
                # Seeds are exhausted; shut down only once the scheduler,
                # downloader, task manager and processor are all idle.
                # (fix: this was previously reached implicitly via the
                # TypeError raised by next(None).)
                if not await self._exit():
                    continue
                self.spider_logger.info("已到迭代器的末尾, 任务队列为空, 爬虫结束运行")
                self.running = False
                continue
            try:
                start_url = next(self.start_url)
            except StopIteration:
                # Seed iterator fully consumed; mark it so the idle check runs.
                self.start_url = None
            except Exception as error:
                # fix: the seed generator raised mid-iteration — log the error
                # immediately (it could previously be swallowed) and stop
                # consuming the broken iterator.
                self.spider_logger.info(f"爬虫迭代器未处理完, 但中途出现错误, 错误信息: {str(error)}")
                self.start_url = None
            else:
                await self._push_request(request=start_url)
        await self.close_spider()

    async def _crawl(self, request):
        """Spawn a concurrent fetch-and-handle task for one request."""

        async def crawl_task():
            fetch_result = await self._fetch(request=request)
            if fetch_result:
                await self._handle_spider_outputs(outputs=fetch_result)

        # Acquire before spawning so in-flight tasks stay under CONCURRENCY;
        # the TaskManager presumably releases the semaphore when the task
        # finishes — TODO confirm against the TaskManager implementation.
        await self.task_manager.semaphore.acquire()
        self.task_manager.create_task(crawl_task())

    async def _fetch(self, request):
        """Download ``request`` and normalize the callback's outputs.

        :param request: request to download
        :return: transformed outputs (via ``transform_outputs``), or None when
                 the download failed or the callback produced nothing.
        """

        async def _success(response):
            # Seed requests fall back to spider.parse; follow-up requests
            # carry their own callback.
            callback: Callable = request.callback or self.spider.parse
            _outputs = callback(response=response)
            if iscoroutine(_outputs):
                # fix: an async callback's result used to be awaited and then
                # discarded; its outputs are now transformed like sync ones.
                _outputs = await _outputs
            if _outputs:
                return transform_outputs(outputs=_outputs)

        _response = await self.downloader.fetch(request=request)
        if _response is None:
            return None
        return await _success(response=_response)

    async def _next_request(self):
        """
        Pop the next request from the scheduler.
        :return: a request, or None when the scheduler queue is empty.
        """
        request = await self.scheduler.next_request()
        return request

    async def _push_request(self, request):
        """
        Hand a request over to the scheduler.
        :param request: request to enqueue
        :return: None
        """
        # TODO duplicate-request filter
        await self.scheduler.push_request(request=request)

    async def _handle_spider_outputs(self, outputs):
        """Route each spider output to the processor; reject unknown types."""
        async for output in outputs:
            if isinstance(output, (Request, Item)):
                await self.processor.add_2_queue(output=output)
            else:
                # fix: the message previously claimed only `Request` was
                # accepted, although `Item` is also valid.
                raise OutputError("爬虫返回的outputs必须是`Request`或`Item`类型")

    async def _exit(self):
        """Return True when every component reports idle (safe to shut down)."""
        # NOTE(review): `idea()` looks like a misspelling of `idle()` on the
        # component interface; kept as-is because it is defined elsewhere.
        return all([
            self.scheduler.idea(),
            self.downloader.idea(),
            self.task_manager.idea(),
            self.processor.idea(),
        ])

    async def close_spider(self):
        """Tear down the crawler once the loop has stopped."""
        self.crawler.close()


if __name__ == "__main__":
    # Library module — no standalone CLI behavior; nothing to run here.
    pass
