from typing import Optional

from queuelib import queue
from corasy.http.Request import Request
from corasy.engine import Engine
from corasy import settings
from corasy.utils.Spder_Log import spider_log


class Spider(object):
    """Base class for all spiders.

    Subclasses must provide a ``name`` (as a class attribute or via the
    constructor), may seed ``start_urls``, and override :meth:`parse` to
    process responses.  Per-spider overrides of the global ``settings``
    module go in ``custom_settings``.
    """

    name: Optional[str] = None  # unique spider name (required)
    custom_settings: Optional[dict] = None  # per-spider setting overrides
    start_urls: Optional[list] = None  # seed URLs consumed by start_requests()

    def __init__(self, name=None, **kwargs):
        """Initialize the spider.

        :param name: spider name; falls back to the class attribute.
        :param kwargs: arbitrary attributes copied onto the instance.
        :raises ValueError: if no name is supplied anywhere.
        """
        if name is not None:
            self.name = name
        elif not getattr(self, 'name', None):
            raise ValueError(f"{type(self).__name__} must have a name")
        self.__dict__.update(kwargs)
        # The class attribute defaults to None, so the original
        # hasattr() check was always True and start_urls could remain
        # None; normalize any falsy value to an empty list so
        # start_requests() can always iterate it.
        if not getattr(self, "start_urls", None):
            self.start_urls = []
        self.get_settings()
        # BUG FIX: original read `self.logger = self.logger()`, which raised
        # AttributeError (no such attribute yet); the intended call is
        # get_logger().
        self.logger = self.get_logger()

    def get_settings(self):
        """Build ``self.settings`` from the global settings module, then
        overlay this spider's ``custom_settings`` (keys uppercased)."""
        self.settings = {}
        # Copy only UPPER_CASE constants: dir() also lists dunders such as
        # __name__, whose uppercased form does not exist on the module and
        # previously caused an AttributeError.
        for key in dir(settings):
            if key.isupper():
                self.settings[key] = getattr(settings, key)
        # custom_settings always exists as a class attribute, so only the
        # type check is needed.
        if isinstance(self.custom_settings, dict):
            for key, value in self.custom_settings.items():
                self.settings[key.upper()] = value

    def get_logger(self):
        """Create this spider's logger from its settings.

        Returns an empty string (falsy placeholder) when logging is
        disabled, otherwise a logger produced by ``spider_log``.
        """
        LOG_ENABLED = self.settings.get("LOG_ENABLED", False)
        LOG_FILE = self.settings.get("LOG_FILE", "")
        LOG_TO_CONSOLE = self.settings.get("LOG_TO_CONSOLE", False)
        LOG_LEVEL = self.settings.get("LOG_LEVEL", "INFO")
        LOG_FORMAT = self.settings.get("LOG_FORMAT", None)
        LOG_TO_FILE = self.settings.get("LOG_TO_FILE", False)
        # BUG FIX: the condition was inverted — the original returned the
        # empty placeholder when logging was ENABLED and built a logger
        # when it was disabled.
        if not LOG_ENABLED:
            return ""
        if not LOG_FILE:
            logger = spider_log(LOG_NAME=self.name)
        else:
            logger = spider_log(LOG_NAME=self.name, LOG_FILE=LOG_FILE,
                                LOG_TO_CONSOLE=LOG_TO_CONSOLE,
                                LOG_LEVEL=LOG_LEVEL.upper(),
                                LOG_FORMAT=LOG_FORMAT,
                                LOG_TO_FILE=LOG_TO_FILE
                                )
        return logger

    async def start_requests(self):
        """Yield an initial Request for each URL in ``start_urls``.

        :return: an async iterator of Request objects.
        """
        for url in self.start_urls:
            yield Request(url=url)

    def start(self):
        """Hand this spider to the engine and start crawling.

        Engine.start() wraps the event loop (via its internal execute
        method) that drives the spider.
        """
        engine = Engine(self)  # pass the spider instance to the engine
        engine.start()  # launch the crawl

    async def parse(self, response):
        """Default callback for downloaded responses — must be overridden.

        An implementation may return/yield:
            * a Request to schedule,
            * extracted data as dict/str,
            * None.

        :raises NotImplementedError: always, unless overridden.
        """
        raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))