from hjl_spider.download import HTTP
from hjl_spider.stockpile import DATA
from hjl_spider.manage import Urlmanage


class CrawlSpider(object):
    """Base crawler.

    Seeds a URL manager with ``start_url``, then drains it: each request
    is downloaded, the response is dispatched to the method named by
    ``response.move`` (user-defined parse hook), and anything the hook
    yields is either re-queued (requests) or persisted (data items).
    """

    start_url = []       # seed URLs; subclasses override
    priserver = []       # persistence backends driven by save()
    INIT_NAME = "crawl"

    def __init__(self, setting: dict):
        # URL manager instance plus the user-supplied configuration dict.
        self.url_manage = Urlmanage.CrawlManage(None)
        self.setting = setting

    def pares(self, response):
        """User-defined parse hook; subclasses override to yield
        new requests and/or extracted data items."""
        yield None

    def pipline(self, item):
        """User-defined persistence hook; subclasses override to
        store a single extracted item."""
        pass

    def start_url_sp(self):
        """Seed the URL manager with one request per configured start URL."""
        for seed in self.start_url:
            req = HTTP.HttpRequest(seed, setting=self.setting)
            self.url_manage.gets_is_download(req)

    def save(self):
        """Run every registered persistence backend in ``priserver``."""
        for backend in self.priserver:
            backend.run()

    def run(self):
        """Main crawl loop: seed, drain the manager, dispatch, persist."""
        self.start_url_sp()

        # Keep pulling until the manager reports it is exhausted.
        while not self.url_manage.is_nose():
            req = self.url_manage.gets()
            resp = req.download

            # Dispatch to the parse method named by the response itself.
            handler = getattr(self, resp.move)
            for produced in handler(resp):
                if isinstance(produced, HTTP.HttpRequest):
                    # A newly discovered request: queue it for download.
                    self.url_manage.gets_is_download(produced)
                elif isinstance(produced, DATA.DataFile):
                    # An extracted item: hand it to the pipeline hook.
                    self.pipline(produced)

        self.save()
