import queue
from hjl_spider.spider.Crawl import CrawlSpider
from hjl_spider.download import HTTP
from hjl_spider.stockpile import DATA
import threading


class CorSpider(CrawlSpider):
    """Thread-based crawler.

    Runs a fixed pool of worker threads, each alternating between
    downloading pending requests and parsing queued responses, until
    both the URL manager and the response queue are drained.
    """

    # Bounded buffer passing responses from download() to analysis().
    # NOTE(review): this is a class-level attribute, shared by ALL
    # CorSpider instances rather than per-instance — confirm intended.
    response_queue = queue.Queue(50000)  # response queue
    INIT_NAME = "corspider"

    def download(self):
        """Downloader: pop one pending request, fetch it, enqueue the response.

        No-op when the URL manager reports no pending requests; a falsy
        (failed) response is silently dropped.
        """
        if not self.url_manage.is_nose():
            request = self.url_manage.gets()
            response = request.download()
            if response:  # download succeeded
                self.response_queue.put(response)

    def download_process(self):
        # NOTE(review): appears to be an unfinished stub — it reads the
        # SPIDER_HEADERS setting and discards the result. Kept as-is
        # since self.setting is project code; confirm whether this
        # method can be completed or removed.
        headers = self.setting.get("SPIDER_HEADERS", None)

    def analysis(self):
        """Parser: pop one response and dispatch the callback's results.

        The response's ``move`` attribute names the user-defined
        callback method on this spider. Each object the callback yields
        must be either an ``HTTP.HttpRequest`` (re-queued for download)
        or a ``DATA.DataFile`` (sent to the pipeline).

        :raises Exception: if the callback returns ``None``, or yields
            an object that is neither a request nor a data item.
        """
        if not self.response_queue.empty():
            response = self.response_queue.get()
            move = response.move  # name of the user callback for this response
            data = getattr(self, move)(response)
            if data is None:
                raise Exception("没有得到一个好的返回值")

            for item in data:  # dispatch each yielded object by type
                if isinstance(item, HTTP.HttpRequest):
                    # Propagate spider settings to the follow-up request.
                    item.kwargs['setting'] = self.setting
                    self.url_manage.gets_is_download(item)

                elif isinstance(item, DATA.DataFile):
                    self.pipline(item)

                else:
                    raise Exception("这不是一个合法的返回，返回应该是一个item或者httprequest")

    def dispatch(self):
        """Worker loop: download and parse until all work is drained.

        NOTE(review): with several worker threads, both queues can be
        momentarily empty while another thread is still processing an
        item that will produce new work, so workers may exit early.
        A proper fix needs task accounting (e.g. Queue.task_done/join);
        behavior left unchanged here — confirm before relying on a
        clean shutdown under load.
        """
        while True:  # crawl loop
            responses_empty = self.response_queue.empty()
            urls_done = self.url_manage.is_nose()

            if responses_empty and urls_done:  # both drained: crawl finished
                break

            self.download()  # fetch one pending request
            self.analysis()  # parse one queued response

    def run(self):
        """Entry point: seed the start URLs, then run the worker pool."""
        self.start_url_sp()  # initialize the starting URLs

        # Fixed pool of 3 worker threads, each running the combined
        # download/parse loop. (The original comment claimed "6
        # processes"; the code actually starts 3 threads.)
        workers = []
        for _ in range(3):
            worker = threading.Thread(target=self.dispatch)
            workers.append(worker)
            worker.start()

        for worker in workers:
            worker.join()
