from concurrent.futures import ThreadPoolExecutor
from queue import LifoQueue
from v3.Error import *
from v3.Pipeline import DefaultPipeline
from v3.ResultProcessor import DefaultResultProcessor
import sys
import threading
from v3.Downloader import DefaultDownloader, Filter
from v3.Page import Page
import copy
import logging
# Configure the root logger once at import time; all module loggers propagate here.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s line:%(lineno)d [%(levelname)s] %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')

# Use a module-scoped logger (stdlib convention) instead of the root logger;
# records still reach the root handler installed by basicConfig above.
logger = logging.getLogger(__name__)
logger.info('start')


class Spider(object):
    """Threaded crawler driver.

    Loads RequestItems into a LIFO queue, fans them out over a thread pool,
    downloads each one, runs the result processor on the response, and hands
    the finished Page to the pipeline via a done-callback.
    """

    def __init__(self, request_items,
                 sock_puppet,
                 pipeline=None,
                 result_processor=None,
                 thread_num=3,
                 retry_times=3,
                 sleep_time=0.5):
        """
        :param request_items: list of RequestItem objects to crawl
        :param sock_puppet: headers/identity provider handed to each downloader
        :param pipeline: pipeline object; DefaultPipeline() when None
        :param result_processor: result-processing object; DefaultResultProcessor() when None
        :param thread_num: number of worker threads in the pool
        :param retry_times: retry count stored on the spider
        :param sleep_time: delay between requests in seconds (float allowed)
        """
        self.request_items = request_items
        self.pipeline = DefaultPipeline() if pipeline is None else pipeline
        self.result_processor = DefaultResultProcessor() if result_processor is None else result_processor
        self.retry_times = retry_times
        self.thread_num = thread_num
        self.request_queue = LifoQueue()
        self.sock_puppet = sock_puppet
        self.sleep_time = sleep_time
        self.thread_pool = None
        self.filter = Filter()
        self.future_list = []
        self.lock = threading.Lock()
        # Pre-load the request queue so run() can start consuming immediately.
        self.create()

    def download_and_process(self, page, sleep_time):
        """Download the page's request item and run the result processor on it.

        Recurses to re-download when the processor sets ``page.re_download`` or
        when the server forbids the current headers and a replacement headers
        set is available from the sock puppet.

        :param page: Page carrying the RequestItem to fetch
        :param sleep_time: delay passed through to the downloader, seconds
        """
        request_item = page.get_request_item()
        downloader = DefaultDownloader(sock_puppet=self.sock_puppet)
        # i identifies which headers set the downloader used, so it can be
        # invalidated if the server rejects it.
        res, i = downloader.download(request_item=request_item, sleep_time=sleep_time)
        response_text = None
        if res:
            response_text = self.filter.filter(response=res)
        if response_text:
            # Deep-copy so concurrent worker threads never share processor state.
            result_processor = copy.deepcopy(self.result_processor)
            page.set_download_result(response_text)
            page.set_request_item(request_item)
            try:
                result_processor.process(page)
                if page.re_download is True:
                    # Processor asked for a refetch; clear the flag first so we
                    # do not loop forever if the next pass succeeds.
                    page.re_download = False
                    self.download_and_process(page, sleep_time)
            except ForbiddenError:
                self.sock_puppet.set_headers_invalid(i)
                if self.sock_puppet.has_headers():
                    logger.warning('headers失效, 尝试更换headers')
                    # Reset the retry budget before retrying with new headers.
                    page.get_request_item().get()['retry_times'] = 1
                    self.download_and_process(page, sleep_time)

    def spider_thread_execute(self, request_item, sleep_time):
        """Worker-thread entry point: wrap the request in a Page, crawl it,
        and return the processed Page for the done-callback.

        :param request_item: the RequestItem to crawl
        :param sleep_time: delay between requests, seconds
        :return: the Page after download and result processing
        """
        page = Page()
        page.set_request_item(request_item)
        self.download_and_process(page, sleep_time)
        return page

    def create(self):
        """Load every request item into the LIFO request queue.

        Items are pushed in reverse so that the LIFO queue pops them back in
        their original list order.
        """
        for item in reversed(self.request_items):
            self.request_queue.put(item)

    def run(self):
        """Drain the request queue through a thread pool; each future's result
        is handed to :meth:`executor_callback` when it completes.
        """
        logger.info('添加请求到线程池》》》')
        with ThreadPoolExecutor(max_workers=self.thread_num, thread_name_prefix='spider') as t:
            while not self.request_queue.empty():
                request_item = self.request_queue.get()
                future = t.submit(self.spider_thread_execute, request_item, self.sleep_time)
                future.add_done_callback(self.executor_callback)
                self.future_list.append(future)

    def executor_callback(self, task):
        """Done-callback for a finished worker future: push the resulting Page
        through the pipeline, or re-raise the worker's exception.

        :param task: the completed Future
        """
        exception = task.exception()
        if exception is None:
            page = task.result()
            try:
                if page:
                    self.pipeline.process(page)
            except Exception as e:
                # logger.exception keeps the traceback, unlike logger.error(e).
                logger.exception(e)
                # NOTE(review): SystemExit raised here only terminates the
                # callback's thread, not the whole program — confirm intent.
                sys.exit(-1)
        else:
            # NOTE(review): exceptions raised inside done-callbacks are caught
            # and logged by the executor, not propagated to the caller.
            raise exception

