import json
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import TimeoutError
from hashlib import md5
from queue import Empty, LifoQueue, Queue

import requests
from requests.exceptions import HTTPError, ConnectTimeout, SSLError

from Error import *
from Pipeline import DefaultPipeline
from ResultProcessor import DefaultResultProcessor


def download(request_item, sleep_time):
    """
    Downloader: execute the HTTP request described by *request_item*,
    retrying on connection timeouts and SSL failures.

    :param sleep_time: seconds to sleep after a successful request (throttle)
    :param request_item: request wrapper; ``.get()`` returns the request dict
                         and ``.verify`` toggles TLS certificate verification
    :return: the response (requests.Response)
    :raises NotFoundError: on HTTP 404
    :raises ForbiddenError: on HTTP 403
    :raises TimeoutError: when every retry attempt has been used up
    :raises ValueError: if the method is neither GET nor POST
    """
    # Read the request description once; the original called .get() for every
    # single field, which is wasteful (and wrong if .get() ever consumes).
    item = request_item.get()
    method = item['method'].upper()
    retry_times = item['retry_times']
    url = item['url']
    headers = item['headers']
    timeout = item['timeout']
    params = item['params']
    request_body = item['request_body']
    if method not in ('GET', 'POST'):
        # The original silently spun forever on an unknown method: nothing in
        # the loop body executed and retry_times was never decremented.
        raise ValueError('unsupported HTTP method: ' + method)
    while retry_times > 0:
        try:
            if method == 'GET':
                print('发送get请求, url=' + url + ' 剩余尝试次数: ' + str(retry_times))
                res = requests.get(url, params=params, headers=headers,
                                   timeout=timeout, verify=request_item.verify)
            else:  # POST
                # NOTE(review): POST mirrors the original and sends neither
                # headers nor params — confirm whether that was intended.
                res = requests.post(url, data=json.dumps(request_body),
                                    timeout=timeout, verify=request_item.verify)
            # Without this, the HTTPError handler below was dead code:
            # requests does not raise on 4xx/5xx status codes by itself.
            res.raise_for_status()
            time.sleep(sleep_time)
            return res
        except ConnectTimeout:
            print('连接超时, 尝试重连')
            retry_times -= 1
        except HTTPError as e:
            if e.response.status_code == 404:
                raise NotFoundError
            elif e.response.status_code == 403:
                raise ForbiddenError
            else:
                raise
        except SSLError:
            retry_times -= 1
            print('SSL证书验证失败, 尝试关闭证书验证, 剩余尝试次数: ' + str(retry_times))
            request_item.verify = False
    # All retries exhausted without a successful response.
    raise TimeoutError


class Spider(object):
    def __init__(self, request_items, pipeline=None, result_processor=None, thread_num=3, headers_list=None, retry_times=3, sleep_time=1):
        """
        :param request_items: list of RequestItem objects to crawl
        :param pipeline: pipeline object used to persist the final results
        :param result_processor: object that post-processes each response text
        :param thread_num: number of worker threads in the pool
        :param headers_list: optional list of headers dicts to rotate through
        :param retry_times: default retry count
        :param sleep_time: delay between requests, in seconds (float allowed)
        """
        self.request_items = request_items
        self.pipeline = DefaultPipeline() if pipeline is None else pipeline
        self.result_processor = DefaultResultProcessor() if result_processor is None else result_processor
        self.retry_times = retry_times
        self.thread_num = thread_num
        self.request_queue = LifoQueue()
        self.headers_list = headers_list
        self.headers_list_i = 0
        self.sleep_time = sleep_time
        self.response_queue = Queue()
        self.result_md5_set = set()
        # Retained for backward compatibility only; filter() now builds a
        # fresh md5 object per response instead of reusing this shared one.
        self.m5 = md5()
        self.request_done = False
        # Guards result_md5_set, which is touched from executor callbacks.
        self.lock = threading.Lock()
        self.create()

    def has_headers(self):
        """
        Check whether any unused headers remain in the rotation list.
        :return: True if get_headers() can be called safely, else False
        """
        # Guard against headers_list being None (the constructor default);
        # the original crashed with a TypeError on len(None).
        if self.headers_list is None:
            return False
        return self.headers_list_i < len(self.headers_list)

    def get_headers(self):
        """
        Return the next headers dict and advance the cursor.
        Call has_headers() first; raises IndexError when exhausted.
        :return: a headers dict
        """
        res = self.headers_list[self.headers_list_i]
        self.headers_list_i += 1
        return res

    def filter(self, response):
        """
        Deduplicate responses: hash the body with MD5 and enqueue the text
        only for responses whose digest has not been seen before.
        :param response: requests.Response
        :return: None
        """
        # Use a fresh md5 per response. The old code reused one shared md5
        # instance, so each digest covered the concatenation of every body
        # seen so far: identical responses produced different digests and
        # deduplication never actually worked.
        hexdigest = md5(response.content).hexdigest()
        # filter() runs from executor callbacks on worker threads, so the
        # shared set must be protected by the lock.
        with self.lock:
            if hexdigest in self.result_md5_set:
                return
            self.result_md5_set.add(hexdigest)
        self.response_queue.put(response.text)

    def create(self):
        """
        Load the request items into the request queue. Items are pushed in
        reverse so the LIFO queue pops them in their original order.
        :return: None
        """
        for item in reversed(self.request_items):
            self.request_queue.put(item)

    def run(self):
        """
        Create the thread pool: the earlier submissions crawl pages, and a
        final task collects the processed results for the pipeline.
        :return: None
        """
        with ThreadPoolExecutor(max_workers=self.thread_num) as t:
            print('添加请求到线程池》》》')
            while not self.request_queue.empty():
                request_item = self.request_queue.get()
                future = t.submit(download, request_item, self.sleep_time)
                # The callback filters/deduplicates each finished download.
                future.add_done_callback(self.executor_callback)
            # The processor task shares the pool; it starts once a worker
            # frees up and drains the response queue until it goes quiet.
            future = t.submit(self.processor_thread)
            self.pipeline.process(future.result())

    def processor_thread(self):
        """
        Result-processing task: drain the response queue, run each text
        through the result processor, and collect the outputs.
        :return: list of processed results
        """
        res_list = []
        while True:
            try:
                print('response queue size: ' + str(self.response_queue.qsize()))
                # If the queue stays empty for 4 seconds, assume crawling
                # has finished.
                response_text = self.response_queue.get(timeout=4)
            except Empty:
                return res_list
            # Process outside the try so processor bugs surface instead of
            # being silently swallowed by the old broad `except Exception`.
            res = self.result_processor.process(response_text)
            res_list.append(res)
            self.response_queue.task_done()

    def executor_callback(self, task):
        """
        Done-callback for download futures: enqueue successful responses
        through filter(), and report the known failure types.
        :param task: the completed Future
        :return: None
        """
        exception = task.exception()
        if exception is None:
            # The future is already done, so result() returns immediately.
            self.filter(task.result())
        elif isinstance(exception, NotFoundError):
            print('not found')
        elif isinstance(exception, ForbiddenError):
            print('add cookie')
        elif isinstance(exception, TimeoutError):
            print('timeout error')
        else:
            print(str(exception))

