from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import TimeoutError
from queue import LifoQueue
import json
import requests
from requests.exceptions import HTTPError, ConnectTimeout, SSLError
from v2.Error import *
from hashlib import md5
from v2.Pipeline import DefaultPipeline
from v2.ResultProcessor import DefaultResultProcessor
import time
import sys
import threading


class Spider(object):
    """Multi-threaded crawler.

    Pulls request items off a LIFO queue, downloads them on a thread pool
    with rotating headers (a headers entry is retired after an HTTP 403),
    de-duplicates response bodies by MD5 and hands processed results to a
    pipeline via the future done-callback.
    """

    def __init__(self, request_items,
                 pipeline=None,
                 result_processor=None,
                 thread_num=3,
                 headers_list=None,
                 retry_times=3,
                 sleep_time=1):
        """
        :param request_items: list of RequestItem objects to crawl
        :param pipeline: pipeline object; DefaultPipeline when None
        :param result_processor: result processor; DefaultResultProcessor when None
        :param thread_num: number of worker threads
        :param headers_list: list of headers dicts to rotate through
        :param retry_times: default retry budget (kept for callers; per-item
                            budgets come from each RequestItem)
        :param sleep_time: politeness delay between requests, in seconds
                           (floats allowed)
        """
        self.request_items = request_items
        self.pipeline = DefaultPipeline() if pipeline is None else pipeline
        self.result_processor = DefaultResultProcessor() if result_processor is None else result_processor
        self.retry_times = retry_times
        self.thread_num = thread_num
        self.request_queue = LifoQueue()
        self.headers_list = headers_list
        self.len_headers_list = len(self.headers_list)
        self.headers_list_i = 0
        self.sleep_time = sleep_time
        # MD5 hex digests of response bodies already seen (dedup filter).
        self.result_md5_set = set()
        # Parallel to headers_list: flag flips to False once that headers
        # entry gets a 403 and is considered burned.
        self.headers_list_can_use = [True] * self.len_headers_list
        self.request_done = False
        self.thread_pool = None
        self.future_list = []
        # Guards headers_list_can_use and the dedup set across worker threads.
        self.lock = threading.Lock()
        self.create()

    def try_to_change_headers(self):
        """Return the first still-usable ``(headers, index)`` pair.

        Returns ``(None, None)`` when every headers entry has been burned.
        Callers must hold ``self.lock`` while the flags are inspected.

        BUG FIX: the previous loop ran ``range(0, len - 1)`` and therefore
        never considered the last headers entry.
        """
        for idx in range(len(self.headers_list_can_use)):
            if self.headers_list_can_use[idx] is True:
                return self.headers_list[idx], idx
        return None, None

    def download(self, request_item, sleep_time, headers, i):
        """Perform one HTTP request with retry and headers-rotation logic.

        :param request_item: RequestItem describing the request
        :param sleep_time: seconds to sleep after a successful request
        :param headers: headers dict to start with
        :param i: index of ``headers`` within ``headers_list``
        :return: ``(requests.Response, dict)`` — the response plus a snapshot
                 of the request parameters actually used (including which
                 headers index ended up being used)
        :raises NotFoundError: on HTTP 404
        :raises TimeoutError: when the retry budget is exhausted
        :raises ValueError: for HTTP methods other than GET/POST
        """
        current_headers = headers
        current_i = i
        item = request_item.get()
        method = item['method'].upper()
        retry_times = item['retry_times']
        url = item['url']
        timeout = item['timeout']
        params = item['params']
        request_body = item['request_body']

        # BUG FIX: an unsupported method previously spun the retry loop
        # forever (neither branch taken, counter never decremented).
        if method not in ('GET', 'POST'):
            raise ValueError('unsupported HTTP method: ' + method)

        while retry_times > 0:
            try:
                if method == 'GET':
                    print('发送get请求, url=' + url + ' 剩余尝试次数: ' + str(retry_times))
                    res = requests.get(url, params=params, headers=current_headers, timeout=timeout,
                                       verify=request_item.verify)
                else:  # POST
                    res = requests.post(url, data=json.dumps(request_body), headers=current_headers,
                                        timeout=timeout, verify=request_item.verify)
                # BUG FIX: requests never raises HTTPError on its own, so the
                # 404/403 handlers below were unreachable without this call.
                res.raise_for_status()
                time.sleep(sleep_time)
                return res, {
                    'url': url,
                    'method': method,
                    'params': params,
                    'request_body': request_body,
                    'headers': current_headers,
                    # BUG FIX: report the headers index actually used, not the
                    # one we started with (they differ after a 403 swap).
                    'i': current_i,
                }
            except ConnectTimeout:
                print('连接超时, 尝试重连')
                retry_times -= 1
            except HTTPError as e:
                status = e.response.status_code
                if status == 404:
                    raise NotFoundError(request_item)
                elif status == 403:
                    with self.lock:
                        print('headers失效, 尝试更换headers')
                        # Burn the headers entry we are *currently* using.
                        self.headers_list_can_use[current_i] = False
                        print(self.headers_list_can_use)
                        new_headers, new_i = self.try_to_change_headers()
                    if new_headers is None:
                        print('headers已用完, 爬虫退出')
                        retry_times = 0
                    else:
                        current_headers = new_headers
                        current_i = new_i
                else:
                    raise
            except SSLError:
                retry_times -= 1
                print('SSL证书验证失败, 尝试关闭证书验证, 剩余尝试次数: ' + str(retry_times))
                # Retry the same request with certificate verification off.
                request_item.verify = False
        raise TimeoutError

    def get_headers(self):
        """Round-robin over ``headers_list``; return ``(headers, index)``.

        When there are more threads than headers entries, the extra threads
        wrap around and reuse earlier entries.
        """
        idx = self.headers_list_i
        headers = self.headers_list[idx]
        self.headers_list_i = (idx + 1) % self.len_headers_list
        return headers, idx

    def spider_thread_execute(self, request_item, sleep_time, headers, i):
        """Worker body: download, de-duplicate, process.

        On ForbiddenError the burned headers entry is retired and the item
        is retried once (retry budget forced to 1) with replacement headers.

        BUG FIXES: the retried call's result is now returned so the done
        callback can feed it into the pipeline (it was previously dropped),
        and the original RequestItem is no longer clobbered by the download
        snapshot dict (``dict.get()`` with no args raised TypeError on retry).

        :param request_item: RequestItem to crawl
        :param sleep_time: politeness delay passed through to download()
        :param headers: headers dict to use
        :param i: index of ``headers`` within ``headers_list``
        :return: processed result, or None on failure/duplicate
        """
        try:
            res, request_info = self.download(request_item=request_item, sleep_time=sleep_time,
                                              headers=headers, i=i)
            response_text = self.filter(res)
            if response_text:
                return self.result_processor.process(response_text=response_text, request_item=request_info)
        except ForbiddenError as e:
            print('headers: ' + str(e.request_item['headers']) + ' 访问次数已经上限, 尝试更换headers')
            with self.lock:
                self.headers_list_can_use[e.request_item['i']] = False
                new_headers, new_i = self.try_to_change_headers()
            if new_headers:
                # Single retry with the replacement headers.
                request_item.get()['retry_times'] = 1
                return self.spider_thread_execute(request_item=request_item, sleep_time=sleep_time,
                                                  headers=new_headers, i=new_i)
            print('headers数已经用完, 爬虫退出')
        except Exception as e:
            # Best effort: log and drop this request rather than kill the worker.
            print(e)

    def filter(self, response):
        """De-duplicate responses by the MD5 of their raw body.

        BUG FIX: a fresh digest is computed per call. The old shared
        ``self.m5`` object accumulated every body it ever saw, so identical
        content hashed differently each time and deduplication never worked;
        the shared object was also mutated concurrently from worker threads.
        The check-and-add is done under the lock so two threads cannot both
        claim the same body.

        :param response: requests.Response
        :return: ``response.text`` for unseen content, else None
        """
        hexdigest = md5(response.content).hexdigest()
        with self.lock:
            if hexdigest in self.result_md5_set:
                return None
            self.result_md5_set.add(hexdigest)
        return response.text

    def create(self):
        """Seed the LIFO queue so items pop in their original list order."""
        for item in reversed(self.request_items):
            self.request_queue.put(item)

    def run(self):
        """Drain the request queue into a thread pool and wait for completion."""
        print('添加请求到线程池》》》')
        with ThreadPoolExecutor(max_workers=self.thread_num, thread_name_prefix='spider') as t:
            while not self.request_queue.empty():
                request_item = self.request_queue.get()
                headers, i = self.get_headers()
                future = t.submit(self.spider_thread_execute, request_item, self.sleep_time, headers, i)
                future.add_done_callback(self.executor_callback)
                self.future_list.append(future)

    def executor_callback(self, task):
        """Done-callback: push a successful task's result into the pipeline.

        Exceptions raised inside the task itself are ignored here — the
        worker already logged them.

        :param task: the completed concurrent.futures.Future
        """
        if task.exception() is not None:
            return
        res = task.result()
        try:
            if res:
                self.pipeline.process(result_item=res)
        except Exception as e:
            print(e)
            # NOTE(review): sys.exit in a done-callback only raises SystemExit
            # in the worker thread; it does not stop the whole program.
            sys.exit(-1)


