# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from twisted.internet import defer
from twisted.internet.error import (
    TimeoutError, DNSLookupError,
    ConnectionRefusedError,
    ConnectionDone, ConnectError,
    ConnectionLost, TCPTimedOutError)
from scrapy.exceptions import NotConfigured
from scrapy.core.downloader.handlers.http11 import TunnelError
from scrapy.http import HtmlResponse
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.utils.response import response_status_message
from twisted.web.client import ResponseFailed
import crawler.ippool as ippool
import time
import logging
import random
# useful for handling different item types with a single interface


class CrawlerSpiderMiddleware:
    """Pass-through spider middleware from the Scrapy project template.

    Every hook forwards its input unchanged; the class exists as a place
    to plug in per-spider processing later.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so start-up gets logged.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response untouched; returning None means
        # "continue processing".
        return None

    def process_spider_output(self, response, result, spider):
        # Forward each request/item produced by the spider as-is.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling: let other middlewares / the engine decide.
        return None

    def process_start_requests(self, start_requests, spider):
        # Start requests are emitted unchanged (must yield only requests).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class CrawlerDownloaderMiddleware:
    """Pass-through downloader middleware from the Scrapy project template.

    All hooks are no-ops that hand control back to the framework.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: instantiate and hook the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None tells Scrapy to keep processing the request through the
        # remaining downloader middlewares.
        return None

    def process_response(self, request, response, spider):
        # Hand the response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # None lets the next middleware's process_exception() run.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class ProxyMiddleware:
    """Attach a proxy from the shared IP pool and a random desktop
    User-Agent to every outgoing request."""

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls()
        # Tie the IP pool's lifecycle to the spider's lifecycle.
        crawler.signals.connect(
            middleware.spider_closed, signal=signals.spider_closed)
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def __init__(self):
        # Desktop browser User-Agent strings rotated once per request.
        self.ua_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0",
        ]

    def process_request(self, request, spider):
        """Route the request through a pooled proxy (when one is
        available) and randomize its User-Agent header."""
        proxy_ip = ippool.get()
        if proxy_ip:
            request.meta['proxy'] = f"http://{proxy_ip}"
            # Remember the bare IP so failure handlers can evict it.
            request.meta['proxy_ip'] = proxy_ip
        request.headers['User-Agent'] = random.choice(self.ua_list)

    @staticmethod
    def spider_closed():
        ippool.close()

    @staticmethod
    def spider_opened():
        ippool.init()


class DelayMiddleware:
    """Throttle requests inversely to the number of proxies in the pool.

    With many proxies each individual IP is hit rarely, so little extra
    delay is needed; as the pool shrinks, the per-request delay grows
    toward DOWNLOAD_DELAY_DEFAULT.
    """

    def __init__(self, min_delay, default_delay):
        # min_delay: the delay Scrapy itself already applies
        # (DOWNLOAD_DELAY); we only sleep for the excess over this.
        self.min_delay = min_delay
        # default_delay: target total delay when the pool is empty.
        self.default_delay = default_delay

    @classmethod
    def from_crawler(cls, crawler):
        # Read crawler.settings directly instead of crawler.spider.settings:
        # it is the same settings object, but is available even when the
        # spider instance has not been attached to the crawler yet.
        min_delay = crawler.settings.get("DOWNLOAD_DELAY", 0.1)
        default_delay = crawler.settings.get("DOWNLOAD_DELAY_DEFAULT", 2)
        return cls(min_delay, default_delay)

    def process_request(self, request, spider):
        """Sleep so total request spacing ~= default_delay / (pool size + 1)."""
        # NOTE(review): time.sleep blocks the reactor thread, stalling all
        # concurrent requests, not just this one — confirm this is intended.
        size = ippool.size()
        delay = self.default_delay / (size + 1)
        if delay > self.min_delay:
            # Scrapy already waits min_delay; only sleep the difference.
            time.sleep(delay - self.min_delay)


def _remove_proxy(request):
    '''
    # 移除无效的代理IP
    '''
    if 'proxy_ip' in request.meta:
        ippool.remove(request.meta['proxy_ip'])
        del request.meta['proxy']
        del request.meta['proxy_ip']


class ExceptionMiddleware:
    """Log connection-level failures and evict the proxy that caused them."""

    # Exception types treated as transient connection failures (mirrors
    # the retry set used by Scrapy's RetryMiddleware).
    EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError, TunnelError)

    def process_exception(self, request, exception, spider):
        """Returns None so downstream process_exception() hooks still run.

        On a retryable connection error (unless the request opted out via
        dont_retry), log the failure and drop the request's proxy via the
        shared _remove_proxy helper.
        """
        if (
            isinstance(exception, self.EXCEPTIONS_TO_RETRY)
            and not request.meta.get('dont_retry', False)
        ):
            # Lazy %-style args avoid formatting when the log is filtered.
            logging.error("请求发生异常, url = %s,\n exception=%r",
                          request.url, exception)
            # Reuse the module-level helper instead of duplicating its logic.
            _remove_proxy(request)


class CustomRetryMiddleware(RetryMiddleware):
    """Retry middleware that recycles dead proxies and, when retries are
    exhausted or a non-retryable 4xx arrives, hands the spider a sentinel
    response whose url is '' (the spider detects failures via that url)."""

    def process_response(self, request, response, spider):
        # Honor the per-request opt-out first.
        if request.meta.get('dont_retry', False):
            return response
        status = response.status
        if status in self.retry_http_codes:
            reason = response_status_message(status)
            # The proxy likely caused the bad status; evict it before retrying.
            _remove_proxy(request)
            retried = self._retry(request, reason, spider)
            return retried if retried else response
        if str(status).startswith('4'):
            # Deliberate hack: wrap the failure in an empty-url response so
            # spider code can recognize it via response.url == ''.
            request.meta['exception'] = True
            return HtmlResponse(url='', request=request)
        return response

    def process_exception(self, request, exception, spider):
        retryable = isinstance(exception, self.EXCEPTIONS_TO_RETRY)
        if not retryable or request.meta.get('dont_retry', False):
            return None
        # Drop the proxy that failed, then let the base class reschedule.
        _remove_proxy(request)
        retried = self._retry(request, exception, spider)
        if retried:
            return retried
        # Retries exhausted: emit the empty-url sentinel response.
        request.meta['exception'] = True
        return HtmlResponse(url='', request=request)

