# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import json
import random
import time
import traceback
import os
from proxy.proxy_pool import Proxy
# Exempt the Elasticsearch service host from any system/HTTP proxy —
# NO_PROXY is honored by common HTTP clients (e.g. requests, imported
# below).  NOTE(review): set at import time, before any request is made;
# presumably so internal service calls never go through the crawl proxies
# — confirm against the callers that hit this host.
os.environ['NO_PROXY'] = 'elasticservice.azurewebsites.net'
from scrapy import signals
import requests
from scrapy.http import HtmlResponse

from twisted.internet import defer
from twisted.internet.error import TimeoutError, DNSLookupError, \
    ConnectionRefusedError, ConnectionDone, ConnectError, \
    ConnectionLost, TCPTimedOutError
from twisted.web.client import ResponseFailed
from scrapy.core.downloader.handlers.http11 import TunnelError
from scrapy.utils.project import get_project_settings

# Project-wide Scrapy settings, loaded once at import time.  Not referenced
# anywhere in this module's visible code — presumably imported by other
# modules or kept for future use; TODO confirm before removing.
setting = get_project_settings()




class ProcessAllException_ProxyMiddleware:
    """Downloader middleware that handles download errors and manages proxies.

    On 4xx/5xx responses or recoverable download exceptions it increments
    the failing proxy's error counter, returns the proxy to the pool and
    reschedules the request.  Each URL gets up to ``MAX_ERROR_CYCLES``
    error-handling cycles; every cycle resets ``retry_times`` so the
    built-in RetryMiddleware can retry up to 3 more times.  URLs that
    still fail are recorded in ``abandon_url`` and dumped to JSON when
    the spider closes.
    """

    # Exception types treated as recoverable download failures.
    ALL_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
                      ConnectionRefusedError, ConnectionDone, ConnectError,
                      ConnectionLost, TCPTimedOutError, ResponseFailed,
                      IOError, TunnelError)

    # Maximum number of error-handling cycles before a URL is abandoned
    # (previously a hard-coded literal inside error_handle).
    MAX_ERROR_CYCLES = 20

    def __init__(self):
        self.proxy_pool = Proxy()
        # Per-URL error bookkeeping (only written here, never read —
        # kept for debugging / external inspection).
        self.error_url_info = {}
        # URLs that exhausted their retry budget.
        self.abandon_url = []

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and connect signals."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_response(self, request, response, spider):
        """Penalise the proxy and reschedule on 4xx/5xx; pass through otherwise."""
        # NOTE(review): the original indexed request.meta['proxy_info']
        # directly; use .get() so requests without proxy info don't crash.
        proxy_info = request.meta.get('proxy_info')

        if 400 <= response.status < 600:
            # Log the abnormal status code.
            print('url:{}的状态码异常: {}'.format(response.url, response.status))
            if proxy_info is not None:
                # Count the failure against this proxy.
                proxy_info['error_time'] = proxy_info.get('error_time', 0) + 1
                # BUG FIX: the proxy was never returned to the pool on the
                # error path, leaking it from the pool.
                self.proxy_pool.back(proxy_info)
            retry_request = self.error_handle(request)
            if retry_request is not None:
                return retry_request
            # Retry budget exhausted: process_response must return a
            # Response or Request in Scrapy, so hand back the failed
            # response (the original returned None here, which is invalid).
            return response

        # Healthy response: reset the error counter and recycle the proxy.
        request.meta['error_status'] = 0
        if proxy_info is not None:
            self.proxy_pool.back(proxy_info)
        return response

    def process_exception(self, request, exception, spider):
        """Penalise the proxy, log the exception, and reschedule the request."""
        proxy_info = request.meta.get('proxy_info')
        if proxy_info is not None:
            # Consistency fix: use .get() like process_response does, so a
            # proxy dict without an 'error_time' key cannot raise KeyError
            # (the original did proxy_info['error_time'] += 1).
            proxy_info['error_time'] = proxy_info.get('error_time', 0) + 1
            self.proxy_pool.back(proxy_info)

        if isinstance(exception, self.ALL_EXCEPTIONS):
            # A known, recoverable download failure.
            print('url:{},捕获异常: {}'.format(request.url, exception))
        else:
            # BUG FIX: this message previously printed for *every*
            # exception because there was no else-branch.
            print('not contained exception: %s' % exception)
        return self.error_handle(request)

    def error_handle(self, request):
        """Build a retry copy of *request*, or give up after the budget.

        Returns a copy of the request with the dupe-filter disabled,
        ``retry_times`` reset (so RetryMiddleware gets 3 fresh attempts),
        the proxy cleared (so the proxy middleware assigns a new one) and
        the ``error_status`` counter bumped.  Returns ``None`` once the
        URL has exceeded ``MAX_ERROR_CYCLES``; the URL is then recorded
        in ``abandon_url``.
        """
        error_status = request.meta.get('error_status', 0)
        if error_status > self.MAX_ERROR_CYCLES:
            # Give up on this URL entirely.
            self.abandon_url.append(request.url)
            return None

        new_request = request.copy()
        # The retry must not be dropped by the duplicate filter.
        new_request.dont_filter = True
        # Reset retry_times so the built-in RetryMiddleware re-engages.
        new_request.meta['retry_times'] = 0
        # Drop the old proxy; the proxy middleware will pick a fresh one.
        new_request.meta['proxy'] = None
        new_request.meta['error_status'] = error_status + 1
        return new_request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def spider_closed(self, spider):
        """Close the proxy-pool connection and persist error URLs.

        Writes ``abandon_url`` and the spider's ``txt_url_error_li`` to
        ``<spider.JSON_DIR>/<spider.name>_*.json``.
        """
        self.proxy_pool.r.close()
        with open('{}/{}_abandon_url.json'.format(spider.JSON_DIR, spider.name), 'w', encoding='utf-8') as f:
            json.dump(self.abandon_url, f)
        with open('{}/{}_txt_url_error_li.json'.format(spider.JSON_DIR, spider.name), 'w', encoding='utf-8') as f:
            json.dump(spider.txt_url_error_li, f)


class ProcessAllExceptionMiddleware:
    """Downloader middleware that retries failed requests (no proxy pool).

    On 4xx/5xx responses or recoverable download exceptions it reschedules
    the request.  Each URL gets up to ``MAX_ERROR_CYCLES`` error-handling
    cycles; every cycle resets ``retry_times`` so the built-in
    RetryMiddleware can retry up to 3 more times.  URLs that still fail
    are recorded in ``abandon_url`` and dumped to JSON when the spider
    closes.
    """

    # Exception types treated as recoverable download failures.
    ALL_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
                      ConnectionRefusedError, ConnectionDone, ConnectError,
                      ConnectionLost, TCPTimedOutError, ResponseFailed,
                      IOError, TunnelError)

    # Maximum number of error-handling cycles before a URL is abandoned
    # (previously a hard-coded literal inside error_handle).
    MAX_ERROR_CYCLES = 20

    def __init__(self):
        # Per-URL error bookkeeping (only written here, never read —
        # kept for debugging / external inspection).
        self.error_url_info = {}
        # URLs that exhausted their retry budget.
        self.abandon_url = []

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and connect signals."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_response(self, request, response, spider):
        """Reschedule 4xx/5xx responses; pass healthy ones through."""
        if 400 <= response.status < 600:
            # Log the abnormal status code.
            print('url:{}的状态码异常: {}'.format(response.url, response.status))
            retry_request = self.error_handle(request)
            if retry_request is not None:
                return retry_request
            # BUG FIX: the original returned None here once the retry
            # budget was exhausted; process_response must return a
            # Response or Request, so hand back the failed response.
            return response

        # Healthy response: reset the error counter.
        request.meta['error_status'] = 0
        return response

    def process_exception(self, request, exception, spider):
        """Reschedule on known download failures; let others propagate."""
        if isinstance(exception, self.ALL_EXCEPTIONS):
            # A known, recoverable download failure — retry it.
            print('url:{},捕获异常: {}'.format(request.url, exception))
            return self.error_handle(request)

        # Unknown exception: log it and return None so Scrapy's default
        # exception handling continues.
        print('not contained exception: %s' % exception)

    def error_handle(self, request):
        """Build a retry copy of *request*, or give up after the budget.

        Returns a copy of the request with the dupe-filter disabled,
        ``retry_times`` reset (so RetryMiddleware gets 3 fresh attempts),
        the proxy cleared (so a proxy middleware can assign a new one) and
        the ``error_status`` counter bumped.  Returns ``None`` once the
        URL has exceeded ``MAX_ERROR_CYCLES``; the URL is then recorded
        in ``abandon_url``.
        """
        error_status = request.meta.get('error_status', 0)
        if error_status > self.MAX_ERROR_CYCLES:
            # Give up on this URL entirely.
            self.abandon_url.append(request.url)
            return None

        new_request = request.copy()
        # The retry must not be dropped by the duplicate filter.
        new_request.dont_filter = True
        # Reset retry_times so the built-in RetryMiddleware re-engages.
        new_request.meta['retry_times'] = 0
        # Clear the previous proxy assignment.
        new_request.meta['proxy'] = None
        new_request.meta['error_status'] = error_status + 1
        return new_request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def spider_closed(self, spider):
        """Persist error URLs on shutdown.

        Writes ``abandon_url`` and the spider's ``txt_url_error_li`` to
        ``<spider.JSON_DIR>/<spider.name>_*.json``.
        """
        with open('{}/{}_abandon_url.json'.format(spider.JSON_DIR, spider.name), 'w', encoding='utf-8') as f:
            json.dump(self.abandon_url, f)
        with open('{}/{}_txt_url_error_li.json'.format(spider.JSON_DIR, spider.name), 'w', encoding='utf-8') as f:
            json.dump(spider.txt_url_error_li, f)


