# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

import random

from twisted.internet.error import TCPTimedOutError

from domainPro.myextend import pro

# useful for handling different item types with a single interface
import requests
import json,time
from scrapy.http import HtmlResponse

# Kuaidaili (KDL) paid-proxy API endpoint: returns 10 proxy addresses in
# JSON format. NOTE(review): the order id is embedded in the URL —
# consider moving it to settings or secret storage. Currently unused in
# this chunk (proxy rotation is commented out in process_request).
proxy_url ='http://dps.kdlapi.com/api/getdps/?orderid=960341636111633&num=10&pt=1&format=json&sep=1'

# Pool of desktop-browser User-Agent strings. process_request picks one
# at random per outgoing request to reduce request fingerprinting.
User_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]

class DownloaderMiddleware:
    """Downloader middleware that rotates the User-Agent header and
    re-schedules requests that fail with a download exception.

    Not all methods need to be defined; Scrapy treats a missing method
    as a pass-through that does not modify the objects.
    """

    # Intercept outgoing requests.
    def process_request(self, request, spider):
        """Assign a randomly chosen User-Agent to every request.

        Returning ``None`` lets the request continue through the rest
        of the downloader-middleware chain unchanged.
        """
        # UA spoofing: pick a fresh desktop User-Agent for this request.
        request.headers['User-Agent'] = random.choice(User_agent_list)
        # Proxy rotation is currently disabled. Re-enable by drawing a
        # proxy from pro.proxy_list and setting request.meta['proxy']:
        # proxy = random.choice(pro.proxy_list)
        # request.meta['proxy'] = "https://%(proxy)s" % {'proxy': proxy}
        return None

    # Intercept responses.
    def process_response(self, request, response, spider):
        """Pass every response through to the spider unchanged."""
        return response

    # Intercept requests that raised an exception during download.
    def process_exception(self, request, exception, spider):
        """Log a failed download and retry it with a new User-Agent.

        The previous implementation had three byte-identical branches
        for TCPTimedOutError, ConnectionRefusedError and BaseException;
        since every exception is a BaseException, a single handler is
        behaviorally equivalent.

        Returning the request hands it back to the scheduler for a
        fresh download attempt.
        """
        print('Got exception: %s' % (exception))
        # Refresh the randomized User-Agent before the retry.
        self.process_request(request, spider)
        return request

