# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

import random
import scrapy,requests
from scrapy import signals
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from settings import MY_USER_AGENT,count,ipPool

"""
或者
from settings import MY_USER_AGENT
 def process_request(self, request, spider):
        agent = random.choice(MY_USER_AGENT)
        request.headers['User-Agent'] = agent
"""


# Sets a random User-Agent per request; register/enable this middleware in the settings file.
class MyUserAgentMiddleware(UserAgentMiddleware):
    """Downloader middleware that stamps a random User-Agent on every request.

    The pool of candidate agent strings is read from the ``MY_USER_AGENT``
    setting via :meth:`from_crawler`.
    """

    def __init__(self, user_agent):
        # Pool of User-Agent strings to choose from.
        self.user_agent = user_agent

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: pull the agent pool out of the crawler settings.
        agents = crawler.settings.get('MY_USER_AGENT')
        return cls(user_agent=agents)

    def process_request(self, request, spider):
        # Overwrite the header with one randomly chosen agent string.
        request.headers['User-Agent'] = random.choice(self.user_agent)


class RandomUserAgent(object):
    """Stateless variant: draws the agent straight from the imported
    module-level ``MY_USER_AGENT`` list instead of crawler settings."""

    def process_request(self, request, spider):
        # Pick one entry from the shared pool and stamp it on the request.
        request.headers['User-Agent'] = random.choice(MY_USER_AGENT)


# 设置动态ip代理
class ProxyMiddleware(object):
    """Rotating-proxy middleware.

    Routes each request through a random proxy from the shared ``ipPool``
    and refills the pool from the provider API once the shared ``count``
    counter passes 50 requests.
    """

    def process_request(self, request, spider):
        # Randomly select one proxy from the pool.
        ip = random.choice(ipPool)
        print('当前ip', ip, '-----', count['count'])
        # Routing the request through the proxy -- this line is the key.
        request.meta['proxy'] = ip
        # Once the counter passes the threshold, refresh the pool contents.
        if count['count'] > 50:
            print('-------------切换ip------------------')
            count['count'] = 0
            try:
                # timeout= prevents a stalled provider from hanging the crawl.
                resp = requests.get('http://http.tiqu.letecs.com/getip3?num=50&type=1&pro=&city=0&yys=0&port=1&time=1&ts=0&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions=&gm=4', timeout=10)
                # Parse first, clear second: a failed fetch must never leave
                # the pool empty (random.choice on [] raises IndexError).
                fresh = [line.strip() for line in resp.text.split('\r\n')]
                ipPool.clear()
                # Skip blank lines so a bare 'http://' is never registered.
                ipPool.extend('http://' + addr for addr in fresh if addr)
            except requests.RequestException as exc:
                # Best effort: keep the stale pool rather than crash the spider.
                print('proxy pool refresh failed:', exc)
        # Count this request toward the refresh threshold.
        count['count'] += 1
        return None

