# -*- coding: utf-8 -*-
import random
import scrapy
import time
from scrapy import log

from fake_useragent import UserAgent


# #仿照scrapy内置的UserAgentMiddleware，来自定义设置user-agent的中间件。
# class RandomUserAgentMiddleWare(object):
#     def __init__(self, crawler):
#         super(RandomUserAgentMiddleWare, self).__init__()
#         self.ua = UserAgent()
#         # 从配置文件settings中读取RANDOM_UA_TYPE值,如果配置不存在，则采用默认的random进行随机。
#         self.ua_type = crawler.settings.get('RANDOM_UA_TYPE', 'random')
#     # 注意：from_crawler函数名以及参数必须和内置的保持一致
#     @classmethod
#     def from_crawler(cls, crawler):
#         # 在scrapy内置请求头中间件中，该方法的作用是返回了当前类的对象
#         return cls(crawler)
#     def process_request(self, request, spider):
#         # 该方法是处理请求头的核心方法，在该方法内部指定请求头的User_Agent值。
#         # request.headers.setdefault(b"User-Agent", random.choice(self.user_agent_list))
#         def get_user_agent():
#             # 返回的就是最终的User-Agent，类似于对象.属性
#             return getattr(self.ua, self.ua_type)
#         request.headers.setdefault(b"User-Agent", get_user_agent())


class RandomUserAgentMiddlware(object):
    """Downloader middleware that sets a random User-Agent on every request.

    Modeled on Scrapy's built-in ``UserAgentMiddleware``. The flavor of
    agent string is read from the ``RANDOM_UA_TYPE`` setting (e.g.
    'chrome', 'firefox', 'ie'); it defaults to 'random', which lets
    fake_useragent pick any browser.
    """

    def __init__(self, crawler):
        super(RandomUserAgentMiddlware, self).__init__()
        self.ua = UserAgent()
        # Attribute name looked up on the UserAgent object per request.
        self.ua_type = crawler.settings.get('RANDOM_UA_TYPE', 'random')

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy instantiates middlewares through this hook so the
        # crawler (and thus settings) is available to __init__.
        return cls(crawler)

    def process_request(self, request, spider):
        """Called by Scrapy for each outgoing request; injects the header.

        Bug fix: the header key must be ``User-Agent`` (hyphen). The
        original used ``User_Agent``, which servers ignore, so Scrapy's
        default UA leaked through on every request.
        """
        # getattr(self.ua, 'random') / 'chrome' / ... yields a fresh UA string.
        request.headers.setdefault(b'User-Agent', getattr(self.ua, self.ua_type))

# logger = logging.getLogger()

class ProxyMiddleWare(object):
    """Downloader middleware that routes each request through a random proxy.

    Proxy addresses are read one per line from a text file. If the file
    exists but is empty, the constructor polls once per second until at
    least one proxy appears (the file is presumably refilled by a
    separate fetcher process — TODO confirm).
    """

    # NOTE(review): hard-coded Windows path kept as the default for
    # backward compatibility; ideally this should come from settings.
    DEFAULT_PROXY_FILE = 'E:\\analizeLago\\proxies.txt'

    def __init__(self, proxy_file=DEFAULT_PROXY_FILE):
        """Load proxies from *proxy_file*, waiting until it is non-empty.

        Blank lines are skipped so they can never be chosen as a proxy.
        """
        while True:
            with open(proxy_file, 'r') as f:
                proxies = [line.strip() for line in f if line.strip()]
            if proxies:
                break
            time.sleep(1)  # wait for the file to be (re)filled
        self.proxies = proxies

    def process_request(self, request, spider):
        """Attach a randomly chosen proxy to the outgoing request."""
        proxy = self.get_random_proxy()
        print("this is request ip:" + proxy)
        request.meta['proxy'] = proxy

    def get_random_proxy(self):
        """Return one proxy chosen uniformly at random from the loaded list."""
        return random.choice(self.proxies)
