# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

import logging
import requests
import json
import random

from scrapy.exceptions import IgnoreRequest

# User-Agent pool
class RandomUserAgentMiddleware():
    """Downloader middleware that stamps a random User-Agent on each request.

    Candidate agents come from the ``USER_AGENTS`` spider setting.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def process_request(self, request, spider):
        # Pick one agent at random from the configured pool.
        agent_pool = spider.settings.get("USER_AGENTS")
        chosen_agent = random.choice(agent_pool)
        request.headers["User-Agent"] = chosen_agent
        self.logger.debug('-当前使用的User-Agent：' + chosen_agent)

# Cookie pool client
class CookiesMiddleware(object):
    """Downloader middleware that attaches cookies fetched from a cookie pool.

    The pool service URL comes from the ``COOKIES_POOL_URL`` setting; the
    service is expected to return a JSON-encoded cookie dict on HTTP 200.
    """

    def __init__(self, cookies_pool_url):
        self.logger = logging.getLogger(__name__)
        self.cookies_pool_url = cookies_pool_url

    @classmethod
    def from_crawler(cls, crawler):
        # Standard Scrapy factory: read the pool URL from project settings.
        return cls(
            cookies_pool_url = crawler.settings.get('COOKIES_POOL_URL')
        )

    def _get_random_cookies(self):
        """Fetch one random cookie dict from the pool; return None on failure."""
        try:
            response = requests.get(self.cookies_pool_url)
            if response.status_code == 200:
                return json.loads(response.text)
        # Fix: the original caught the *builtin* ConnectionError, which
        # requests.ConnectionError does NOT inherit from, so pool outages
        # escaped the handler (ProxyMiddleware below already catches the
        # requests one — now consistent).
        except requests.ConnectionError:
            return None
        return None

    def process_request(self, request, spider):
        cookies = self._get_random_cookies()
        if cookies:
            request.cookies = cookies
            self.logger.debug('--当前使用的Cookies：' + json.dumps(cookies))
        else:
            self.logger.debug('No Valid Cookies')

    # Weibo's anti-scraping is aggressive and may redirect to a login or
    # account-ban page; classify those redirects, then retry the request
    # with fresh cookies.
    def process_response(self, request, response, spider):
        if response.status in [300, 301, 302, 303]:
            try:
                redirect_url = response.headers['location']
                # Fix: Scrapy header values are bytes; the original
                # `'passport' in redirect_url` raised TypeError on bytes and
                # every redirect fell into the bare except -> IgnoreRequest,
                # so none of the branches below ever ran.
                if isinstance(redirect_url, bytes):
                    redirect_url = redirect_url.decode('utf-8')
                # Redirected to the login page: cookies have expired.
                if 'passport' in redirect_url:
                    self.logger.warning('Need Login, Updating Cookies')
                # "https://weibo.cn/pub/", observed 2020-03-01
                elif 'pub' in redirect_url:
                    self.logger.warning('爷不知道跳到这个网址是干什么orz')
                # Account has been locked/banned.
                elif 'weibo.cn/security' in redirect_url:
                    self.logger.warning('Account is Locked!')
                # Retry the request; only replace cookies when the pool
                # actually returned some (the original could assign None).
                cookies = self._get_random_cookies()
                if cookies:
                    request.cookies = cookies
                return request
            except (KeyError, UnicodeDecodeError):
                # No usable Location header on a 3xx response: drop the
                # request instead of retrying blindly (was a bare except).
                raise IgnoreRequest
        else:
            return response


# Proxy pool integration
class ProxyMiddleware():
    """Downloader middleware that routes retried requests through a proxy.

    The proxy-pool endpoint is read from the ``PROXY_URL`` setting; one
    proxy address is fetched from that service per retried request.
    """

    def __init__(self, proxy_url):
        self.logger = logging.getLogger(__name__)
        self.proxy_url = proxy_url

    @classmethod
    def from_crawler(cls, crawler):
        return cls(proxy_url=crawler.settings.get("PROXY_URL"))

    def get_random_proxy(self):
        """Return one proxy address from the pool, or False when unreachable."""
        try:
            pool_response = requests.get(self.proxy_url)
        except requests.ConnectionError:
            return False
        if pool_response.status_code == 200:
            return pool_response.text
        # Non-200 from the pool: fall through (no proxy available).

    def process_request(self, request, spider):
        # Attach a proxy only after the request has already failed once
        # (retry_times set): direct crawling is faster, so proxies are
        # reserved for retries.
        if not request.meta.get('retry_times'):
            return
        proxy = self.get_random_proxy()
        if proxy:
            self.logger.debug('---使用代理：' + proxy)
            request.meta['proxy'] = 'https://{proxy}'.format(proxy=proxy)


