# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
import random
import requests

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class MiddlerSpiderSpiderMiddleware:
    """Spider middleware exposing the standard Scrapy hook points.

    Every hook here is a pass-through: the middleware observes but does
    not modify the objects flowing between the engine and the spider.
    Any hook could be deleted and Scrapy would behave identically.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and wire up signals."""
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened
        )
        return middleware

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider.

        Returning None lets the response continue into the spider.
        """
        return None

    def process_spider_output(self, response, result, spider):
        """Forward everything the spider produced for this response.

        Must yield Request or item objects; here they pass unchanged.
        """
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Called when the spider (or another middleware's
        process_spider_input) raises.

        Returning None defers exception handling to other middlewares.
        """
        return None

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests unchanged.

        Works like process_spider_output but with no associated response;
        must yield only requests (never items).
        """
        yield from start_requests

    def spider_opened(self, spider):
        """Log when the spider starts (connected in from_crawler)."""
        spider.logger.info("Spider opened: %s" % spider.name)


class MiddlerSpiderDownloaderMiddleware:
    """Downloader middleware that rotates User-Agent strings and proxy IPs.

    A fresh batch of proxy IPs is fetched from the provider whenever no
    batch is loaded or the current batch is exhausted. Each IP serves at
    most three requests before rotation moves to the next one.
    """

    def __init__(self):
        # Pool of User-Agent strings; one is chosen at random per request.
        self.user_agent_list = [
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
            "HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
            "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
            "Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
            "Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
            "Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            "Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
            "Mozilla/5.0 (Linux; U; Android 2.1-update1; de-de; HTC Desire 1.19.161.5 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
            "Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            "Mozilla/5.0 (Linux; U; Android 1.5; de-ch; HTC Hero Build/CUPCAKE) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
            "Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"
        ]
        # Current proxy batch: list of {"ip": ..., "port": ...} dicts.
        self.ip_list = []
        # Proxy-provider endpoint (pre-signed, URL-encoded query string).
        self.ip_url = "https://api.hailiangip.com:8522/api/getIpEncrypt?dataType=0&encryptParam=i6OcePKr4Cq0wPH1UJ%2FCOyBYXf0wSdR0KIhVhoMMPHvy912xFHA3Hogn7b2rQpv2v%2Bsma%2BfvZPJZSjKa6K%2BrHqWDkDfjYmCMgqdj%2FbyLhHFdHWoP3QjLYhuQTiVYAEx4SVroWnM6ns6JOmvmXdEjw0Ha%2ByfEPwJclGMY6HEoCZRXwMMY2Pp7wRNtgRIJmPbHvs3ERyFHZ9FAgNS8WBDIMt0Jv%2FQlqwlcd4gkrYI6AFg%3D"
        self.number = 0  # how many requests the current IP has served
        self.result = 0  # 1-based index of the IP in use; 0 = no proxy yet

    def get_ip(self):
        """Fetch a fresh batch of proxy IPs and replace ip_list with it.

        A timeout is set so a dead provider cannot hang the crawl
        indefinitely (the original call could block forever). Network
        errors propagate to the caller.
        """
        data = requests.get(self.ip_url, timeout=10).json()["data"]
        self.ip_list = [{"ip": item["ip"], "port": item["port"]} for item in data]
        print(self.ip_list)

    def change_ip_data(self, request):
        """Point request.meta['proxy'] at the currently selected IP.

        The proxy scheme mirrors the request's own scheme. Raises
        IndexError (index out of range) or KeyError (malformed entry),
        which ip_user() interprets as "rotate to the next IP".
        """
        scheme = request.url.split(":")[0]
        if scheme in ("http", "https"):
            entry = self.ip_list[self.result - 1]
            request.meta["proxy"] = "%s://%s:%s" % (scheme, entry["ip"], entry["port"])

    def ip_user(self, request):
        """Assign a proxy, rotating past bad indices and refetching a batch
        when the current one is exhausted.

        Fixes two defects of the original: the bare ``except:`` (which also
        swallowed KeyboardInterrupt/SystemExit) is narrowed to the lookup
        errors change_ip_data() can actually raise, and the unbounded
        recursion is replaced by a bounded loop so a persistently bad
        batch cannot cause RecursionError.
        """
        for _ in range(10):  # hard retry bound
            try:
                self.change_ip_data(request)
                return
            except (IndexError, KeyError):
                if self.result == 0 or self.result > len(self.ip_list):
                    # Batch exhausted (or never loaded): fetch a new one.
                    self.get_ip()
                    self.result = 1
                else:
                    # Current index is unusable; try the next IP.
                    self.result += 1

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and wire up signals."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        """Attach a random User-Agent and the current proxy to the request."""
        request.headers["User-Agent"] = random.choice(self.user_agent_list)

        # (Re)load a batch when none is loaded or the previous one ran out.
        # The original compared against the magic number 5, which breaks if
        # the provider returns any other batch size; use the actual length.
        if self.result == 0 or self.result > len(self.ip_list):
            self.get_ip()
            self.result = 1
            self.number = 0  # new batch -> reset the per-IP use counter

        # Rotate after the current IP has served three requests.
        if self.number >= 3:
            self.result += 1
            self.number = 0

        # Single assignment point (the original called change_ip_data here
        # and then again inside ip_user, doing the work twice).
        self.ip_user(request)
        self.number += 1

    def process_response(self, request, response, spider):
        """Pass downloader responses through unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Return None so other middlewares may handle download errors."""
        return None

    def spider_opened(self, spider):
        """Log when the spider starts (connected in from_crawler)."""
        spider.logger.info("Spider opened: %s" % spider.name)
