# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import scrapy
from cloudscraper import User_Agent
from requests_toolbelt import user_agent
from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.core.downloader.handlers.http11 import TunnelError

from util import ip


class TutorialSpiderMiddleware:
    """Demo spider middleware that prints/logs each hook as it fires.

    Not all methods need to be defined. If a method is not defined,
    Scrapy acts as if the spider middleware does not modify the
    passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        # Bug fix: spider_closed was defined but never connected to any
        # signal, so its log line could never fire.
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        print(f"处理爬虫输入3")
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        print(f"处理爬虫输出4")
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    async def process_start(self, start):
        # Called with an async iterator over the spider start() method or the
        # matching method of an earlier spider middleware.
        async for item_or_request in start:
            yield item_or_request

    def spider_opened(self, spider):
        spider.logger.info("1111111111111111111111Spider opened: %s" % spider.name)

    def spider_closed(self, spider):
        # Bug fix: the message previously said "Spider opened" on close.
        spider.logger.info("2222222222222222222222Spider closed: %s" % spider.name)

class TutorialDownloaderMiddleware:
    """Demo downloader middleware.

    Scrapy only invokes the hooks that are defined; any hook left out is
    treated as a pass-through.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also subscribes to the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened
        )
        return middleware

    def process_request(self, request, spider):
        """Stamp a fixed User-Agent header on every outgoing request.

        Returning None lets the request continue through the remaining
        middlewares; returning a Response/Request or raising IgnoreRequest
        would short-circuit instead.
        """
        print("处理下载输入1")
        request.headers['User-Agent'] = "555555555555"
        return None

    def process_response(self, request, response, spider):
        """Replace every downloaded response with a canned one.

        Must return a Response object, return a Request object, or raise
        IgnoreRequest; here the real response is discarded in favour of a
        fixed body (demo behaviour).
        """
        print("处理下载输出2")
        return scrapy.http.Response(url="https://www.baidu.com", body=b'xingxingla')

    def process_exception(self, request, exception, spider):
        """Download-error hook; returning None continues the exception chain
        (a Response or Request return would stop it)."""
        return None

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


import random
from scrapy import signals
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from util.ip import get_proxies_poll,get_proxies

class RandomUserAgentMiddleware(UserAgentMiddleware):
    """Random User-Agent middleware: picks one UA per request.

    Reads the USER_AGENTS setting (list of UA strings) and falls back to a
    small built-in list when the setting is absent.
    """

    def __init__(self, user_agents):
        # Bug fix: initialise the base UserAgentMiddleware so its state
        # exists even though process_request is fully overridden here.
        super().__init__()
        self.user_agents = user_agents

    @classmethod
    def from_crawler(cls, crawler):
        settings = crawler.settings
        user_agents = settings.get('USER_AGENTS', [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'
        ])
        return cls(user_agents)

    def process_request(self, request, spider):
        # Guard: an explicitly empty USER_AGENTS setting would make
        # random.choice raise IndexError; leave the header untouched instead.
        if self.user_agents:
            request.headers['User-Agent'] = random.choice(self.user_agents)


class ProxyMiddleware:
    """Proxy middleware: assigns a random proxy from the PROXIES setting."""

    @classmethod
    def from_crawler(cls, crawler):
        # Keep a reference to the crawler settings so process_request can
        # read the PROXIES list at request time.
        s = cls()
        s.settings = crawler.settings
        return s

    def process_request(self, request, spider):
        # Bug fix: indexing self.settings['PROXIES'] raised KeyError when the
        # setting was missing and IndexError when it was empty; now the
        # request is simply sent without a proxy in those cases.
        proxies = self.settings.get('PROXIES')
        if proxies:
            request.meta['proxy'] = random.choice(proxies)

    def process_response(self, request, response, spider):
        # Pass responses through untouched.
        return response


import random
from scrapy.core.downloader.handlers.http11 import TunnelError


class ProxiesPoolMiddleware:
    """Downloader middleware that rotates requests through a proxy pool.

    The pool is filled from ``util.ip.get_proxies``; proxies that raise a
    ``TunnelError`` are evicted and the request is retried with another one.
    NOTE(review): the attribute name ``proxy_poll`` looks like a typo for
    ``proxy_pool`` — renaming would touch every use in this class.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: build the middleware and pre-fill the pool.
        s = cls()
        # Initialise the proxy pool
        s.proxy_poll = cls.get_initial_proxies()
        return s

    @staticmethod
    def get_initial_proxies():
        """Fetch the initial proxy pool; returns a list, or [] on any failure."""
        try:
            # Fetch proxies from the project's util.ip module
            from util.ip import get_proxies
            proxies = get_proxies()
            if isinstance(proxies, list) and proxies:
                return proxies
            elif isinstance(proxies, dict):
                # A single proxy dict is wrapped into a one-element pool
                return [proxies]
            else:
                print("警告: 获取的代理格式不正确")
                return []
        except Exception as e:
            print(f"初始化代理池失败: {e}")
            return []

    def process_request(self, request, spider):
        # Lazily refill the pool if it is empty; if refilling also fails,
        # send the request without a proxy.
        if not self.proxy_poll:
            self.proxy_poll = self.get_initial_proxies()
            if not self.proxy_poll:
                spider.logger.warning("代理池为空，跳过代理设置")
                return

        # Pick one proxy at random
        ip = random.choice(self.proxy_poll)

        # Pool entries may be dicts ({'http': url}) or plain URL strings
        if isinstance(ip, dict) and 'http' in ip:
            proxy_url = ip['http']
            spider.logger.info(f"使用代理 {proxy_url} 发起请求")
            request.meta['proxy'] = proxy_url
            request.meta['ip'] = ip  # keep the pool entry for process_exception
        elif isinstance(ip, str):
            # Plain proxy URL string: normalise to the dict shape in meta
            spider.logger.info(f"使用代理 {ip} 发起请求")
            request.meta['proxy'] = ip
            request.meta['ip'] = {'http': ip}  # synthesised entry
        else:
            spider.logger.error(f"代理格式错误: {ip}")
            # Unusable entry: leave the request proxy-less
            return

    def process_response(self, request, response, spider):
        # Pass responses through untouched.
        return response

    def process_exception(self, request, exception, spider):
        # Evict the failing proxy on tunnel errors and retry with another.
        if isinstance(exception, TunnelError):
            ip_info = request.meta.get('ip', {})
            proxy_url = ip_info.get('http', '未知代理')
            spider.logger.error(f"代理异常 {type(exception).__name__}: {proxy_url}")

            # Remove the faulty proxy from the pool
            if ip_info in self.proxy_poll:
                self.proxy_poll.remove(ip_info)
                spider.logger.info(f"从代理池移除故障代理: {proxy_url}")

            # If eviction emptied the pool, try to refill it
            if not self.proxy_poll:
                new_proxies = self.get_initial_proxies()
                if new_proxies:
                    self.proxy_poll = new_proxies
                    spider.logger.info("重新填充代理池")
                else:
                    spider.logger.error("无法获取新代理，代理池为空")
                    return None

            # Pick a fresh proxy and re-schedule the request
            new_ip = random.choice(self.proxy_poll)
            if isinstance(new_ip, dict) and 'http' in new_ip:
                request.meta['proxy'] = new_ip['http']
                request.meta['ip'] = new_ip
                spider.logger.info(f"使用新代理重试: {new_ip['http']}")
            elif isinstance(new_ip, str):
                request.meta['proxy'] = new_ip
                request.meta['ip'] = {'http': new_ip}
                spider.logger.info(f"使用新代理重试: {new_ip}")

            # Returning the request asks Scrapy to download it again
            return request

        return None
# -*- coding: utf-8 -*-

# from scrapy import signals


# class ProxyDownloaderMiddleware:
#     _proxy = ('j579.kdltps.com', '15818')
#
#     def process_request(self, request, spider):
#
#         # 用户名密码认证
#         username = "t16308832649894"
#         password = "p3x0pasy"
#         request.meta['proxy'] = "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": username, "pwd": password, "proxy": ':'.join(ProxyDownloaderMiddleware._proxy)}
#
#         # 白名单认证
#         # request.meta['proxy'] = "http://%(proxy)s/" % {"proxy": proxy}
#
#         request.headers["Connection"] = "close"
#         return None
#
#     def process_exception(self, request, exception, spider):
#         """捕获407异常"""
#         if "'status': 407" in exception.__str__():  # 不同版本的exception的写法可能不一样，可以debug出当前版本的exception再修改条件
#             from scrapy.resolver import dnscache
#             dnscache.__delitem__(ProxyDownloaderMiddleware._proxy[0])  # 删除proxy host的dns缓存
#         return exception


class RetryMiddleware:
    """Custom retry middleware for throttling/ban responses (403/429)."""

    # Bug fix: the original returned the request unconditionally, so a
    # permanent 403 would loop forever. Cap retries per request instead.
    max_retries = 3

    def process_response(self, request, response, spider):
        if response.status in [403, 429]:
            retries = request.meta.get('block_retry_times', 0)
            if retries < self.max_retries:
                request.meta['block_retry_times'] = retries + 1
                spider.logger.warning(f"遇到限制，等待重试: {response.status}")
                # A back-off delay could be inserted here.
                return request
            spider.logger.warning(f"Retry limit reached, giving up: {response.status}")
        return response

# Canned cookie values rotated across requests.
user_cookie = ["ahgisjgke;ogr1","gabehginujiegojeg2", "ahljegioagneg3", "ehlgjaiegoeogkg4", "ehjgojejgegg5"]


class CookieMiddleware:
    """Random Cookie middleware: attaches one of the canned cookies to each
    request. (Doc fix: the original docstring wrongly said "UserAgent".)"""

    def process_request(self, request, spider):
        print(f"处理下载请求11")
        print(request.headers,"====")
        # Pick a cookie at random so consecutive requests don't share one.
        request.headers['cookie'] = random.choice(user_cookie)
        print(request.headers, "====")

    def process_response(self, request, response, spider):
        # Pass responses through untouched.
        print(f"处理下载响应22")
        return response
