import requests
import redis
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy import signals, Spider
from scrapy.http import Request, Response
from loguru import logger


# Proxies here come from a Redis-backed proxy pool.
class AmazonDownloaderMiddleware:
    """Downloader middleware that routes requests through a Redis-backed proxy pool.

    On startup the pool is seeded with ``IP_POOL_SIZE`` authenticated proxies
    fetched from a paid API.  Each request is assigned a random proxy from the
    pool; on a bad status code or a download exception the failed proxy is
    evicted ("optimistic lock" style), replaced with a fresh one, and the
    request is rescheduled — up to ``MAX_RETRY_TIMES`` attempts.
    """

    # --- Paid-proxy provider configuration ---
    API_URL = "修改为你的API"   # endpoint returning a fresh "host:port" string
    USERNAME = "用户"           # proxy basic-auth username
    PASSWORD = "密码"           # proxy basic-auth password

    # --- Redis-backed IP pool ---
    client = redis.Redis(decode_responses=True)
    redis_key = ""          # pool key in Redis (set to "<spider.redis_key>:ip" in from_crawler)
    IP_POOL_SIZE = 5        # number of proxies kept in the pool
    CLEAR_ON_START = True   # drop any leftover pool at startup (runs usually end abnormally)

    # --- Timeout / retry policy ---
    DOWNLOAD_TIMEOUT = 10   # per-request timeout, in seconds
    MAX_RETRY_TIMES = 3     # max retries per request (suggested: keep equal to IP_POOL_SIZE)
    RETRY_ENABLED = True    # fixed: was the string 'True' (always truthy, wrongly typed)

    # Number of proxies fetched during this run (reported at shutdown).
    proxy_ip_count = 0

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy entry point: configure the Redis key, seed the pool, hook signals."""
        cls.redis_key = crawler.spider.redis_key + ":ip"  # per-spider pool name
        # Cleanup must happen at startup: most runs end with an abnormal
        # interrupt, so shutdown-time cleanup would rarely get to execute.
        if cls.CLEAR_ON_START:
            cls.client.delete(cls.redis_key)
            logger.debug("删除成功!!!")

        # Seed the pool with fresh authenticated proxies.
        for _ in range(cls.IP_POOL_SIZE):
            cls.client.sadd(cls.redis_key, cls.get_new_proxy_ip())

        # Hook the close signal so we can log usage and release the connection.
        s = cls()
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def spider_closed(self) -> None:
        """Log total proxy usage and release the Redis connection."""
        logger.info(f"共计消耗{self.proxy_ip_count}个代理IP")
        self.client.close()

    def process_request(self, request: Request, spider: Spider):
        """Attach UA/Cookie headers, a random pooled proxy, and retry metadata."""
        request.headers['user-agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
        request.headers['Cookie'] = 'i18n-prefs=USD; session-id=130-9348969-9472654; ubid-main=132-4278027-6860024; session-id-time=2082787201l; lc-main=zh_CN; skin=noskin; sp-cdn="L5Z9:HK"; session-token=QC9GuGrgPtoemDrl8yt4qjZfzYcOCIGo46WhhyBFQyDh67xyLui9v+iAmFWZSP8CWhmTJ+Wpt0x8d1FgWfsnMBaJH184v4Gt2RH9X4ODNe4uDHdigYh3GbXcwKTmYFirflc659+prBxxKqgNz3eBRdJLh3Gi7n1U2AIthuQ76Zmz1W5qWqt7kB27G8zCgTCVETvPniAgkIWC9x0Oq2/U33pWPSsx0olqRGLbWuLEos04MYDR4sl0gU+NHxPERVHpF4NvnCOIUCmKA8j9ZNQyqU/hnCzupxIoKDjQtIiu0hRvauhzN6xSQnfv18z5mZsJfbb9pwZZzw1JDzh1cVdO9xXSwaD6tbgC; csm-hit=tb:s-M5GJ0KP7ZT0HZXTRVN95|1738389093979&t:1738389094428&adb:adblk_yes'
        # Pick one proxy at random from the pool (stored values already carry auth).
        request.meta['proxy'] = self.client.srandmember(self.redis_key, 1)[0]
        logger.debug(f"本次请求ip:{request.meta['proxy']}")  # fixed: was a bare print()
        # Timeout and retry budget for this request.
        request.meta['download_timeout'] = self.DOWNLOAD_TIMEOUT
        request.meta['max_retry_times'] = self.MAX_RETRY_TIMES
        request.meta.setdefault('retry_times', 0)  # first attempt -> 0
        if request.meta['retry_times'] != 0:  # non-zero means this is a retry
            logger.info(f"使用ip:{request.meta['proxy']},进行重试:{request.meta['retry_times']}/{request.meta['max_retry_times']} 网址:{request.url}")
        return None

    def process_response(self, request: Request, response: Response, spider: Spider):
        """Retry non-200 responses on a fresh proxy until the retry cap is hit."""
        logger.info(f"请求 URL: {request.url}, 状态码: {response.status}")
        if response.status != 200:
            # Fixed: the original retried unconditionally and never incremented
            # retry_times, so a permanently-bad URL could loop forever.
            if request.meta.get('retry_times', 0) >= request.meta.get('max_retry_times', self.MAX_RETRY_TIMES):
                logger.debug(f"已到达重试上限 reason:状态码错误; 状态码: {response.status}; URL:{request.url}")
                return response
            logger.debug(f"更新IP Reason:状态码: {response.status}; URL:{request.url}")
            return self.update_proxy_ip(request)
        return response

    def process_exception(self, request: Request, exception, spider: Spider):
        """Retry download exceptions on a fresh proxy until the retry cap is hit."""
        if request.meta.get('retry_times', 0) >= request.meta.get('max_retry_times', self.MAX_RETRY_TIMES):
            logger.debug(f"已到达重试上限 reason:异常抛出; 报错类型: {type(exception)}, 报错内容: {exception}; URL:{request.url}")
            return None  # give up; let Scrapy's default failure handling proceed
        logger.debug(f"更新IP Reason:报错类型: {type(exception)}, 报错内容: {exception}; URL:{request.url}")
        return self.update_proxy_ip(request)

    def update_proxy_ip(self, request: Request) -> Request:
        """Evict the failed proxy ("optimistic lock" check), pick a new one, reschedule.

        Values stored in Redis already embed the auth credentials (see
        get_new_proxy_ip), so request.meta['proxy'] is matched against the set
        directly.  The original code re-prepended "user:pwd@" to a value that
        already contained it, so the membership test never matched and dead
        proxies were never evicted.

        :param request: the Request that just failed
        :return: the same Request, updated and marked for rescheduling
        """
        failed_proxy = request.meta.get('proxy')
        # If the proxy is still in the pool, it really is expired/unusable:
        # remove it and top the pool back up with a fresh one.  (Another
        # concurrent request may already have replaced it — that's the
        # "optimistic" part.)
        if failed_proxy and self.client.sismember(self.redis_key, failed_proxy):
            self.client.srem(self.redis_key, failed_proxy)
            self.client.sadd(self.redis_key, self.get_new_proxy_ip())
        request.meta['proxy'] = self.client.srandmember(self.redis_key, 1)[0]  # random replacement
        # Count this attempt and bypass the dupefilter, otherwise the
        # rescheduled request would be dropped as already seen.
        request.meta['retry_times'] = request.meta.get('retry_times', 0) + 1
        request.dont_filter = True
        return request

    @classmethod
    def get_new_proxy_ip(cls) -> str:
        """Fetch one proxy from the paid API and return it with auth embedded.

        :return: proxy URL of the form "http://user:pwd@host:port/"
        """
        proxy_ip = requests.get(cls.API_URL).text  # paid API returns "host:port"
        # Embed basic-auth credentials (users with IP whitelisting: adjust here).
        res_proxy_ip = "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": cls.USERNAME, "pwd": cls.PASSWORD, "proxy": proxy_ip}
        # Usage accounting for the shutdown log.
        cls.proxy_ip_count += 1
        logger.info(f"获取第{cls.proxy_ip_count}个ip: {res_proxy_ip}")
        return res_proxy_ip


class MyRetryDownloaderMiddleware(RetryMiddleware):
    """RetryMiddleware subclass that adds debug logging around retry decisions."""

    def process_response(self, request: Request, response: Response, spider: Spider) -> Request | Response:
        """Log non-200 statuses, then defer to the stock retry logic."""
        if response.status != 200:
            logger.debug(f"父类self.retry_http_codes: {self.retry_http_codes}")
            logger.debug(f"当前状态码情况{response.status}")
        return super().process_response(request, response, spider)

    def process_exception(self, request: Request, exception: Exception, spider: Spider):
        """Log the exception, then defer to the stock retry logic.

        Fixed: the original dropped the parent's return value, so the retry
        Request produced by RetryMiddleware.process_exception was silently
        discarded and exception-triggered retries never happened.
        """
        logger.debug(f"父类self.exceptions_to_retry: {self.exceptions_to_retry}")
        logger.debug(f"当前报错情况{type(exception), exception}")
        return super().process_exception(request, exception, spider)
