import random
import time
from scrapy import signals
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.utils.response import response_status_message


class ProxyMiddleware:
    """Rotating-proxy downloader middleware.

    Assigns a random proxy to each outgoing request, skipping proxies
    that have accumulated too many failures. Failure counts are stored
    in ``proxy_failures`` (proxy URL -> count) and are expected to be
    incremented by the retry middleware when a proxy misbehaves.
    """

    def __init__(self):
        # NOTE(review): placeholder credentials/hosts — in production these
        # should come from Scrapy settings, not be hard-coded.
        self.proxy_list = [
            'http://user:pass@proxy1:port',
            'http://user:pass@proxy2:port',
            'http://user:pass@proxy3:port'
        ]
        # proxy URL -> failure count; a proxy is benched once it reaches
        # max_failures.
        self.proxy_failures = {}
        self.max_failures = 3

    def process_request(self, request, spider):
        # Login requests go direct — no proxy.
        if 'login' in request.url:
            return None

        # Only proxies below the failure threshold are eligible.
        active_proxies = [
            p for p in self.proxy_list
            if self.proxy_failures.get(p, 0) < self.max_failures
        ]
        if not active_proxies:
            # Every proxy is benched: give all of them a fresh chance.
            # Bug fix: the original fell back to the full list but kept the
            # stale failure counts, so this fallback fired on every
            # subsequent request and the threshold became meaningless.
            self.proxy_failures.clear()
            active_proxies = self.proxy_list

        proxy = random.choice(active_proxies)
        request.meta['proxy'] = proxy
        # Remember which proxy served this request so a later failure
        # (e.g. a 429) can be attributed to it.
        request.meta['proxy_fail_key'] = proxy
        return None


class HeadersMiddleware:
    """Attach randomized, browser-like headers to every request."""

    # Hoisted to a class constant: the original rebuilt this list literal
    # on every request.
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 16_5 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Mobile/15E148 Safari/604.1'
    ]

    def process_request(self, request, spider):
        """Set a random User-Agent plus fixed browser-fingerprint headers.

        A Referer is supplied only when the request does not already have
        one, pointing at the spider's target profile page.
        """
        request.headers['User-Agent'] = random.choice(self.USER_AGENTS)
        request.headers['Accept'] = '*/*'
        request.headers['Accept-Language'] = 'en-US,en;q=0.9'
        request.headers['Accept-Encoding'] = 'gzip, deflate, br'
        request.headers['Connection'] = 'keep-alive'

        # Don't clobber a Referer set by the spider / a redirect.
        # NOTE(review): assumes the spider defines `target_user` — confirm
        # against the spider class.
        if 'Referer' not in request.headers:
            request.headers['Referer'] = f'https://twitter.com/{spider.target_user}'

        # Client-hint / fetch-metadata headers mimicking a desktop Chromium.
        # NOTE(review): these always claim Windows/desktop even when the
        # mobile-Safari UA was picked above — an inconsistency a fingerprint
        # checker could detect.
        request.headers['Sec-Ch-Ua'] = '"Chromium";v="125", "Not.A/Brand";v="24"'
        request.headers['Sec-Ch-Ua-Mobile'] = '?0'
        request.headers['Sec-Ch-Ua-Platform'] = '"Windows"'
        request.headers['Sec-Fetch-Dest'] = 'empty'
        request.headers['Sec-Fetch-Mode'] = 'cors'
        request.headers['Sec-Fetch-Site'] = 'same-origin'


class CustomRetryMiddleware(RetryMiddleware):
    """Retry middleware with HTTP 429 handling and proxy-failure bookkeeping.

    On a 429 response the proxy that served the request is charged a
    failure on the installed ``ProxyMiddleware`` instance, then the
    request is retried after honouring the ``Retry-After`` header
    (capped). All other statuses are delegated to the stock
    ``RetryMiddleware``.
    """

    # Upper bound on the wait so a hostile or huge Retry-After value
    # cannot stall the crawler indefinitely.
    MAX_RETRY_AFTER = 60
    DEFAULT_RETRY_AFTER = 30

    def process_response(self, request, response, spider):
        if response.status != 429:
            return super().process_response(request, response, spider)

        # Charge the failure to the proxy that produced the 429.
        # Bug fix: the original read
        # `downloader.middlewares.proxy_failures`, but `middlewares` is
        # Scrapy's DownloaderMiddlewareManager — the instances live in its
        # `.middlewares` sequence — so that chain raised AttributeError.
        proxy_key = request.meta.get('proxy_fail_key')
        if proxy_key:
            manager = getattr(spider.crawler.engine.downloader, 'middlewares', None)
            for mw in getattr(manager, 'middlewares', ()):
                failures = getattr(mw, 'proxy_failures', None)
                if failures is not None:
                    failures[proxy_key] = failures.get(proxy_key, 0) + 1
                    break

        # Retry-After may be absent, bytes, or the RFC-allowed HTTP-date
        # form — the original's bare int() raised ValueError on the latter.
        try:
            retry_after = int(response.headers.get('Retry-After', b''))
        except (TypeError, ValueError):
            retry_after = self.DEFAULT_RETRY_AFTER
        retry_after = min(max(retry_after, 0), self.MAX_RETRY_AFTER)

        spider.logger.warning(
            '429 Too Many Requests. Retrying after %s seconds...', retry_after)
        # NOTE(review): time.sleep blocks the Twisted reactor and freezes
        # every concurrent request; a proper fix needs an async delay
        # (e.g. deferLater-based scheduling), which is an architectural
        # change. Kept synchronous but capped for now.
        time.sleep(retry_after)
        return self._retry(request, 429, spider) or response