from scrapy import signals
from itemadapter import ItemAdapter
import random
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from w3lib.url import canonicalize_url



# Random User-Agent middleware
class RandomUserAgentMiddleware(UserAgentMiddleware):
    """Set a random User-Agent (from the USER_AGENTS setting) on each request.

    For Splash requests the chosen UA is also written into the Splash
    ``args`` so the headless browser reports the same User-Agent as the
    HTTP header.
    """

    def process_request(self, request, spider):
        """Pick ONE random UA and apply it consistently.

        Fix: the original called ``random.choice`` twice, so a Splash
        request could get one UA in ``meta['splash']['args']['ua']`` and a
        *different* UA in the ``User-Agent`` header.
        """
        user_agents = spider.settings.get('USER_AGENTS')
        if not user_agents:
            # Setting missing/empty: skip instead of crashing in random.choice.
            return
        ua = random.choice(user_agents)
        if 'splash' in request.meta:
            request.meta['splash']['args']['ua'] = ua
        # setdefault: keep any UA a spider set explicitly on this request.
        request.headers.setdefault('User-Agent', ua)

# Random proxy middleware
class ProxyMiddleware(HttpProxyMiddleware):
    """Route each request through a proxy chosen at random from PROXY_LIST."""

    def __init__(self, proxy_list):
        # List of proxy URLs (e.g. 'http://host:port') to rotate through.
        self.proxies = proxy_list

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from the crawler's PROXY_LIST setting."""
        return cls(
            proxy_list=crawler.settings.get('PROXY_LIST')
        )

    def process_request(self, request, spider):
        """Attach a random proxy to the request's meta.

        Fix: the original logged the identical message at both INFO and
        DEBUG level for every request; keep a single DEBUG line. Also
        skip gracefully when the proxy list is missing/empty instead of
        raising IndexError on every request.
        """
        if not self.proxies:
            return
        proxy = random.choice(self.proxies)
        request.meta['proxy'] = proxy
        spider.logger.debug(f'当前使用的代理IP: {proxy}')


# Random Splash instance
class RandomSplashMiddleware:
    """Spread Splash rendering load by picking a random Splash instance per request."""

    def __init__(self, splash_urls):
        # Pool of Splash base URLs to choose from.
        self.splash_urls = splash_urls

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from the SPLASH_URLS crawler setting."""
        return cls(splash_urls=crawler.settings.get('SPLASH_URLS'))

    def process_request(self, request, spider):
        """Rewrite the Splash endpoint of a Splash request to a random instance.

        Non-Splash requests are left untouched.
        """
        if 'splash' not in request.meta:
            return
        chosen = random.choice(self.splash_urls)
        # Point this request's Splash endpoint at the chosen instance.
        request.meta['splash']['endpoint'] = chosen + '/execute'
        spider.logger.debug(f'Using Splash instance: {chosen}')