import re
import time

import requests
import redis
import scrapy.http
import selenium
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy import signals, Spider
from scrapy.http import Request, Response
from loguru import logger
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException


class AmazonSeleniumDownloaderMiddleware:
    """Scrapy downloader middleware that fetches pages with headless Selenium
    Chrome routed through a rotating pool of paid proxy IPs kept in Redis.

    Per request:
      1. pick a random proxy from the Redis set,
      2. rewrite the proxy-auth Chrome extension's ``background.js`` for it,
      3. load the URL in a fresh headless Chrome,
      4. on failure, swap the dead proxy for a fresh one and retry
         (up to ``MAX_RETRY_TIMES``); exhausted retries yield an empty page.
    """

    # --- paid proxy provider configuration ---
    # SECURITY NOTE(review): credentials are hard-coded in source; move them
    # to Scrapy settings or environment variables before sharing this file.
    API_URL = "https://dps.kdlapi.com/api/getdps/?secret_id=o5b3w54kddfiskjsu5ta&signature=tr45ga5grnvp1943h0paert5qwquy7cb&num=1&pt=1&sep=1"
    USERNAME = "d4472377283"
    PASSWORD = "rudm2ozb"
    # --- selenium configuration ---
    BROWSER_EXTENSION_PATH = r"D:\ins\browser_driver\chromedriver.exe"  # chromedriver binary
    EXTENSION_PATH = r"D:\ins\Selenium-Chrome-HTTP-Private-Proxy-master"  # proxy-auth extension dir
    # --- redis ---
    client = redis.Redis(decode_responses=True)
    redis_key = ""  # name of the Redis set holding the pool (set to "<spider>:ip" in from_crawler)
    IP_POOL_SIZE = 5  # number of proxies kept in the pool
    CLEAR_ON_START = True  # drop any stale pool when the spider STARTS (crashes often skip shutdown)
    # --- retry ---
    DOWNLOAD_TIMEOUT = 2  # page-load timeout in seconds (enforced via set_page_load_timeout)
    MAX_RETRY_TIMES = 3  # max retries per request (suggest keeping equal to IP_POOL_SIZE)
    RETRY_ENABLED = 'True'  # retry on by default

    # --- run statistics ---
    proxy_ip_count = 0  # total proxies consumed during this run

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware: name the Redis pool after the spider,
        optionally clear a stale pool, pre-fill it, and hook spider_closed."""
        cls.redis_key = crawler.spider.redis_key + ":ip"
        # Clear on START rather than on close: most runs end by being
        # interrupted, so shutdown-time cleanup would rarely execute.
        if cls.CLEAR_ON_START:
            cls.client.delete(cls.redis_key)
            logger.debug("删除成功!!!")

        # Pre-fill the IP pool.
        for _ in range(cls.IP_POOL_SIZE):
            cls.client.sadd(cls.redis_key, cls.get_new_proxy_ip())

        # Connect the close signal so we can report usage and release Redis.
        s = cls()
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def spider_closed(self):
        """Report proxy consumption and release the Redis connection."""
        logger.info(f"共计消耗{self.proxy_ip_count}个代理IP")
        self.client.close()

    def process_request(self, request: Request, spider):
        """Fetch ``request.url`` through Selenium with a pooled proxy.

        Returns an HtmlResponse on success; rotates the proxy and retries on
        connection errors or error pages; returns an EMPTY HtmlResponse once
        MAX_RETRY_TIMES is exhausted so one bad URL never crashes the spider.
        """
        request.meta.setdefault('retry_times', 0)
        while request.meta['retry_times'] <= self.MAX_RETRY_TIMES:
            if request.meta['retry_times'] != 0:
                logger.warning(f"重试次数:{request.meta['retry_times']}/{self.MAX_RETRY_TIMES}url:{request.url}")
            request.meta['retry_times'] += 1

            # Pick a random pooled proxy and point the auth extension at it.
            proxy_ip = self.client.srandmember(self.redis_key, 1)[0]
            split_proxy_ip = re.findall("@(.*):(.*)/", proxy_ip)[0]
            self.create_background_js(*split_proxy_ip)  # rewrite the extension's background.js

            browser = self._new_browser()
            try:
                browser.get(request.url)
                body = browser.page_source

                if 'amazon' not in body:
                    # case 1: proxy served an error/"cannot connect" page — rotate and retry
                    request.meta['proxy'] = proxy_ip
                    self.update_proxy_ip(request)
                else:
                    return scrapy.http.HtmlResponse(url=request.url, body=body, request=request, encoding="utf-8")
            except selenium.common.exceptions.WebDriverException as e:
                # case 2: connection failure / timeout — rotate proxy and retry
                request.meta['proxy'] = proxy_ip
                self.update_proxy_ip(request)
                logger.debug(f"重试原因:{type(e)}; URL:{request.url}")
            except Exception as e:
                # Originally swallowed silently; keep best-effort retry but log it.
                logger.debug(f"重试原因:{type(e)}; URL:{request.url}")
            finally:
                # BUGFIX: the original only quit on success, leaking a headless
                # Chrome process on every failed attempt.
                browser.quit()
        # Retries exhausted: hand back an empty page.
        return scrapy.http.HtmlResponse(url=request.url, body="", request=request, encoding="utf-8")

    def _new_browser(self):
        """Build a headless Chrome with the proxy-auth extension loaded."""
        service = Service(executable_path=self.BROWSER_EXTENSION_PATH)
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless=new")  # new-style headless (supports extensions)
        chrome_options.add_argument(f'--load-extension={self.EXTENSION_PATH}')  # proxy-auth extension
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')  # basic bot-detection evasion
        chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})  # skip images
        browser = webdriver.Chrome(service=service, options=chrome_options)
        # FIX: DOWNLOAD_TIMEOUT was declared but never applied; without it a
        # dead proxy could hang browser.get() forever. The resulting
        # TimeoutException is a WebDriverException, so the retry path handles it.
        browser.set_page_load_timeout(self.DOWNLOAD_TIMEOUT)
        return browser

    def process_response(self, request, response: Response, spider):
        """Pass responses through unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Return None: exceptions are already handled inside process_request."""
        pass

    def update_proxy_ip(self, request: Request) -> Request:
        """Replace a dead proxy using an optimistic-lock style check.

        Only evict ``request.meta['proxy']`` if it is still in the pool
        (i.e. no other request already replaced it), then hand the request
        a fresh random proxy from the pool.

        :param request: the Request whose proxy just failed
        :return: the same Request with a new ``meta['proxy']``
        """
        if self.client.sismember(self.redis_key, request.meta['proxy']):
            self.client.srem(self.redis_key, request.meta['proxy'])  # evict the dead proxy
            self.client.sadd(self.redis_key, self.get_new_proxy_ip())  # top the pool back up
        request.meta['proxy'] = self.client.srandmember(self.redis_key, 1)[0]  # draw a replacement
        return request

    @classmethod
    def get_new_proxy_ip(cls):
        """Fetch one paid proxy from the provider API.

        :return: a ready-to-use proxy URL with basic-auth credentials embedded
                 (users on an IP whitelist should adapt the auth part).
        """
        # FIX: added a timeout — requests.get with no timeout can hang forever.
        proxy_ip = requests.get(cls.API_URL, timeout=10).text
        res_proxy_ip = "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": cls.USERNAME, "pwd": cls.PASSWORD, "proxy": proxy_ip}
        # Usage accounting.
        cls.proxy_ip_count += 1
        logger.info(f"获取第{cls.proxy_ip_count}个ip: {res_proxy_ip}")
        return res_proxy_ip

    @classmethod
    def create_background_js(cls, host, port):
        """Rewrite the Chrome extension's background.js so it fixes the
        browser's proxy to ``host:port`` and answers the proxy auth challenge
        with the class credentials.
        """
        logger.info(f"解析结果{host}, {port}")
        content = f"""
            var config = {{
                mode: "fixed_servers",
                rules: {{
                  singleProxy: {{
                    scheme: "http",
                    host: "{host}",
                    port: parseInt({port})
                  }},
                  bypassList: ["localhost"]
                }}
            }};

            chrome.proxy.settings.set({{value: config, scope: "regular"}}, function() {{}});

            function callbackFn(details) {{
                return {{
                    authCredentials: {{
                        username: "{cls.USERNAME}",
                        password: "{cls.PASSWORD}"
                    }}
                }};
            }}

            chrome.webRequest.onAuthRequired.addListener(
                callbackFn,
                {{urls: ["<all_urls>"]}},
                ['blocking']
            );
            """
        # FIX: explicit encoding so the write is platform-independent.
        with open(fr"{cls.EXTENSION_PATH}\background.js", "w", encoding="utf-8") as file:
            file.write(content)
