#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/6 13:55
# @Author  : 王凯
# @File    : rs_6_middlewares.py
# @Project : scrapy_spider
import os.path
import re
import urllib
from functools import lru_cache
from pathlib import Path
from urllib import parse

import requests
from faker import Faker
from py_mini_racer import MiniRacer
from scrapy import Request
from scrapy.downloadermiddlewares.retry import RetryMiddleware

from project_setting import JS_ENV_SERVER_URL
from utils.proxies_tools import get_company_ip_crawler_by_api, logger as log

USER_AGENT = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"
)


class Rs6RetryMiddleware(RetryMiddleware):
    """Retry middleware for sites protected by the "rs6" JavaScript anti-bot
    challenge.

    On a 202/412 response it extracts the challenge payload from the HTML,
    evaluates the bundled ``rs_v8.js`` locally with MiniRacer (or delegates
    to the remote JS-env service when ``USE_RS_API`` is set), then retries
    the request with the freshly computed cookie.  On 400 responses and on
    download exceptions it rotates the proxy and User-Agent instead.
    """

    def __init__(self, settings):
        super().__init__(settings)
        # rs_v8.js ships next to this module; it is the V8 harness that
        # computes the anti-bot cookie locally.
        with open(rf"{Path(__file__).parent.as_posix()}/rs_v8.js", "r", encoding="utf-8") as f:
            self.rs_js = f.read()
        # When True, cookie generation is delegated to the remote JS-env
        # service instead of the local MiniRacer runtime.
        self.use_rs_api = settings.get("USE_RS_API", False)

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware, refusing to run alongside the stock
        RetryMiddleware (both would retry the same responses).

        The ``or {}`` guard keeps the intended, helpful error message even
        when DOWNLOADER_MIDDLEWARES is not configured at all (``get`` would
        otherwise return None and ``in`` would raise TypeError).
        """
        middlewares = crawler.settings.get("DOWNLOADER_MIDDLEWARES") or {}
        if "scrapy.downloadermiddlewares.retry.RetryMiddleware" not in middlewares:
            raise Exception(
                "scrapy.downloadermiddlewares.retry.RetryMiddleware should set None in DOWNLOADER_MIDDLEWARES when use Rs6RetryMiddleware !!!\n"
                """example: \n\tDOWNLOADER_MIDDLEWARES = {"scrapy.downloadermiddlewares.retry.RetryMiddleware": None}"""
            )
        return cls(crawler.settings)

    @staticmethod
    def _cookie_str_to_dict(cookie_str):
        """Parse a ``"k1=v1; k2=v2"`` cookie string into a dict.

        ``split('=', 1)`` keeps values that themselves contain '=' (e.g.
        base64 padding) intact instead of crashing the dict construction.
        """
        return dict(pair.split('=', 1) for pair in cookie_str.split('; ') if '=' in pair)

    def _gen_new_cookie(self, request, response, spider):
        """Merge the challenge response's Set-Cookie with the request's own
        cookies, compute the rs cookie, and return the combined dict to use
        on the retried request."""
        set_cookie_list = response.headers.getlist("Set-Cookie")
        set_cookie_dict = {}
        if set_cookie_list:
            set_cookie_dict = self._cookie_str_to_dict(set_cookie_list[0].decode())
        cookie_dict = {**set_cookie_dict, **getattr(request, "cookies", {})}
        rs_cookie = self.gen_new_cookie(
            response,
            cookie_dict,
            proxy=request.meta.get("proxies_info"),
            headers=request.headers.to_unicode_dict(),
        )
        spider.logger.debug(f"[{self.__class__.__name__}] rs new cookie : {rs_cookie}")
        cookie_header = request.headers.get('Cookie')
        if cookie_header:
            request_cookies = self._cookie_str_to_dict(cookie_header.decode())
        else:
            request_cookies = {}
        request_cookies.update(rs_cookie)
        return request_cookies

    def gen_new_cookie(self, response, base_cookie=None, proxy=None, headers=None):
        """Compute the anti-bot cookie for *response*.

        Works with both requests.Response (``.content``) and scrapy
        responses (``.text`` fallback).  Returns *base_cookie* unchanged
        when the page carries no challenge meta tag.
        """
        if headers is None:
            headers = {
                "User-Agent": USER_AGENT,
            }
        if base_cookie is None:
            base_cookie = {}
        try:
            resp = response.content.decode()
        except Exception:
            # scrapy responses have no .content; fall back to .text.
            resp = response.text
        rs_cookie = base_cookie or {}
        content = re.findall(r"""<meta\s*.*?\s*content="(.*?)" r=[\"']m['\"]>""", resp)
        if content:
            content = content[0]
            win_rs = re.findall(r"<script type=\"text/javascript\" r=[\"']m['\"]>(.*?)</script>", resp)[0]
            win_ts_url = re.findall(""" src="(.*?)" r=[\"']m['\"]>""", resp)[0]
            win_ts_url = parse.urljoin(response.url, win_ts_url)
            win_ts = self.get_file_cache(win_ts_url=win_ts_url, logger=log, cookie=base_cookie, proxy=proxy, headers=headers)
            if self.use_rs_api:
                # Delegate challenge solving to the remote JS execution service.
                rs_cookie = requests.post(
                    f"{JS_ENV_SERVER_URL}/rs6",
                    json={"url": response.url, "htmltext": resp, "jstext": win_ts, "cookie_dict": base_cookie},
                ).json()['data']
            else:
                # Splice page-specific pieces into the local JS harness and
                # run it in MiniRacer.
                file = self.rs_js.replace("CONTENT", content)
                file = file.replace("https://www.nmpa.gov.cn/index.html", urllib.parse.urlparse(response.url).geturl())
                file = file.replace("JSCODE", win_rs + ";" + win_ts + ";")
                ctx = MiniRacer()
                ctx.eval(file)
                rs_cookie = ctx.call("get_run")
                rs_cookie = rs_cookie.get("cookie") or {}
                rs_cookie.update(base_cookie)
        return rs_cookie

    @staticmethod
    def get_file_cache(win_ts_url, logger=None, proxy=None, cookie=None, headers=None):
        """Fetch the challenge JS at *win_ts_url*, caching it under /tmp.

        Fixes two defects of the previous version: the cache was written to
        /tmp but looked up in the working directory (so it never hit), and
        a ConnectionError was re-raised immediately, so the 3-attempt loop
        never actually retried.
        """
        if proxy is None:
            proxy = get_company_ip_crawler_by_api()
        if cookie is None:
            cookie = {}
        if logger is None:
            logger = log
        if isinstance(proxy, dict):
            proxies = proxy
        else:
            proxies = {
                "http": proxy,
                "https": proxy,
            }
        # Cache read and write must use the same location.
        cache_path = Path("/tmp") / win_ts_url.split('/')[-1]
        last_exc = None
        for _ in range(3):
            try:
                if cache_path.exists():
                    return cache_path.read_text(encoding='utf-8')
                text = requests.get(
                    win_ts_url,
                    proxies=proxies,
                    cookies=cookie,
                    timeout=5,
                    headers=headers,
                ).text
                cache_path.write_text(text, encoding='utf-8')
                return text
            except requests.exceptions.ConnectionError as e:
                # Log and let the loop try again instead of raising at once.
                last_exc = e
                logger.error("win_ts_url: {} requests is failed".format(win_ts_url))
        raise Exception("win_ts_url: {} is failed".format(win_ts_url)) from last_exc

    def process_response(self, request, response, spider):
        """Retry 202/412 (challenge) responses with a fresh rs cookie;
        retry 400 responses with a new proxy and User-Agent.

        Always returns a Response or the value of ``self._retry`` (a
        Request), never None.
        """
        if response.status in [202, 412]:
            retry_times = request.meta.get("retry_times", 0)
            if retry_times < self.max_retry_times:
                new_request: Request = request.copy()
                new_request.meta["retry_times"] = retry_times + 1
                try:
                    new_cookies = self._gen_new_cookie(request, response, spider)
                    new_request.cookies.update(new_cookies)
                    new_request.priority += 1
                    return self._retry(new_request, "rs_bind", spider) or response
                except Exception as e:
                    # Cookie generation failed (malformed page, JS error,
                    # network); log it and fall back to rotating the proxy.
                    spider.logger.warning(f"[{self.__class__.__name__}] gen rs cookie failed: {e!r}")
                    return self.set_new_proxy_and_headers(request, response, spider)

        if response.status in [400]:
            return self.set_new_proxy_and_headers(request, response, spider)

        return response

    def process_exception(self, request, exception, spider):
        """On retryable download exceptions, retry with a fresh proxy and
        User-Agent (unless the request opted out via ``dont_retry``)."""
        if isinstance(exception, self.exceptions_to_retry) and not request.meta.get(
                "dont_retry", False
        ):
            return self.set_new_proxy_and_headers(request, None, spider)

    def set_new_proxy_and_headers(self, request, response, spider):
        """Copy *request* with a fresh proxy and random User-Agent and
        schedule a retry.

        Returns *response* once the retry budget is exhausted so that
        ``process_response`` never hands scrapy a None (when called from
        ``process_exception``, *response* is None, which is a valid return
        there).
        """
        retry_times = request.meta.get("retry_times", 0)
        if retry_times < self.max_retry_times:
            new_request = request.copy()
            new_request.meta["retry_times"] = retry_times + 1
            new_request.meta["proxy"] = get_company_ip_crawler_by_api().get("http")
            new_request.meta["proxies_info"] = new_request.meta["proxy"]
            # Drop the stale cookie so the site issues a fresh challenge.
            new_request.headers.pop('Cookie', None)
            new_request.headers["User-Agent"] = Faker().user_agent()
            return self._retry(new_request, "init_proxies", spider) or response
        return response