#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/23 09:50
# @Author  : 王凯
# @File    : __init__.py.py
# @Project : scrapy_spider
import os
import re
from functools import lru_cache
from pathlib import Path
from urllib import parse

import requests
from faker import Faker
from loguru import logger
from py_mini_racer import MiniRacer
from requests import Session
from sympy.abc import lamda

from project_setting import JS_ENV_SERVER_URL
from utils.proxies_tools import get_company_ip_crawler_by_api
from retrying import retry


@lru_cache(maxsize=10)
def get_file_cache(win_ts_url):
    """Return the rs6 challenge JS at *win_ts_url*, with disk + memory caching.

    The file is cached under ``/tmp`` keyed by the URL's last path segment.
    NOTE(review): two different URLs sharing a basename would collide on the
    same cache file — presumably the rs6 JS filename is unique per site; confirm.

    :param win_ts_url: absolute URL of the challenge JS file.
    :return: the file's text content.
    """
    file_path = Path("/tmp") / win_ts_url.split("/")[-1]
    if file_path.exists():
        return file_path.read_text(encoding="utf-8")
    text = requests.get(
        win_ts_url,
        timeout=5,
        proxies=get_company_ip_crawler_by_api(static=False),
        headers={"User-Agent": Faker().user_agent()},
        verify=False,  # NOTE(review): TLS verification deliberately off for this host — confirm
    ).text
    file_path.write_text(text, encoding="utf-8")
    return text


class RSException(Exception):
    """Raised when a response is (still) an rs6 anti-bot challenge page."""


class Rs6Session(object):
    """A ``requests.Session`` wrapper that transparently passes the "rs6"
    (RiverSecurity) anti-bot challenge used by http://epub.cnipa.gov.cn.

    Flow: when a response looks like a challenge page (status 412/202 carrying
    the rs6 ``<meta ... r='m'>`` marker), the page HTML plus its challenge JS
    are posted to an external JS execution service (``JS_ENV_SERVER_URL``)
    which returns the anti-bot cookies.  Those cookies are installed on the
    session and :class:`RSException` is raised so the ``@retry`` decorator on
    :meth:`request_core` replays the request with the new cookies.
    """

    # Markers found on the rs6 challenge page; the character class tolerates
    # either quote style around the trailing ``r=m`` attribute.
    _SRC_PATTERN = r""" src="(.*?)" r=["']m['"]>"""
    _META_PATTERN = r"""<meta\s*.*? content="(.*?)" r=[\"']m['\"]>"""

    def __init__(self, proxies=None, cookies=None, debug=False, base_url=None):
        """
        :param proxies: optional requests-style proxies dict; when omitted a
            company proxy is fetched in :meth:`init`.
        :param cookies: optional initial cookies for the session.
        :param debug: when True, log proxy and cookie state.
        :param base_url: page used by :attr:`cache_response_text` to fetch a
            fresh challenge; defaults to the Ipc index page.
        """
        self.url = base_url if base_url else "http://epub.cnipa.gov.cn/Ipc"
        self.session: Session = requests.Session()
        self.proxies = proxies
        self.cookies = cookies
        self.debug = debug
        # Local JS shim kept for a (currently disabled) in-process MiniRacer path.
        with open(f"{Path(__file__).parent.as_posix()}/rs_v8.js", "r", encoding="utf-8") as f:
            self.rs_js = f.read()
        self.src_file = None

    def init(self):
        """(Re)initialise proxies, cookies and default headers on the session."""
        if self.proxies:
            self.session.proxies = self.proxies
        else:
            self.session.proxies = get_company_ip_crawler_by_api(static=True)
        if self.cookies:
            self.session.cookies.update(self.cookies)
        if self.debug:
            logger.debug(f"设置代理{self.session.proxies}")
        self.session.headers = {
            "Accept": "*/*",
            "Accept-Language": "zh-CN,zh;q=0.9,zu;q=0.8,be;q=0.7,en;q=0.6",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": Faker().user_agent(),
        }

    @property
    @retry(stop_max_attempt_number=10, wait_fixed=1000)
    def cache_response_text(self):
        """Fetch a fresh challenge page from :attr:`url`.

        Retries (up to 10 times, 1s apart) until the body actually contains the
        rs6 ``src=... r='m'`` marker; otherwise raises :class:`RSException`
        with the decoded body so ``@retry`` kicks in.
        """
        response = requests.get(
            self.url,
            headers={
                "User-Agent": Faker().user_agent(),
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
                "Accept-Encoding": "gzip, deflate",
                "Connection": "keep-alive",
                "Upgrade-Insecure-Requests": "1",
                "Priority": "u=0, i",
            },
            proxies=get_company_ip_crawler_by_api(),
            timeout=30,
        )
        resp = response.text
        if re.findall(self._SRC_PATTERN, resp):
            return resp
        raise RSException(response.content.decode())

    def _solve_rs_cookie(self, page_url, htmltext, jstext, cookie_dict):
        """POST the challenge to the external JS-env service; return the rs cookie dict."""
        json_data = {
            "url": page_url,
            "htmltext": htmltext,
            "jstext": jstext,
            "cookie_dict": cookie_dict,
        }
        return requests.post(
            f"{JS_ENV_SERVER_URL}/rs6", json=json_data
        ).json()["data"]

    @retry(stop_max_attempt_number=5, retry_on_exception=lambda x: isinstance(x, RSException))
    def request_core(self, url, **kwargs):
        """Issue one request; on a challenge response, solve it, install the
        cookies and raise :class:`RSException` so ``@retry`` replays the call.

        :param url: target URL.
        :param kwargs: forwarded to ``Session.request``; ``method`` (default
            ``"GET"``) and ``timeout`` (default 10) are extracted first.
        :return: the final non-challenge ``requests.Response``.
        """
        method = kwargs.pop("method", "GET")
        timeout = kwargs.pop("timeout", 10)
        headers = kwargs.get("headers")
        if headers:
            # Cookies are managed by the session: never let a caller-supplied
            # Cookie header shadow the rs cookies installed below.
            headers.pop("Cookie", None)
            headers.pop("cookie", None)
            self.session.headers.update(headers)

        response = self.session.request(method=method, url=url, timeout=timeout, **kwargs)
        resp = response.content.decode()
        if self.debug:
            logger.warning(f"{response} {self.session.cookies.get_dict()}")
        content = re.findall(self._META_PATTERN, resp)

        if content and response.status_code in [412, 202]:
            win_ts_url = parse.urljoin(response.url, re.findall(self._SRC_PATTERN, resp)[0])
            win_ts = get_file_cache(win_ts_url)
            cookie_dict = self.session.cookies.get_dict()
            if resp.strip() == "":
                # Empty challenge body: fetch a fresh challenge page and solve
                # that instead; best-effort — fall back to no rs cookies.
                try:
                    resp = self.cache_response_text
                    win_ts_url = parse.urljoin(response.url, re.findall(self._SRC_PATTERN, resp)[0])
                    win_ts = get_file_cache(win_ts_url)
                    rs_cookie = self._solve_rs_cookie(response.url, resp, win_ts, cookie_dict)
                except Exception:
                    rs_cookie = {}
            else:
                rs_cookie = self._solve_rs_cookie(response.url, resp, win_ts, cookie_dict)

            for k, v in rs_cookie.items():
                self.session.cookies.set(k, v)
            for k, v in response.cookies.items():
                self.session.cookies.set(k, v)
            # Cookies are now installed; let @retry re-issue the request.
            raise RSException
        return response

    def request(self, url, **kwargs):
        """Public entry point: retry across proxy/timeout failures and 400s.

        Each attempt starts from a clean cookie jar and fresh proxy (see
        :meth:`request_one`).

        :raises RSException: after ``retry_times`` failed attempts.
        """
        retry_times = 5
        logger.debug(kwargs)
        for _ in range(retry_times):
            try:
                resp = self.request_one(url, **kwargs)
            except (
                    requests.exceptions.ReadTimeout,
                    requests.exceptions.ProxyError,
            ):
                continue
            if resp.status_code == 400:
                self.session.cookies.clear()
                continue
            else:
                return resp
        else:
            raise RSException(f'max retry in one request {retry_times}')

    def request_one(self, url, **kwargs):
        """Reset cookies, re-init the session, then run :meth:`request_core`."""
        self.session.cookies.clear()
        self.init()
        return self.request_core(url, **kwargs)


if __name__ == '__main__':
    # Smoke test: fetch one detail page through the rs6-aware session.
    session = Rs6Session(debug=True)
    detail_url = "http://epub.cnipa.gov.cn/Sw/SwDetail"
    form_data = {
        "an": "VwoA+KVlTGVdznYD4ArqLA==",
        "pubType": "3",
        "ggr": "",
        "__RequestVerificationToken": ""
    }
    request_headers = {
        "Referer": "http://epub.cnipa.gov.cn/Sw/SwDetail",
    }
    res = session.request(
        url=detail_url, data=form_data, method="POST", headers=request_headers, verify=False
    )
    print(res)
    print(res.text)