# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import re
from pathlib import Path
from urllib import parse

from faker import Faker
from retrying import retry
from scrapy import signals
# useful for handling different item types with a single interface
from scrapy.http import HtmlResponse

from apps.patent.patent.spiders.account.core.patent_account_login import get_random_num, encrypt_aes_key
from apps.patent.patent.spiders.account.core.patent_user import PATENT_USER_POOL
from components.middlewares.downloadmiddlewares.public.company_ip_by_api import (
    CompanyIpByApiMiddleware,
)
from project_setting import JS_ENV_SERVER_URL
from utils.proxies_tools import get_company_ip_crawler_by_api
from utils.rs_utils.rs6 import get_file_cache, RSException, requests, Rs6Session


class PatentSpiderMiddleware:
    """Pass-through spider middleware (Scrapy template).

    Every hook simply forwards its input unchanged; the class exists so
    that project-specific behaviour can be added later without touching
    settings. Methods left undefined would behave identically.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and register signals."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider; ``None`` continues."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward every request/item produced by the spider unchanged."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """No special handling: returning ``None`` lets Scrapy proceed."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests unchanged (requests only)."""
        yield from start_requests

    def spider_opened(self, spider):
        """Log the spider name when it opens."""
        spider.logger.info("Spider opened: %s" % spider.name)


class PatentDownloaderMiddleware(CompanyIpByApiMiddleware):
    """Downloader middleware for epub.cnipa.gov.cn that handles the rs6
    anti-bot challenge.

    On a challenge response (HTTP 412/202) the page and its dynamic JS are
    posted to the external JS-env server, which returns the rs6 cookie;
    the request is then retried with that cookie attached.  On hard errors
    (400/404/407/408/500) the proxy is rotated and the request retried.
    Proxy rotation and ``_retry`` come from ``CompanyIpByApiMiddleware``.
    """

    # Kept for compatibility with external code that reads it; unused here.
    src_file = None

    def process_request(self, request, spider):
        """Attach a random User-Agent, then run the proxy middleware.

        Returns whatever ``CompanyIpByApiMiddleware.process_request``
        returns (normally ``None`` so downloading continues).
        """
        request.headers.update({"User-Agent": Faker().user_agent()})
        return super().process_request(request, spider)

    @property
    @retry(stop_max_attempt_number=10, wait_fixed=1000)
    def cache_response_text(self):
        """Fetch a fresh challenge page from the site via a new proxy.

        Retries up to 10 times (1 s apart) until the body contains the
        ``src="..." r='m'`` script tag carrying the rs6 challenge JS,
        otherwise raises ``RSException`` with the decoded body.
        """
        response = requests.get(
            "http://epub.cnipa.gov.cn/Ipc",
            headers={
                "User-Agent": Faker().user_agent(),
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
                "Accept-Encoding": "gzip, deflate",
                "Connection": "keep-alive",
                "Upgrade-Insecure-Requests": "1",
                "Priority": "u=0, i",
            },
            proxies=get_company_ip_crawler_by_api(),
            timeout=30,
        )
        resp = response.text
        if re.findall(""" src="(.*?)" r=[\"']m['\"]>""", resp):
            return resp
        raise RSException(response.content.decode())

    @staticmethod
    def _extract_set_cookie(response):
        """Parse the first ``Set-Cookie`` header into a name->value dict.

        Splits each pair on the first ``=`` only, so cookie values that
        themselves contain ``=`` (e.g. base64 payloads) no longer raise
        ``ValueError``.  Returns ``{}`` when no header is present.
        """
        set_cookie_list = response.headers.getlist("Set-Cookie")
        if not set_cookie_list:
            return {}
        set_cookie = set_cookie_list[0].decode()
        return dict(i.split("=", 1) for i in set_cookie.split("; ") if "=" in i)

    def _solve_rs_cookie(self, url, html_text, cookie_dict):
        """Compute the rs6 cookie for *html_text* via the JS-env server.

        Downloads the dynamic challenge script referenced by the page's
        ``src="..." r='m'`` tag, then posts page, script and *cookie_dict*
        to ``{JS_ENV_SERVER_URL}/rs6`` and returns the ``data`` payload.
        Raises ``IndexError`` when the page has no challenge script tag.
        """
        win_ts_url = re.findall(""" src="(.*?)" r=[\"']m['\"]>""", html_text)[0]
        win_ts = get_file_cache(parse.urljoin(url, win_ts_url))
        return requests.post(
            f"{JS_ENV_SERVER_URL}/rs6",
            json={
                "url": url,
                "htmltext": html_text,
                "jstext": win_ts,
                "cookie_dict": cookie_dict,
            },
        ).json()["data"]

    def process_response(self, request, response, spider):
        """Retry challenge (412/202) and hard-error (4xx/500) responses.

        Returns a retried copy of *request*, or the original *response*
        when the status needs no handling or retries are exhausted.
        """
        resp = response.text
        set_cookie_dict = self._extract_set_cookie(response)

        if response.status in [412, 202]:
            # Challenge page: solve it and retry with the rs6 cookie.
            cookie_dict = {**set_cookie_dict, **getattr(request, "cookies", {})}
            rs_cookie = self._solve_rs_cookie(response.url, resp, cookie_dict)
            new_request = request.copy()
            new_request.cookies.update(set_cookie_dict)
            new_request.cookies.update(rs_cookie)
            return self._retry(new_request, "rs cookie retry", spider) or response

        if response.status in [400, 404, 407, 408, 500]:
            # Hard error: drop cookies and rotate to a fresh proxy.
            new_request = request.copy()
            new_request.headers.pop("Cookie", None)
            new_request.cookies = {}
            new_proxy = self._get_new_proxy()
            new_request.meta["proxy"] = new_proxy
            new_request.meta["http_proxy"] = new_proxy
            new_request.meta["proxies_info"] = new_proxy  # raw proxy info
            if resp.strip() == "":
                # Empty body: fetch a challenge page out-of-band and
                # pre-solve the rs6 cookie for the retried request.
                try:
                    resp = self.cache_response_text
                    cookie_dict = {**set_cookie_dict, **getattr(request, "cookies", {})}
                    rs_cookie = self._solve_rs_cookie(response.url, resp, cookie_dict)
                    # Fix: update the already-prepared request; the old code
                    # re-copied the original request here, discarding the
                    # fresh proxy meta and the Cookie-header removal above.
                    new_request.cookies.update(set_cookie_dict)
                    new_request.cookies.update(rs_cookie or {})
                except Exception:
                    # Best effort only — retry with the bare request when the
                    # out-of-band solve fails (was a bare ``except:`` that
                    # also swallowed KeyboardInterrupt/SystemExit).
                    pass
            return self._retry(new_request, "rs cookie 400", spider) or response

        return response

    def process_exception(self, request, exception, spider):
        """Delegate download exceptions to the proxy-rotating base class."""
        return CompanyIpByApiMiddleware.process_exception(self, request, exception, spider)

    def spider_opened(self, spider):
        """Log the spider name when it opens."""
        spider.logger.info("Spider opened: %s" % spider.name)


class PatentAccountMiddleware(PatentDownloaderMiddleware):
    """Downloader middleware for the logged-in pss-system.cponline API.

    Performs the download itself through a shared ``Rs6Session`` using an
    account from ``PATENT_USER_POOL``, then classifies the response:
    rs6 challenge (412/202), hard error (400/404/407/408), expired login
    (401) and business errors wrapped in HTTP 200 all trigger retries.
    """

    # One session shared by every request this middleware handles.
    session = Rs6Session()

    def process_request(self, request, spider):
        """Issue the request through the rs6-aware session, bypassing Scrapy.

        Attaches the pooled account's User-Agent, its cookies (sent as
        headers) and an ``EncryptAesKey`` header derived from the request's
        ``key`` cb_kwarg (or a random one), then wraps the session result
        in an ``HtmlResponse``.  Requests flagged with ``retry_reason``
        return ``None`` so they go through the normal download path.
        """
        CompanyIpByApiMiddleware.process_request(self, request, spider)
        cb_kwargs = getattr(request, "cb_kwargs", {})
        if request.meta.get("retry_reason"):
            return None
        key = cb_kwargs.get("key") or get_random_num()
        user = PATENT_USER_POOL.get_user()
        request.headers.update(
            {
                "User-Agent": user.headers["User-Agent"],
                **user.cookies,
                "language": "zh",
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Origin": "https://pss-system.cponline.cnipa.gov.cn",
                "Referer": "https://pss-system.cponline.cnipa.gov.cn/retrieveList?prevPageTit=changgui",
                "EncryptAesKey": encrypt_aes_key(key),
            }
        )
        # Remember which account served this request so a 401 can evict it.
        request.meta["username"] = user.username

        response = self.session.request_core(
            proxies=(
                {"http": request.meta.get("proxies_info"), "https": request.meta.get("proxies_info")}
                if request.meta.get("proxies_info")
                else None
            ),
            url=request.url,
            method=request.method,
            headers=dict(request.headers.to_unicode_dict()),
            data=request.body,
            cookies=request.cookies,
            verify=False,
        )
        return HtmlResponse(
            url=response.url,
            status=response.status_code,
            body=response.content,
            request=request,
        )

    @staticmethod
    def _parse_set_cookie(response):
        """Parse the first ``Set-Cookie`` header into a dict (``{}`` if absent).

        Splits on the first ``=`` only so cookie values containing ``=``
        (e.g. base64 payloads) no longer raise ``ValueError``.
        """
        set_cookie_list = response.headers.getlist("Set-Cookie")
        if not set_cookie_list:
            return {}
        set_cookie = set_cookie_list[0].decode()
        return dict(i.split("=", 1) for i in set_cookie.split("; ") if "=" in i)

    def _rs6_cookie(self, url, html_text, cookie_dict):
        """Solve the rs6 challenge in *html_text* via the JS-env server.

        Fetches the dynamic script referenced by the ``src="..." r='m'``
        tag and posts page, script and cookies to ``/rs6``; returns the
        ``data`` payload.  Raises ``IndexError`` without a script tag.
        """
        win_ts_url = re.findall(""" src="(.*?)" r=[\"']m['\"]>""", html_text)[0]
        win_ts = get_file_cache(parse.urljoin(url, win_ts_url))
        return requests.post(
            f"{JS_ENV_SERVER_URL}/rs6",
            json={
                "url": url,
                "htmltext": html_text,
                "jstext": win_ts,
                "cookie_dict": cookie_dict,
            },
        ).json()["data"]

    def process_response(self, request, response, spider):
        """Classify the response and retry when needed; else pass it through."""
        resp = response.text
        # The rs6 challenge page carries a ``<meta ... r='m'>`` marker.
        content = re.findall(r"""<meta .*? content=\"([^<]*?)\" r=[\"']m['\"]>""", resp, re.S)
        set_cookie_dict = self._parse_set_cookie(response)

        if content and response.status in [412, 202]:
            # Challenge page: solve it and retry with the rs6 cookie.
            cookie_dict = {**set_cookie_dict, **getattr(request, "cookies", {})}
            rs_cookie = self._rs6_cookie(response.url, resp, cookie_dict)
            new_request = request.copy()
            new_request.cookies.update(set_cookie_dict)
            new_request.cookies.update(rs_cookie)
            # Marked so process_request lets the retry use the normal path.
            new_request.meta["retry_reason"] = "rs cookie retry"
            return self._retry(new_request, "rs cookie retry", spider) or response

        if response.status in [400, 404, 407, 408]:
            # Hard error: drop cookies and rotate to a fresh proxy.
            new_request = request.copy()
            new_request.headers.pop("Cookie", None)
            new_request.cookies = {}
            new_proxy = self._get_new_proxy()
            new_request.meta["proxy"] = new_proxy
            new_request.meta["http_proxy"] = new_proxy
            new_request.meta["proxies_info"] = new_proxy  # raw proxy info
            if resp.strip() == "":
                # Empty body: solve the challenge out-of-band (best effort).
                try:
                    resp = self.cache_response_text
                    cookie_dict = {**set_cookie_dict, **getattr(request, "cookies", {})}
                    rs_cookie = self._rs6_cookie(response.url, resp, cookie_dict)
                    # Fix: keep the already-prepared request; the old code
                    # re-copied the original here, discarding the fresh
                    # proxy meta and the Cookie-header removal above.
                    new_request.cookies.update(set_cookie_dict)
                    new_request.cookies.update(rs_cookie or {})
                except Exception:
                    # Was a bare ``except:``; narrowed so KeyboardInterrupt
                    # and SystemExit propagate.
                    pass
            return self._retry(new_request, "rs cookie 400", spider) or response

        if response.status in [401]:
            # Login expired: invalidate the account's cookies and retry so a
            # fresh login is performed for this user.
            username = request.meta.get("username")
            if username:
                new_request = request.copy()
                PATENT_USER_POOL.get_user(username=username).set_cookies(None)
                return self._retry(new_request, "rs cookie retry", spider) or response

        if response.status in [200]:
            # Business-level error wrapped in an HTTP 200 JSON envelope.
            try:
                if response.json()["status"] != 200:
                    new_request = request.copy()
                    spider.logger.info(f"{response.url} 请求返回错误: {response.text}")
                    new_request.meta["retry_reason"] = "rs cookie retry"
                    return self._retry(new_request, "rs cookie retry", spider) or response
            except Exception:
                # Non-JSON body (e.g. a file download) — pass it through.
                pass

        return response
