import datetime
import json
import re
from queue import Queue
from urllib.parse import quote

import parsel
import scrapy

from apps.patent.patent.items import (
    NetPatentProItem,
    NetPatentProRowsItem,
    NetPatentLawStatusDetailItem,
    NetPatentProUpdatePatentStatusItem,
    NetPatentProRowsUpdatePatentStatusItem,
    NetPatentSimilarItem,
    NetPatentCitedItem,
    NetPatentCiteItem,
)
from apps.patent.patent.spiders.account.core.patent_account_login import get_random_num, aes_encrypt
from apps.patent.patent.spiders.patent_search import RosterPatentSpider


class AccountNetPatentProItem(NetPatentProItem):
    """NetPatentProItem variant for the account spider.

    Fields that this data source does not supply reliably are excluded
    from the update-key list so they are never overwritten on update.
    """

    __update_key__ = [
        key
        for key in NetPatentProItem.__update_key__
        if key not in {"address", "post_code", "agent", "img_url", "applicant_name"}
    ]


class AccountNetPatentProRowsItem(NetPatentProRowsItem):
    """NetPatentProRowsItem variant for the account spider.

    Fields that this data source does not supply reliably are excluded
    from the update-key list so they are never overwritten on update.
    """

    __update_key__ = [
        key
        for key in NetPatentProRowsItem.__update_key__
        if key not in {"address", "post_code", "agent", "img_url", "applicant_name", "applicant_name_one"}
    ]


class AccountNetPatentLawStatusDetailItem(NetPatentLawStatusDetailItem):
    """NetPatentLawStatusDetailItem variant that never updates ``uni``/``detail``."""

    __update_key__ = [
        key for key in NetPatentLawStatusDetailItem.__update_key__ if key not in {"uni", "detail"}
    ]


class PatentAccountSpider(RosterPatentSpider):
    """Search CNIPA (pss-system.cponline.cnipa.gov.cn) for one applicant's
    patents through the account-authenticated API.

    Every search hit yields the list-level items; when ``crawler_detail`` is
    True, follow-up requests are issued for the legal-status history, the
    patent family (同族), citations (引证) and cited-by (被引证) records.
    """

    name = "patent_account_search_spider"
    local_task_queue = Queue(maxsize=1)
    auto_next = True
    # When True, each search hit also triggers law-status / family / citation requests.
    crawler_detail = True

    # Paginated applicant-search endpoint, shared by all search requests.
    SEARCH_URL = "https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-search/search/results/getResults"
    # Maps verbose law-state descriptions to the short status labels stored in items.
    # Unmapped descriptions are passed through unchanged.
    LAW_STATE_MAPPING = {
        "发明专利申请公布": "公布",
        "发明专利权授予": "授权",
        "实用新型专利权授予": "授权",
        "外观设计专利权授予": "授权",
    }

    # ------------------------------------------------------------------ search

    def _build_search_request(self, company_name, src_data):
        """Return one POST search request for *company_name*.

        The three search expressions are AES-encrypted with a fresh random
        key and URL-quoted, mirroring what the official web front-end sends.
        ``src_data`` carries (and is mutated by the caller to advance) the
        pagination state.
        """
        key = get_random_num()
        data = {
            **src_data,
            "executableSearchExp": quote(f"""AES({aes_encrypt(f"PA='{company_name}'", key)})"""),
            "parseSearchExp": quote(f"""AES({aes_encrypt(f"PA='{company_name}'", key)})"""),
            "searchExp": quote(f"""AES({aes_encrypt(f"申请（专利权）人=({company_name})", key)})"""),
            "sortFields": "-APD",
            "dbName": "CNDB,WPDB",
            "interfaceId": "2001154",
        }
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Content-Type": "application/json",
        }
        return scrapy.FormRequest(
            self.SEARCH_URL,
            body=json.dumps(data),
            method="POST",
            headers=headers,
            cb_kwargs=dict(company_name=company_name, key=key, src_data=src_data),
        )

    def make_request_from_data(self, formatted_data: dict):
        """Build the first search request for one queued task.

        ``formatted_data`` must carry the company name under ``query_key``.
        (The previous annotation ``str or dict`` evaluated to just ``str``;
        the body always treats the argument as a dict.)
        """
        company_name = formatted_data["query_key"]
        src_data = {
            "pagination": {"page": 1, "limit": 40},
        }
        yield self._build_search_request(company_name, src_data)

    @classmethod
    def update_settings(cls, settings) -> None:
        """Install the account middleware and relax retry/HTTP-error handling."""
        downloader_middlewares = settings.get("DOWNLOADER_MIDDLEWARES") or {}
        downloader_middlewares.update(
            {
                # Disable the plain downloader middleware in favour of the
                # account-aware one (priority 543).
                "patent.middlewares.PatentDownloaderMiddleware": None,
                "patent.middlewares.PatentAccountMiddleware": 543,
            }
        )
        new_settings = {
            **(cls.custom_settings or {}),
            **{
                # Error codes are handled in callbacks instead of being dropped.
                "HTTPERROR_ALLOWED_CODES": [400, 404, 500, 200, 202, 502],
                "RETRY_TIMES": 100,
                "RETRY_HTTP_CODES": [],
                "DOWNLOADER_MIDDLEWARES": downloader_middlewares,
            },
        }
        settings.setdict(new_settings, priority="spider")

    def start_requests(self):
        """Kick off the crawl with the queued company, or a built-in sample."""
        if self.local_task_queue.full():
            obj = self.local_task_queue.get_nowait()
            company_name = obj.get("query_key")
        else:
            # Fallback sample company for local/manual runs with an empty queue.
            company_name = "烟台华正科信新材科技有限公司"
        src_data = {
            "pagination": {"page": 1, "limit": 40},
        }
        yield self._build_search_request(company_name, src_data)

    def parse(self, response, **kwargs):
        """Handle one search-result page.

        Emits the list items for every hit, then (optionally) the detail
        requests, then the request for the next result page if any remains.
        """
        company_name = kwargs.get("company_name")
        src_data = kwargs.get("src_data")
        resp = response.json()
        search_result_record = resp["t"]["searchResultRecord"]
        next_data = []
        for record in search_result_record:
            for list_item in self.parser_research_list_item(record, response):
                item = AccountNetPatentProItem(
                    **{k: v for k, v in list_item.items() if k in AccountNetPatentProItem().fields.keys()}
                )
                new_item = AccountNetPatentProRowsItem(
                    **{k: v for k, v in list_item.items() if k in AccountNetPatentProRowsItem().fields.keys()}
                )
                yield item
                yield new_item
                next_data.append((record, list_item))

        if self.crawler_detail:
            for record, list_item in next_data:
                yield from self.gen_parser_detail(record, list_item)
                yield from self.gen_parser_detail_similar(record, list_item)
                yield from self.gen_parser_patcitinfo(record, list_item)
                yield from self.gen_parser_patcitedinfos(record, list_item)

        total_count = resp["t"]["pagination"]["totalCount"]
        page = resp["t"]["pagination"]["page"]
        limit = resp["t"]["pagination"]["limit"]
        src_data["pagination"]["page"] = page + 1

        if total_count > limit * page:
            yield self._build_search_request(company_name, src_data)

    # --------------------------------------------------------- tool-API helpers

    @staticmethod
    def _unpack_tool_response(data):
        """Return ``(total_count, limit, page, result_records)`` from a tool-API payload.

        Missing keys yield ``None`` (or ``[]`` for the records) instead of raising.
        """
        t = data.get("t") or {}
        pagination = t.get("pagination") or {}
        return (
            pagination.get("totalCount"),
            pagination.get("limit"),
            pagination.get("page"),
            t.get("result") or [],
        )

    def _next_page_request(self, url, params_data, callback, total_count, limit, page, **cb_kwargs):
        """Yield a follow-up request for the next page when more records remain."""
        if total_count and total_count > page * limit:
            params_data.update({"pagination": {"limit": limit, "page": page + 1}})
            yield scrapy.FormRequest(
                url,
                body=json.dumps(params_data),
                method="POST",
                headers={
                    "Content-Type": "application/json",
                },
                callback=callback,
                cb_kwargs=dict(params_data=params_data, url=url, **cb_kwargs),
            )

    def _tool_request(self, url, interface_id, i, list_item, callback):
        """Yield the first-page request for one family/citation tool endpoint."""
        app_number = list_item["app_number"]
        data = {
            "interfaceId": interface_id,
            "param": {"anId": i["ap"], "pnId": i["pn"], "dbName": "CNDB"},
            "pagination": {"limit": 10, "page": 1},
        }
        yield scrapy.FormRequest(
            url,
            body=json.dumps(data),
            method="POST",
            headers={
                "Content-Type": "application/json",
            },
            callback=callback,
            cb_kwargs=dict(
                app_number=app_number,
                params_data=data,
                url=url,
            ),
        )

    # ------------------------------------------------------------- legal status

    def gen_parser_detail(self, i, list_item):
        """Request the legal-status (法律状态) history for one patent."""
        app_number = list_item["app_number"]
        url = "https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-tools/tools/lawstate/lawstates"
        data = {
            "param": {
                "anId": i["ap"],
            },
            "pagination": {"limit": 40, "page": 1},
            "interfaceId": "1998376",
        }
        yield scrapy.FormRequest(
            url,
            body=json.dumps(data),
            method="POST",
            headers={
                "Content-Type": "application/json",
            },
            callback=self.parser_law_list,
            cb_kwargs=dict(
                app_number=app_number,
                patent_type=list_item["patent_type"],
                applicant_name=list_item["applicant_name"],
                url=url,
                params_data=data,
            ),
        )

    def parser_law_list(self, response, **kwargs):
        """Parse one page of legal-status records and refresh the patent status."""
        patent_type = kwargs.get("patent_type")
        applicant_name = kwargs.get("applicant_name")
        data = response.json()
        app_number = kwargs.get("app_number")
        url = kwargs.get("url")
        params_data = kwargs.get("params_data")
        total_count, limit, page, result_records = self._unpack_tool_response(data)
        for record in result_records:
            law_state_cn_meaning = record["lawStateCNMeaning"]
            yield AccountNetPatentLawStatusDetailItem(
                uni="",
                # prsDate arrives as YYYYMMDD; store as YYYY.MM.DD.
                date=datetime.datetime.strptime(record["prsDate"], "%Y%m%d").strftime("%Y.%m.%d"),
                app_number=app_number,
                status=self.LAW_STATE_MAPPING.get(law_state_cn_meaning) or law_state_cn_meaning,
                detail=None,
            )
        if result_records:
            # Current status is taken from the last record on this page
            # (assumed most recent) — TODO confirm the API's ordering.
            last_meaning = result_records[-1]["lawStateCNMeaning"]
            status_fields = {
                "app_number": app_number,
                "patent_type": patent_type,
                "patent_status": self.LAW_STATE_MAPPING.get(last_meaning) or last_meaning,
                "applicant_name": applicant_name,
            }
            yield NetPatentProUpdatePatentStatusItem(**status_fields)
            yield NetPatentProRowsUpdatePatentStatusItem(**status_fields)
        yield from self._next_page_request(
            url,
            params_data,
            self.parser_law_list,
            total_count,
            limit,
            page,
            app_number=app_number,
            patent_type=patent_type,
            applicant_name=applicant_name,
        )

    # ------------------------------------------------------------- list parsing

    def parser_research_list_item(self, i, response):
        """Normalize one raw search hit into a flat item dict (yielded once)."""
        item = {}
        # Each section is a list of {indexEnName, value} pairs; flatten to dicts.
        com = {j["indexEnName"]: j["value"] for j in i["items"]["COM"]}
        gk = {j["indexEnName"]: j["value"] for j in i["items"]["GK"]}
        sq = {j["indexEnName"]: j["value"] for j in i["items"]["SQ"]}
        ipc_detail = {j["indexEnName"]: j["value"] for j in i["ipcDetail"]}
        abview = {j["indexEnName"]: j["value"] for j in i["abview"]}
        patent_type = "发明"
        # SQ (grant) data implies granted; GK (publication) data implies published.
        if sq:
            patent_status = "授权"
        elif gk:
            patent_status = "公布"
        else:
            patent_status = "申请"

        if i["invType"] == "FM":
            patent_type = "发明"
        elif i["invType"] == "XX":
            # Utility models and designs are only published once granted.
            patent_type = "实用新型"
            patent_status = "授权"
        elif i["invType"] == "WG":
            patent_type = "外观设计"
            patent_status = "授权"

        item["patent_type"] = patent_type + patent_status if patent_type == "发明" else patent_type
        item["patent_name"] = i["ti"]
        item["patent_status"] = patent_status
        item["pub_number"] = i["pn"]
        item["pub_date"] = i["pd"]
        # Granted inventions swap the trailing kind code A -> B on the publication number.
        item["grant_number"] = i["pn"].rstrip("A") + "B" if sq and patent_type == "发明" else None
        item["grant_date"] = sq.get("SQ_PD") if sq else None
        item["patent_num"] = i["apo"]
        item["app_number"] = i["apo"].replace(".", "").replace("CN", "")
        item["application_time"] = i["apd"]
        item["applicant_name"] = i["pa"].split("; ") if i["pa"] else None
        item["inventor"] = i["inv"].split("; ") if i["inv"] else None
        item["cat"] = i["ipcMain"].split("; ") if i["ipcMain"] else None
        patent_agency = com.get("AGY")
        # The agency value ends with a numeric code; keep only the name part.
        item["agency"] = self.reg_one(r"(.*?)\d+$", patent_agency) or patent_agency
        item["title"] = f'[{item["patent_type"]}] {item["patent_name"]}'
        # Abstract arrives as an HTML fragment; extract its plain text.
        # Guard against a missing ABVIEW value (parsel rejects text=None).
        item["abstracts"] = parsel.Selector(text=abview.get("ABVIEW") or "<html/>").xpath("string(.)").get()
        item["main_cat_num"] = i["ipcMain"]
        item["all_cat_num"] = ipc_detail.get("GK_ICST").split(";") if ipc_detail.get("GK_ICST") else None
        # Not provided by this data source; see the Account*Item update-key lists.
        item["address"] = None
        item["post_code"] = None
        item["agent"] = None
        yield item

    @staticmethod
    def reg_one(reg, text, default=""):
        """Return the first capture of the first pattern in *reg* that matches *text*.

        ``reg`` may be a single pattern string or a list of patterns; *default*
        is returned when *text* is falsy or nothing matches.
        """
        if not text:
            return default
        if isinstance(reg, str):
            reg = [reg]
        for pattern in reg:
            ret = re.findall(pattern, text)
            if ret:
                return ret[0]
        return default

    # --------------------------------------------------- family & citation data

    def gen_parser_detail_similar(self, i, list_item):
        """Request the patent-family (同族) records for one patent."""
        yield from self._tool_request(
            "https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-tools/tools/cognation/cognations",
            "1998380",
            i,
            list_item,
            self.parser_detail_similar,
        )

    def parser_detail_similar(self, response, **kwargs):
        """Parse one page of patent-family (同族) records."""
        data = response.json()
        app_number = kwargs.get("app_number")
        url = kwargs.get("url")
        params_data = kwargs.get("params_data")
        total_count, limit, page, result_records = self._unpack_tool_response(data)
        for record in result_records:
            yield NetPatentSimilarItem(
                pid=app_number,
                similar_papplication_id=record["an"],  # family member application number
                similar_publication_id=record["pn"],  # family member publication number
            )
        yield from self._next_page_request(
            url, params_data, self.parser_detail_similar, total_count, limit, page, app_number=app_number
        )

    def gen_parser_patcitinfo(self, i, list_item):
        """Request the citation (引证) records for one patent."""
        yield from self._tool_request(
            "https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-tools/tools/patcitinfo/patcitinfos",
            "1998450",
            i,
            list_item,
            self.parser_patcitinfo,
        )

    def parser_patcitinfo(self, response, **kwargs):
        """Parse one page of citation (引证) records."""
        data = response.json()
        app_number = kwargs.get("app_number")
        url = kwargs.get("url")
        params_data = kwargs.get("params_data")
        total_count, limit, page, result_records = self._unpack_tool_response(data)
        for record in result_records:
            yield NetPatentCiteItem(
                pid=app_number,
                cite_publication_id=record["pn"],  # cited publication number
                cite_application_id=record["an"],  # cited application number
                cite_correlation=record["relation"],  # citation relevance
            )
        yield from self._next_page_request(
            url, params_data, self.parser_patcitinfo, total_count, limit, page, app_number=app_number
        )

    def gen_parser_patcitedinfos(self, i, list_item):
        """Request the cited-by (被引证) records for one patent."""
        yield from self._tool_request(
            "https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-tools/tools/patcitinfo/patcitedinfos",
            "1998450",
            i,
            list_item,
            self.parser_patcitedinfos,
        )

    def parser_patcitedinfos(self, response, **kwargs):
        """Parse one page of cited-by (被引证) records."""
        data = response.json()
        app_number = kwargs.get("app_number")
        url = kwargs.get("url")
        params_data = kwargs.get("params_data")
        total_count, limit, page, result_records = self._unpack_tool_response(data)
        for record in result_records:
            yield NetPatentCitedItem(
                pid=app_number,
                cited_publication_id=record["pn"],  # citing publication number
                cited_application_id=record["an"],  # citing application number
                cited_correlation=record["relation"],  # citation relevance
            )
        yield from self._next_page_request(
            url, params_data, self.parser_patcitedinfos, total_count, limit, page, app_number=app_number
        )


def run():
    """Launch this spider through the scrapy CLI (blocks until the crawl ends)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "patent_account_search_spider"])


if __name__ == "__main__":
    run()
