# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import re

import scrapy

from apps.patent.patent.items import (
    NetPatentProItem,
    NetPatentLawStatusItem,
    NetPatentProUpdatePatentStatusItem,
    NetPatentProRowsItem,
    NetPatentProRowsUpdatePatentStatusItem,
    NetPatentLawStatusDetailItem,
)


class PatentParser:
    """Parsing mixin for the CNIPA patent publication site (epub.cnipa.gov.cn).

    Provides:
      * search-result list-page parsing (``parse_page_list`` /
        ``parse_list_every_one``),
      * legal-status detail requests and parsers (a form-query variant and a
        click-simulation variant),
      * small static helpers for building search payloads and extracting text.
    """

    # Spiders using this mixin follow pagination automatically when True.
    auto_next = True

    def parse_page_list(self, response, **kwargs):
        """Parse one search-result page and yield items plus detail requests.

        For every patent on the page this yields a ``NetPatentProItem``, a
        ``NetPatentProRowsItem`` and a follow-up request that scrapes the
        patent's legal status. ``kwargs`` must contain ``pub_type``.
        """
        # Hoist the field-name lookups out of the loop; they are invariant.
        pro_fields = NetPatentProItem().fields.keys()
        rows_fields = NetPatentProRowsItem().fields.keys()
        for raw in self.parse_list_every_one(response):
            item = NetPatentProItem(**{k: v for k, v in raw.items() if k in pro_fields})
            # BUG FIX: the rows item used to be filtered against
            # NetPatentProItem's fields (copy-paste error), which silently
            # dropped any field that exists only on NetPatentProRowsItem.
            new_item = NetPatentProRowsItem(**{k: v for k, v in raw.items() if k in rows_fields})
            yield item
            yield new_item
            yield from self.gen_detail_request_by_click(
                item["uni"],
                kwargs["pub_type"],
                "",
                app_number=item["app_number"],
                patent_type=item["patent_type"],
                applicant_name=item["applicant_name"],
            )

    def gen_detail_request_by_query(self, an, pub_type, ggr, app_number, patent_type, applicant_name):
        """Yield a legal-status request against the SWPageQuery form endpoint.

        ``an`` and ``ggr`` are accepted for signature parity with
        ``gen_detail_request_by_click`` but are not used by this endpoint.
        The callback is ``parse_detail_by_query``.
        """
        url = "http://epub.cnipa.gov.cn/SW/SWPageQuery"
        # The endpoint only distinguishes two publication-type groups,
        # so collapse the caller's pub_type to its group representative.
        if str(pub_type) in ["1", "2", "3", "4"]:
            pub_type = "3"
        elif str(pub_type) in ["9", "10"]:
            pub_type = "9"
        data = {
            "searchSwInfo.PubType": pub_type,
            "searchSwInfo.An": app_number,
            "searchSwInfo.SwType": "",
            "searchSwInfo.SwPubdate": "",
            "searchSwInfo.SwInfo": "",
            "trsSql": "",
            "pageModel.pageNum": "1",
            "pageModel.pageSize": "10",
            "sortFiled": "ggr_desc",
            "searchAfter": "",
            "__RequestVerificationToken": "",
        }
        yield scrapy.FormRequest(
            url,
            formdata=data,
            callback=self.parse_detail_by_query,
            cb_kwargs=dict(
                app_number=app_number,
                patent_type=patent_type,
                applicant_name=applicant_name,
            ),
        )

    def parse_detail_by_query(self, response, **kwargs):
        """Parse the SWPageQuery legal-status table.

        Yields one ``NetPatentLawStatusItem`` per table row and, if any rows
        were found, a pair of status-update items carrying the latest (last
        listed) status for the patent.
        """
        datas = []
        app_number = kwargs["app_number"]
        patent_type = kwargs["patent_type"]
        applicant_name = kwargs["applicant_name"]
        for tr in response.xpath("//tr"):
            item = {
                "app_number": tr.xpath('string(./td[@class="name"])').get().strip(),
                "date": tr.xpath('string(./td[@class="num"])').get().strip(),
                "status": tr.xpath('string(./td[@class="title"])').get().strip(),
            }
            # Header / decorative rows have no application number; skip them.
            if item["app_number"]:
                datas.append(item)
                yield NetPatentLawStatusItem(**item)
        if datas:
            # The last row is treated as the patent's current status.
            yield NetPatentProUpdatePatentStatusItem(
                **{
                    "app_number": app_number,
                    "patent_type": patent_type,
                    "patent_status": datas[-1]["status"],
                    "applicant_name": applicant_name,
                }
            )
            yield NetPatentProRowsUpdatePatentStatusItem(
                **{
                    "app_number": app_number,
                    "patent_type": patent_type,
                    "patent_status": datas[-1]["status"],
                    "applicant_name": applicant_name,
                }
            )

    def parse_list_every_one(self, response):
        """Extract one dict per patent from a search-result list page.

        Field labels on the page are Chinese; ``chinese_mapping`` renames them
        to item-field names (empty-string mappings mean "keep the Chinese key",
        see the final comprehension). Returns a list of dicts.
        """
        return_data = []
        chinese_mapping = {
            "专利类型": "patent_type",
            "专利名称": "patent_name",
            "唯一标识符": "uni",
            "申请公布号": "pub_number",
            "申请公布日": "pub_date",
            "授权公告号": "grant_number",
            "授权公告日": "grant_date",
            "申请号": "app_number",
            "申请日": "application_time",
            "申请人": "applicant_name",
            "发明人": "inventor",
            "地址": "address",
            "邮编": "post_code",
            "分类号": "cat",
            "专利代理机构": "agency",
            "专利代理师": "agent",
            "优先权": "priority_info",
            "本国优先权": "priority_info",
            "分案原申请": "",
            "生物保藏": "",
            "PCT进入国家阶段日": "",
            "PCT申请数据": "",
            "PCT公布数据": "",
            "摘要": "abstracts",
            "图片url": "img_url",
            "全部分类号": "all_cat_num",
            "同一申请的已公布的文献号": "",
            "对比文件": "",
            "更正文献出版日": "",
            "解密公告日": "",
            "国际注册号": "",
            "标题": "title",
            "专利权人": "applicant_name",
            "设计人": "inventor",
            "简要说明": "abstracts",
            "申请号/专利号": "patent_num",
            "专利状态": "patent_status",
        }
        for item in response.xpath("//*[@class='item']"):
            all_dict = {"图片url": response.urljoin(item.xpath(".//img/@src").get())}

            # The unique id lives in the first link's onclick handler,
            # e.g. onclick="foo('<uni>', ...)".
            uni = item.xpath(".//a[1]/@onclick").re_first(r"\(\'(.*?)\'")
            title = item.xpath("string(.//h1)").get().strip()
            # Title has the form "[<patent type>] <patent name>".
            patent_type = self.reg_one(r"\s*\[(.{4,8})\]\s*", title)
            patent_name = self.reg_one(r"\s*\[(?:.{4,8})\]\s*([^\s]*)", title)

            all_dict.update(
                {
                    "专利类型": patent_type,
                    "专利名称": patent_name,
                    "唯一标识符": uni,
                    "标题": title,
                }
            )
            for info_div in item.xpath('./div[@class="info"]'):
                # Basic information section.
                for base_info in info_div.xpath(".//dl"):
                    # Labels end with a fullwidth colon; keep only the label.
                    name = (base_info.xpath("string(./dt)").get().strip() or "").split("：")[0]
                    all_value = base_info.xpath('./dd/div[@class="allinfo"]')
                    value = base_info.xpath("string(./dd/text())").get().strip()
                    if all_value:
                        # Append the collapsed ("show all") portion.
                        value = value + all_value.xpath("string(.)").get().strip()
                    all_dict[name] = value

            for intro_div in item.xpath('./div[@class="intro"]'):
                # Extended information section.
                for extend_info in intro_div.xpath("./dl"):
                    name = (extend_info.xpath("string(./dt)").get().strip() or "").split("：")[0]
                    all_value = extend_info.xpath('./dd/div[@class="allinfo"]')
                    # Guard against a missing text node (consistent with the
                    # `or ""` pattern used for the labels above).
                    value = (extend_info.xpath("./dd/text()").get() or "").strip()
                    if name == "地址":
                        # Address field is "<post code> <address>".
                        post_code, address = self.reg_one(
                            r"(\d*)\s*([\w\W]*)\s*",
                            extend_info.xpath("./dd/text()").get(),
                        )
                        all_dict[name] = address
                        all_dict["邮编"] = post_code
                    elif name == "分类号":
                        all_dict[name] = value

                        _tmp_detail = {}

                        # Pull <dt>/<dd> pairs out of the collapsed details.
                        reg_str = r"<dt>\s*(.*?)\s*</dt>\s*<dd>(.*?)</dd>"
                        if all_value.get():
                            for sub_title, sub_value in re.findall(reg_str, all_value.get()):
                                sub_title = sub_title.strip().strip("：")
                                sub_value = sub_value.strip()
                                _tmp_detail[sub_title] = sub_value
                            if _tmp_detail:
                                # Collapsed block contains labelled details:
                                # classification codes precede the first label.
                                _tmp_reg = rf"(?:<dd>([^<]*?)</dd>\s*)+(?:<dt>\s*{list(_tmp_detail.keys())[0]})"
                                _all_cat_num = re.findall(_tmp_reg, all_value.get())
                            else:
                                # Collapsed block is classification codes only.
                                _tmp_reg = r"(?:<dd>([^<]*?)</dd>)+"
                                _all_cat_num = re.findall(_tmp_reg, all_value.get())

                            _all_cat_num = [
                                i.strip() for i in _all_cat_num if i.strip() and "已同日申请" not in i.strip()
                            ]
                            all_dict.update(_tmp_detail)
                            all_dict["全部分类号"] = "".join(set([value] + _all_cat_num))
                    else:
                        all_dict[name] = value

            if "专利代理机构" in all_dict:
                # Strip the trailing agency registration number, if present.
                patent_agency = all_dict["专利代理机构"]
                all_dict["专利代理机构"] = self.reg_one(r"(.*?)\d+$", patent_agency) or patent_agency

            if "发明人" in all_dict:
                all_dict["发明人"] = self._split(all_dict["发明人"])

            if "申请号" in all_dict:
                # Convert to the standard form "CN<number>.<check digit>".
                all_dict["申请号/专利号"] = f'CN{all_dict["申请号"][:-1]}.{all_dict["申请号"][-1:]}'
            if "全部分类号" in all_dict:
                all_dict["全部分类号"] = list(set(self._split(all_dict["全部分类号"])))

            if "申请人" in all_dict:
                all_dict["申请人"] = self._split(all_dict["申请人"])
            if "设计人" in all_dict:
                all_dict["设计人"] = self._split(all_dict["设计人"])
            if "专利权人" in all_dict:
                all_dict["专利权人"] = self._split(all_dict["专利权人"])

            if "分类号" in all_dict:
                all_dict["分类号"] = self._split(all_dict["分类号"])

            # A grant-announcement number implies the patent was granted
            # (授权); otherwise it is merely published (公布).
            if "授权公告号" in all_dict:
                patent_status = "授权"
            else:
                patent_status = "公布"

            all_dict.update({"专利状态": patent_status})

            # Rename keys via the mapping; keys mapped to "" (or missing)
            # fall back to the original Chinese key. Empty values dropped.
            ret = {chinese_mapping.get(k) or k: v for k, v in all_dict.items() if v}
            return_data.append(ret)
        return return_data

    @staticmethod
    def _gen_search_data(page=1, company_name="", pub_type="1"):
        """Build the form payload for a catalog search.

        The same ``company_name`` is fanned out across applicant, inventor,
        title, abstract, etc.; ``isOr`` makes those criteria OR-combined.
        """
        return {
            "searchCatalogInfo.Pubtype": pub_type,
            "searchCatalogInfo.Ggr_Begin": "",
            "searchCatalogInfo.Ggr_End": "",
            "searchCatalogInfo.Pd_Begin": "",
            "searchCatalogInfo.Pd_End": "",
            "searchCatalogInfo.An": "",
            "searchCatalogInfo.Pn": "",
            "searchCatalogInfo.Ad_Begin": "",
            "searchCatalogInfo.Ad_End": "",
            "searchCatalogInfo.E71_73": f"{company_name}",
            "searchCatalogInfo.E72": f"{company_name}",
            "searchCatalogInfo.Edz": f"{company_name}",
            "searchCatalogInfo.E51": "",
            "searchCatalogInfo.Ti": f"{company_name}",
            "searchCatalogInfo.Abs": f"{company_name}",
            "searchCatalogInfo.Edl": f"{company_name}",
            "searchCatalogInfo.E74": f"{company_name}",
            "searchCatalogInfo.E30": "",
            "searchCatalogInfo.E66": "",
            "searchCatalogInfo.E62": "",
            "searchCatalogInfo.E83": "",
            "searchCatalogInfo.E85": "",
            "searchCatalogInfo.E86": "",
            "searchCatalogInfo.E87": "",
            "pageModel.pageNum": f"{page}",
            "pageModel.pageSize": "10",
            "sortFiled": "ggr_desc",
            "searchAfter": "",
            "showModel": "1",
            "isOr": "True",
            "__RequestVerificationToken": "",
        }

    @staticmethod
    def reg_one(reg, text, default=""):
        """Return the first match of the first pattern that hits in *text*.

        *reg* may be a single pattern or a list tried in order. A pattern with
        multiple groups yields a tuple (used by the address parsing above).
        Returns *default* when *text* is falsy or nothing matches.
        """
        if not text:
            return default
        if isinstance(reg, str):
            reg = [reg]
        for pattern in reg:
            ret = re.findall(pattern, text)
            if ret:
                return ret[0]
        return default

    @staticmethod
    def _split(text, sep=";"):
        """Split *text* on *sep*, stripping whitespace and dropping empties."""
        text = text or ""
        return [i.strip() for i in text.split(sep) if i.strip()]

    def gen_detail_request_by_click(self, an, pub_type, ggr, app_number, patent_type, applicant_name):
        """Yield a legal-status request against the SwDetail (click) endpoint.

        The callback is ``parse_detail_by_click``; the unique id *an* is
        forwarded to it as ``uni``.
        """
        url = "http://epub.cnipa.gov.cn/Sw/SwDetail"
        data = {
            "an": an,
            "pubType": pub_type,
            "ggr": ggr,
            "__RequestVerificationToken": "",
        }
        yield scrapy.FormRequest(
            url,
            formdata=data,
            callback=self.parse_detail_by_click,
            cb_kwargs=dict(
                app_number=app_number,
                patent_type=patent_type,
                applicant_name=applicant_name,
                uni=an,
            ),
        )

    def parse_detail_by_click(self, response, **kwargs):
        """Parse the SwDetail legal-status page.

        Yields one ``NetPatentLawStatusDetailItem`` per transaction entry and,
        if any were found, a pair of status-update items carrying the latest
        (last listed) status for the patent.
        """
        app_number = kwargs.get("app_number")
        uni = kwargs.get("uni")
        patent_type = kwargs.get("patent_type")
        applicant_name = kwargs.get("applicant_name")
        items = response.xpath('//div[@class="info"]//div[@class="item"]')
        datas = []
        for item in items:
            data = {}
            for dl in item.xpath(".//dl"):
                # Labels end with a fullwidth colon; extract the label text.
                name = dl.xpath("string(./dt/text())").re_first(r"(.*?)：")
                if name == "事务数据类型":
                    name = "status"
                elif name == "事务数据公告日":
                    name = "date"
                data[name] = dl.xpath("./dd/text()").get()
            # Guard against a missing/unmatched status paragraph: re_first
            # returns None, which previously crashed on .replace().
            detail_html = item.xpath(".//p[@class='status']").re_first("</?p.*?>(.*?)</?p.*?>")
            data["detail"] = (detail_html or "").replace("<br>", "\n")
            datas.append(data)
        for entry in datas:
            entry.update({"app_number": app_number, "uni": uni})
            yield NetPatentLawStatusDetailItem(**entry)
        if datas:
            # The last entry is treated as the patent's current status.
            yield NetPatentProUpdatePatentStatusItem(
                **{
                    "app_number": app_number,
                    "patent_type": patent_type,
                    "patent_status": datas[-1]["status"],
                    "applicant_name": applicant_name,
                }
            )
            yield NetPatentProRowsUpdatePatentStatusItem(
                **{
                    "app_number": app_number,
                    "patent_type": patent_type,
                    "patent_status": datas[-1]["status"],
                    "applicant_name": applicant_name,
                }
            )