#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   : 2023/8/30 10:25
# @Author : 王凯
# @File   : clean_model.py
# @Project: spider-man
import datetime
import json
import re
import sys
from functools import lru_cache
from pathlib import Path
from typing import Dict

import redis
from components.settings.private.wfq_dev_redis_settings import REDIS_URL

sys.path.append(Path(__file__).parent.parent.parent.parent.as_posix())
from utils.es_company_tools import EsCompanyTools
import numpy as np
import requests
import concurrent.futures

from apps.patent.clean import *
from apps.patent.patent.items import NetPatentProItem, NetPatentLawStatusItem, NetPatentProRowsItem
from components.config import NET_ROBOT_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB

PUBLIC_OPINION_URL = "https://api-dev.local.wfq2020.com/public-opinion"


class CleanPatentModel:
    """Clean raw patent data into the ``net_patent_clean`` table.

    Workflow (see :meth:`run`): resolve company tax IDs / former names,
    load raw patent rows from MySQL, merge duplicate rows per application
    number, attach legal-status history, map IPC/LOC classification codes
    to Chinese category labels, then upsert the cleaned records.
    """

    # Column names of the raw patent / law-status Scrapy items; used to
    # build SELECT column lists.
    patent_filed = NetPatentProItem().fields.keys()
    patent_law_filed = NetPatentLawStatusItem().fields.keys()

    # IPC classification table versions loaded in __init__ and probed
    # (newest first) by process_mapping_cat_num().
    IPC_YEARS = range(2018, 2023 + 1)

    def __init__(self):
        # Production MySQL connection (raw + cleaned patent tables).
        self.prod_db = MysqlDB(
            ip=NET_ROBOT_MYSQL_CONFIG["MYSQL_IP"],
            port=NET_ROBOT_MYSQL_CONFIG["MYSQL_PORT"],
            db=NET_ROBOT_MYSQL_CONFIG["MYSQL_DB"],
            user_name=NET_ROBOT_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=NET_ROBOT_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )
        self.db = self.prod_db
        self.pg_db = PostgresqlDB()
        logger.debug("ipc init ...")

        # One IPCModel per classification-table version.
        for i in self.IPC_YEARS:
            setattr(
                self,
                f"ipc_model_{i}",
                IPCModel(self.prod_db.find(f"select * from net_parent_ipc where version = {i}", to_json=True)),
            )
        logger.debug("ipc init done ...")
        self.loc_model = LOCModel(self.prod_db.find("select * from net_parent_loc", to_json=True))
        with open(f"{Path(__file__).parent}/dm_gjhyhf_map_qyzhpjhyysb.json", "r", encoding="utf-8") as f:
            self.industry_model = IndustryModel(industry_info=json.loads(f.read()))
        logger.debug("ipc industry done...")
        # When True, run() re-queues failed company names to the crawler's
        # redis start-url queue so they get re-crawled.
        self.auto_fill = False
        self.server = redis.from_url(REDIS_URL)

    @staticmethod
    def _replace_zh(string):
        """Replace ASCII parentheses with full-width (Chinese) ones."""
        return string.replace("(", "（").replace(")", "）")

    @staticmethod
    def _replace_zh_r(string):
        """Replace full-width (Chinese) parentheses with ASCII ones."""
        return string.replace("（", "(").replace("）", ")")

    def _all_company_name_literals(self, com_tax_ids: List[Dict[str, str]]) -> set:
        """Return SQL-quoted name literals for every company.

        Includes each current name, every comma-separated former name, and
        the full-width-parenthesis variant of each (e.g. ``"'foo（bar）'"``).
        Shared by loader_all_data_df() and run(), which previously duplicated
        this computation.
        """
        used_name = []
        for i in com_tax_ids:
            used_name.extend((i["used_name"] or "").split(","))
        used_name = list(set([i for i in used_name if i]))
        all_coms = set([f"'{i['company_name']}'" for i in com_tax_ids] + [f"'{i}'" for i in used_name])
        return set(list(all_coms) + list([self._replace_zh(i) for i in all_coms]))

    def loader_all_data_df(self, com_tax_ids: List[Dict[str, str]]) -> pd.DataFrame:
        """Load raw patent rows for the given companies into a DataFrame.

        :param com_tax_ids: rows from first_fill_tax_id() with keys
            company_name / taxpayer_id / used_name / industry_all.
        :return: one row per (patent id, company), de-duplicated, with
            main_cat_num back-filled from ``cat`` and industry levels 1-4
            expanded into their own columns; empty DataFrame when nothing
            matches.
        """
        query_data = []
        all_coms = self._all_company_name_literals(com_tax_ids)
        com_tax_ids_str = ",".join(all_coms)
        if com_tax_ids_str:
            # NOTE(review): names are interpolated into SQL directly; safe
            # only while company names never contain single quotes.
            sql = f"select id,company_name from net_patent_company_info where company_name in ({com_tax_ids_str})"
            com_datas = self.db.find(sql, to_json=True)
            com_mappings = {com_data["company_name"]: com_data["id"] for com_data in com_datas}
            if com_mappings:
                # One UNION ALL branch per company so each branch injects its
                # own constant company_name / taxpayer_id / industry_all
                # columns next to the raw patent fields.
                simple_sql = """(SELECT {filed} FROM {table} WHERE applicant_name_one in ({applicant_name_one_list}))"""
                sql = " union all ".join(
                    [
                        simple_sql.format(
                            table=NetPatentProRowsItem().table_name,
                            filed="'{company_name}' as company_name,'{taxpayer_id}' as taxpayer_id,'{industry_all}' as "
                            "industry_all,id,".format(
                                company_name=com_tax_id["company_name"],
                                taxpayer_id=com_tax_id["taxpayer_id"],
                                industry_all=(
                                    json.dumps(com_tax_id["industry_all"], ensure_ascii=False)
                                    if com_tax_id["industry_all"]
                                    else ""
                                ),
                            )
                            + ",".join(self.patent_filed),
                            applicant_name_one_list=",".join([f"'{i}'" for i in list(com_mappings.keys())]),
                        )
                        for com_tax_id in com_tax_ids
                    ]
                )
                if sql:
                    query_data = self.db.find(sql, to_json=True)
        df_pro = pd.DataFrame(query_data)
        if not df_pro.empty:
            df_pro = df_pro.drop_duplicates(subset=["id", "company_name"], keep="first")
            df_pro = df_pro.drop(columns=["id"])
            # Back-fill the main classification from the first entry of "cat".
            # NOTE(review): Series.mask(inplace=True) on a column selection
            # relies on it being a view — confirm under newer pandas.
            df_pro["main_cat_num"].mask(
                df_pro["main_cat_num"].isnull(), df_pro["cat"].map(lambda x: x[0] if x else None), inplace=True
            )
            # Normalize the applicant-name list per company (former names,
            # parenthesis variants).
            for _com in df_pro["company_name"].unique().tolist():
                df_pro["applicant_name"].mask(
                    df_pro["company_name"] == _com,
                    df_pro["applicant_name"].map(lambda _com_list: self.replace_coms_std(_com, _com_list=_com_list)),
                    inplace=True,
                )
            # Expand industry levels 1-4 into their own columns.
            df_pro["industry_all"] = df_pro["industry_all"].map(lambda x: x if x else {})
            df_pro["taxpayer_industry1"] = df_pro["industry_all"].map(lambda x: (x or {}).get("1"))
            df_pro["taxpayer_industry2"] = df_pro["industry_all"].map(lambda x: (x or {}).get("2"))
            df_pro["taxpayer_industry3"] = df_pro["industry_all"].map(lambda x: (x or {}).get("3"))
            df_pro["taxpayer_industry4"] = df_pro["industry_all"].map(lambda x: (x or {}).get("4"))
        return df_pro

    def replace_coms_std(self, _com, _com_list):
        """Normalize an applicant-name list to ASCII parentheses and make sure
        the company itself is present in the list.

        NOTE(review): the first two branches apply the same transformation;
        the first one may have been intended to use _replace_zh — confirm
        before changing.
        """
        if self._replace_zh(_com) in _com_list:
            _com_list = [self._replace_zh_r(i) for i in _com_list]
        elif self._replace_zh_r(_com) in _com_list:
            _com_list = [self._replace_zh_r(i) for i in _com_list]
        else:
            _com_list = list(set(_com_list + [self._replace_zh_r(_com)]))
        return [self._replace_zh_r(i) for i in _com_list]

    @staticmethod
    def _deal_patent_law_status(df_pro: pd.DataFrame) -> List:
        """Convert a law-status DataFrame to a list of record dicts keyed off
        app_number (the index column itself is dropped from each record)."""
        return df_pro.set_index("app_number").to_dict(orient="records")

    @staticmethod
    def process_cat_num(cat_num: str) -> str:
        """Strip a classification code down to its leading [A-Za-z0-9/-] run.

        e.g. ``"A01B33/08; ..."`` -> ``"A01B33/08"``; empty/None -> ``""``.
        """
        if cat_num:
            # A '*' quantifier always matches (possibly empty), which mirrors
            # the previous re.findall(...)[0] behaviour exactly.
            return re.match(r"[A-Za-z0-9/-]*", cat_num).group()
        return ""

    @lru_cache(maxsize=5000)
    def process_mapping_cat_num(self, cat_num: str) -> dict:
        """Map an IPC/LOC code to its Chinese category labels.

        Tries the IPC tables newest-first, then falls back to LOC.
        NOTE: lru_cache on an instance method keeps ``self`` alive for the
        cache's lifetime (ruff B019); acceptable here because the model is a
        long-lived singleton.
        """
        if cat_num:
            for year in sorted(self.IPC_YEARS, reverse=True):
                ret = getattr(self, f"ipc_model_{year}").get_code(cat_num)
                if ret:
                    return ret
            return self.loc_model.get_code(cat_num)
        return {}

    def process_mapping_cat_num_series(self, series):
        """Row-wise wrapper: map main_cat_num (or the first entry of ``cat``
        when main_cat_num is missing) to its category-label dict."""
        main_cat_num, cat_num = series[["main_cat_num", "cat"]]
        if main_cat_num:
            return self.process_mapping_cat_num(main_cat_num)
        else:
            if cat_num:
                return self.process_mapping_cat_num(cat_num[0])
        return {}

    def fill_main_cat_num_zh(self, df_pro: pd.DataFrame) -> pd.DataFrame:
        """Add main_cat_num_zh plus patent_category1..4 label columns."""
        if "main_cat_num" in df_pro:
            df_pro["main_cat_num_zh"] = df_pro.apply(self.process_mapping_cat_num_series, axis=1)
        else:
            df_pro["main_cat_num_zh"] = None
        # Category dicts are keyed by *int* level here (cf. the str keys used
        # for industry_all in loader_all_data_df).
        df_pro["patent_category1"] = df_pro["main_cat_num_zh"].map(lambda x: (x or {}).get(1))
        df_pro["patent_category2"] = df_pro["main_cat_num_zh"].map(lambda x: (x or {}).get(2))
        df_pro["patent_category3"] = df_pro["main_cat_num_zh"].map(lambda x: (x or {}).get(3))
        df_pro["patent_category4"] = df_pro["main_cat_num_zh"].map(lambda x: (x or {}).get(4))
        return df_pro

    def _deal_merge_patent_df(self, df_pro: pd.DataFrame) -> pd.DataFrame:
        """Merge all raw rows of one (app_number, company) group into one row.

        Rows are visited newest-first; for most fields the first non-empty
        value wins, while title/abstracts always take the last-visited value
        and classification codes are normalized via process_cat_num().
        """
        df_pro = df_pro.sort_values(
            ["app_number", "grant_date", "pub_date", "application_time"], ascending=False, inplace=False
        )
        parent_one_app_number_data = {}
        single_list = df_pro.to_dict(orient="records")
        for i in single_list:
            for k, v in i.items():
                if k in ["all_cat_num", "cat"]:
                    parent_one_app_number_data[k] = [self.process_cat_num(c) for c in v] if v else []
                if k == "main_cat_num":
                    parent_one_app_number_data[k] = self.process_cat_num(v)
                if k in ["title", "abstracts"] or not parent_one_app_number_data.get(k):
                    parent_one_app_number_data[k] = v

        # Derive the coarse status: published, upgraded to granted when a
        # grant date exists.
        if "pub_date" in parent_one_app_number_data and parent_one_app_number_data["pub_date"]:
            parent_one_app_number_data["patent_status"] = "公布"
            if "grant_date" in parent_one_app_number_data and parent_one_app_number_data["grant_date"]:
                parent_one_app_number_data["patent_status"] = "授权"

        return pd.DataFrame([parent_one_app_number_data])

    def loader_and_fill_all_law_data_df(self, df_pro: pd.DataFrame) -> pd.DataFrame:
        """De-duplicate per (app_number, company) and attach law-status rows.

        Adds a ``law_status`` column holding the list of legal-status records
        (newest first) per application number, or None when none exist.
        """
        if df_pro.empty:
            return df_pro

        if "app_number" not in df_pro:
            return df_pro

        df_pro_not_duplicate = df_pro.drop_duplicates(subset=["app_number", "company_name"])
        df_pro_not_duplicate = df_pro_not_duplicate.groupby(["app_number", "company_name"], group_keys=True).apply(
            self._deal_merge_patent_df
        )

        app_num_list = ",".join([f'"{i}"' for i in df_pro["app_number"].unique().tolist()])

        if not app_num_list:
            return df_pro_not_duplicate

        simple_sql = f"""SELECT date, status, detail, app_number FROM net_patent_law_status WHERE app_number in ({app_num_list}) order by date desc"""

        df_pro_law = pd.DataFrame(self.db.find(simple_sql, to_json=True))

        if df_pro_law.empty:
            return df_pro_not_duplicate

        df_pro_law_dict = (
            df_pro_law.groupby("app_number", sort=True)
            .apply(lambda df: df.set_index("app_number").to_dict(orient="records"))
            .to_dict()
        )

        # App numbers with no law record map to NaN; normalize them to None.
        # (np.nan: the np.NAN alias was removed in NumPy 2.0.)
        df_pro_not_duplicate["law_status"] = (
            df_pro_not_duplicate["app_number"].map(df_pro_law_dict).replace({np.nan: None})
        )

        return df_pro_not_duplicate

    def first_fill_tax_id(self, coms) -> List:
        """Resolve tax ID / industry / former names for each company name.

        :param coms: list of company names.
        :return: list of record dicts with an extra ``industry_all`` key
            (industry levels mapped via the industry model); empty list when
            nothing was found.
        """
        # The tonghuashun (PostgreSQL) lookup is kept as an alternative:
        # df = self.query_tax_id_by_tonghuashun(coms)
        df = self.query_tax_id_by_api(coms)
        if df.empty:
            return []
        df["industry_all"] = df["industry_name"].map(lambda x: self.industry_model.get(x))
        return df.to_dict("records")

    def query_tax_id_by_tonghuashun(self, coms) -> pd.DataFrame:
        """Look up company tax IDs / industries in the tonghuashun PostgreSQL
        tables (enterprises plus individual businesses). Alternative source to
        query_tax_id_by_api()."""
        com_name_list_str = ",".join([f"'{i}'" for i in coms])
        if not com_name_list_str:
            return pd.DataFrame()
        sql = """ SELECT corp_name AS company_name, unified_social_credit_code AS taxpayer_id, third_level_industry as industry_name, used_name FROM enterprise_basic_info  WHERE corp_name IN ({com_name_list_str}) AND isvalid = 1  UNION ALL  SELECT corp_name AS company_name, unified_social_credit_code AS taxpayer_id, industry as industry_name, used_name FROM individual_business_info  WHERE corp_name IN ({com_name_list_str}) AND isvalid = 1""".format(
            com_name_list_str=com_name_list_str
        )
        return pd.DataFrame(self.pg_db.find(sql, to_json=True)).drop_duplicates()

    def query_tax_id_by_api(self, coms) -> pd.DataFrame:
        """Fetch company info via the ES API.

        :param coms: list of company names.
        :return: DataFrame with company_name / taxpayer_id / industry_name /
            used_name columns (one row per input name; values may be None).
        """
        dataset = []
        res = EsCompanyTools().get_company_name_or_taxpayer_id(coms)
        for com in coms:
            dataset.append(
                {
                    "taxpayer_id": res["taxpayer_id_mapping"].get(com),
                    "industry_name": res["industry_mapping"].get(com),
                    "company_name": res["company_name_mapping"].get(com),
                    "used_name": res["used_name_mapping"].get(com),
                }
            )
        return pd.DataFrame(dataset)

    def _get_com_info_from_tyc(self, com):
        """Look up basic company info from the tyc search service.

        :return: dict with taxpayer_id / industry_name / company_name, or
            None (implicitly) when the service has no result.
        """
        # timeout added so a hung service cannot block the pipeline forever.
        response = requests.get(
            f"{PUBLIC_OPINION_URL}/v3/tyc/search/",
            params={"query_key": com, "need_keys": "base_info"},
            timeout=60,
        ).json()
        ret = response.get("data", {}).get("base_info", {}).get("result", {})
        if ret:
            return {
                "taxpayer_id": ret.get("creditCode"),
                "industry_name": ret.get("industry"),
                "company_name": ret.get("com"),
            }

    @staticmethod
    def process_patent_type(df_pro: pd.DataFrame) -> pd.DataFrame:
        """Normalize the patent_type column to one of four canonical values
        (values not containing any canonical substring are left unchanged)."""
        patent_type_list = ["发明公布", "发明授权", "外观设计", "实用新型"]

        def _process_patent_type(patent_type):
            # First match wins; the canonical values are not substrings of
            # one another, so this is equivalent to the old rewrite-in-place
            # loop.
            for i in patent_type_list:
                if i in patent_type:
                    return i
            return patent_type

        if "patent_type" in df_pro.columns:
            df_pro["patent_type"] = df_pro["patent_type"].apply(lambda x: _process_patent_type(x))
        return df_pro

    def fill_law_data_to_src_data(self, df_pro):
        """Use the actual legal-status history to back-fill grant info.

        When a patent is still marked "公布" (published) but its law status
        contains a "授权" (granted) record, fetch the grant number/date from
        the raw table and upgrade the row to granted.
        """
        if "law_status" not in df_pro:
            return df_pro

        def _fill_patent_type(ser):
            law_status, patent_type, patent_status, app_number = ser[
                ["law_status", "patent_type", "patent_status", "app_number"]
            ]
            law_status = law_status or []
            law_status = [i for i in law_status if i.get("status") == "授权"]
            if law_status and patent_status == "公布":
                # Look up the grant number for this application and back-fill.
                sql = (
                    f"select grant_number,grant_date from net_patent_pro where app_number = '{app_number}' "
                    f"and grant_number is not null"
                )
                fix_data = self.db.find(sql, to_json=True)
                if fix_data:
                    ser["grant_number"] = fix_data[0]["grant_number"]
                    ser["grant_date"] = fix_data[0]["grant_date"]
                    ser["patent_type"] = "发明授权"
                    ser["patent_status"] = "授权"
            return ser

        df_pro = df_pro.apply(_fill_patent_type, axis=1)
        return df_pro

    def run(self, com):
        """Clean patents for one company name (or a list of names) and upsert
        the result into ``net_patent_clean``.

        Existing rows for the same companies / tax IDs are invalidated
        (valid=0) first. On failure, when ``auto_fill`` is True, the company
        names are pushed back to the crawler's redis start-url queue.
        """
        if isinstance(com, str):
            coms = [com]
        else:
            coms = com
        com_tax_ids = self.first_fill_tax_id(coms)
        # Current + former names, SQL-quoted (shared helper — see
        # _all_company_name_literals).
        all_coms = self._all_company_name_literals(com_tax_ids)

        df_pro = self.loader_all_data_df(com_tax_ids)
        df_pro = self.loader_and_fill_all_law_data_df(df_pro)
        df_pro = self.fill_law_data_to_src_data(df_pro)
        df_pro = self.fill_main_cat_num_zh(df_pro)
        df_pro = self.process_patent_type(df_pro)
        df_pro = LawStatusWash(df_pro).run()
        try:
            out_fields = list(PatentOutInfo.model_fields.keys())
            ret = [
                {**{k: v for k, v in i.items() if k in out_fields}, "valid": 1}
                for i in df_pro.to_dict("records")
            ]
            if ret:
                # Invalidate previous rows for these companies (the name
                # literals are already SQL-quoted).
                for one_com in all_coms:
                    self.db.update_smart(
                        "net_patent_clean", {"valid": 0}, condition=f"company_name = {one_com} and valid = 1"
                    )

                tax_ids = df_pro["taxpayer_id"].unique().tolist()
                tax_ids_str = ",".join([f"'{i}'" for i in tax_ids if i])
                if tax_ids_str:  # guard: "in ()" is a SQL syntax error
                    self.db.update_smart("net_patent_clean", {"valid": 0}, condition=f"taxpayer_id in ({tax_ids_str})")
                self.db.add_batch_smart(
                    "net_patent_clean", ret, update_columns=out_fields + ["valid"]
                )

                # TODO: publish the cleaned records to kafka.
        except Exception as e:
            # Log instead of swallowing silently, then optionally re-queue.
            logger.exception(f"clean patent failed for {coms}: {e}")
            if self.auto_fill is True:
                # Re-queue the companies for a fresh crawl; very short names
                # are skipped as likely-bad query keys.
                for name in coms:
                    if len(name) > 3:
                        new_task = {"query_key": name, "taxpayer_id": None}
                        self.server.zadd(
                            "scrapy:task:tyc:patent_search:start_urls",
                            {json.dumps(new_task, ensure_ascii=False): -1},
                        )

    def time_run(self, time_flag=None):
        """Re-clean every company whose task log was updated after time_flag.

        :param time_flag: date lower bound; defaults to *today at call time*.
            (The old ``datetime.now().date()`` default was evaluated once at
            import time, so a long-lived process always used its start date.)
        """
        if time_flag is None:
            time_flag = datetime.datetime.now().date()
        sql = (
            "select distinct query_key as company_name from net_patent_task_log where "
            f" update_time > '{time_flag}'"
        )
        datas = self.db.find(sql, to_json=True)
        max_workers = 2
        logger.debug(f"定时清洗数量 {len(datas)}")
        tasks = []
        self.auto_fill = True
        from tqdm import tqdm
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            for com_dict in datas:
                # A query_key may hold several names separated by full-width
                # commas; run() accepts the resulting list.
                tasks.append(executor.submit(self.run, com_dict.get("company_name").split("，")))

            for future in tqdm(
                    concurrent.futures.as_completed(tasks),
                    total=len(tasks),
            ):
                exc = future.exception()  # fetch once instead of twice
                if exc:
                    raise exc

    def time_fix_run(self, time_flag=None):
        """Re-clean companies whose cleaned rows lack a law_status.

        :param time_flag: date lower bound; defaults to *today at call time*
            (same import-time-default fix as time_run).
        """
        if time_flag is None:
            time_flag = datetime.datetime.now().date()
        sql = (
            "select distinct company_name from net_patent_clean where law_status is null and update_time > '{}'".format(
                time_flag
            )
        )
        max_workers = 1
        datas = self.db.find(sql, to_json=True)
        logger.debug(f"定时清洗修复数量 {len(datas)}")
        if datas:
            # Repair rows with an empty legal status by re-running the
            # cleaner (the crawler can be re-run separately if needed).
            with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
                for com_dict in datas:
                    executor.submit(self.run, com_dict.get("company_name"))

    def fix_tmp(self):
        """One-off repair: re-run the cleaner over every company already in
        ``net_patent_clean``."""
        sql = "select distinct company_name from net_patent_clean"
        datas = self.db.find(sql, to_json=True)
        batch_size = 1
        logger.debug(f"定时清洗数量 {len(datas)}")
        with concurrent.futures.ThreadPoolExecutor(max_workers=batch_size) as executor:
            for i in datas:
                executor.submit(self.run, i["company_name"])


if __name__ == "__main__":
    # Manual entry point: re-clean everything whose task log was touched in
    # the last 60 days. Single-company runs are kept around, commented out,
    # for ad-hoc debugging.
    test = CleanPatentModel()
    coms_list = ["东泰（青岛）农产有限公司"]
    # test.run(coms_list[0])
    time_flag = datetime.datetime.now().date() - datetime.timedelta(days=60)
    test.time_run(time_flag=time_flag)
    # test.time_fix_run(time_flag=time_flag)
