import datetime
import re
import time
from urllib import parse

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update, Mysql
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

# Public API of this callbacks module.  NOTE: "cnkipatent_cnkizlarticle_callback"
# was listed twice in the original (duplicate removed below).
__all__ = ["nstl_nstllist_callback",
           "nstl_nstlarticle_callback",
           "cnkipatent_cnkizlhome_callback",
           "cnkipatent_cnkizllist_callback",
           "cnkipatent_cnkizlarticle_callback",
           "isticword_isticword_home_callback",
           "isticword_isticword_list_callback",
           "isticword_isticword_subclass_callback",
           "cnkipatent_cnkizlarticle_etl_callback",
           "wanfangmedbs_wanfangmedbshomeinit_callback",
           "wanfangmedbs_wanfangmedbshome_callback",
           "wanfangmedbs_wanfangmedbslist_callback",
           "cnkiccndpaper_cnkipaperclass_callback",
           "cnkiccndpaper_cnkipaperlist_callback",
           "cnkiccndpaper_cnkipapersublist_callback",
           "cnkiccndpaper_cnkipaperarticle_callback",
           "cnkiccndpaper_cnkipaperarticle_etl_callback",
           "cnkithesis_cnkithesisclass_callback",
           "cnkithesis_cnkithesislist_callback",
           "cnkithesis_cnkithesissublist_callback",
           "cnkithesis_cnkithesisarticle_callback",
           "cnkithesis_cnkithesisarticle_etl_callback",
           "cnkicdfdthesis_cnkicdfdthesisclass_callback",
           "cnkicdfdthesis_cnkicdfdthesislist_callback",
           "cnkicdfdthesis_cnkicdfdthesissublist_callback",
           "cnkicdfdthesis_cnkicdfdthesisarticle_callback",
           "cnkicdfdthesis_cnkicdfdthesisarticle_etl_callback",
           "cnkiconference_cnkiconferenceclass_callback",
           "cnkiconference_cnkiconferencelist_callback",
           "cnkiconference_cnkiconferencesublist_callback",
           "cnkiconference_cnkiconferencearticle_callback",
           "cnkiconference_cnkiconferencearticle_etl_callback",
           "cnkiipfdconference_cnkiipfdconferenceclass_callback",
           "cnkiipfdconference_cnkiipfdconferencelist_callback",
           "cnkiipfdconference_cnkiipfdconferencesublist_callback",
           "cnkiipfdconference_cnkiipfdconferencearticle_callback",
           "cnkiipfdconference_cnkiipfdconferencearticle_etl_callback",
           "wanfangthesis_wanfangthesis_class_callback",
           "wanfangthesis_wanfangthesis_search_list_callback",
           "wanfangthesis_wanfangthesis_typelist_callback",
           "wanfangthesis_wanfangthesis_yearlist_callback",
           "wanfangthesis_wanfangthesis_search_sublist_callback",
           "wanfangthesis_wanfangthesis_search_article_callback",
           "wanfangthesis_wanfangthesis_search_article_etl_callback",
           "wanfangconference_wanfangconference_class_callback",
           "wanfangconference_wanfangconference_search_list_callback",
           "wanfangconference_wanfangconference_typelist_callback",
           "wanfangconference_wanfangconference_yearlist_callback",
           "wanfangconference_wanfangconference_search_sublist_callback",
           "wanfangconference_wanfangconference_search_article_callback",
           "wanfangconference_wanfangconference_search_article_etl_callback",
           "wanfangunconference_wanfangunconference_class_callback",
           "wanfangunconference_wanfangunconference_search_list_callback",
           "wanfangunconference_wanfangunconference_typelist_callback",
           "wanfangunconference_wanfangunconference_yearlist_callback",
           "wanfangunconference_wanfangunconference_search_sublist_callback",
           "wanfangunconference_wanfangunconference_search_article_callback",
           "wanfangunconference_wanfangunconference_search_article_etl_callback",
           "wanfangpatent_wanfangpatent_search_list_callback",
           "wanfangpatent_wanfangpatent_typelist_callback",
           "samropenstdstandard_samropenstdstandard_list_callback",
           "samropenstdstandard_samropenstdstandard_article_callback",
           "samropenstdstandard_samropenstdstandard_article_etl_callback",
           "nstrsreport_nstrsreport_list_callback",
           "nstrsreport_nstrsreport_article_callback",
           "nstrsreport_nstrsreport_article_etl_callback",

           "rscbook_rscbooklist_callback",
           "rscbook_rscbookarticle_callback",
           "rscbook_rscbookarticle_etl_callback",

           "cambridgebook_article_etl_callback",
           "ieeeconference_article_etl_callback",
           ]

import json
import math
import unicodedata

from apps.crawler_platform.core_platform.core_g import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import CallBackModel, OtherListModel, DealModel, DealInsertModel, \
    OtherHomeModel, DealUpdateModel, OperatorSqlModel, OtherArticleModel, EtlDealModel, OtherSubclassModel, \
    OtherArticleSpeModel
from apps.crawler_platform.core_callback.oversea_parse.wos_parse import parse_wosjournal_ref,parse_wosjournal_woskeywordsitem_article
from apps.crawler_platform.core_callback.oversea_parse.scopus_parse import parse_scopusjournal_ref,parse_scopusjournal_article_csv



def is_chinese(check_str):
    """Return True if *check_str* contains at least one Chinese character.

    The check covers the basic CJK Unified Ideographs range U+4E00..U+9FA5.
    """
    return re.search(u'[\u4e00-\u9fa5]', check_str) is not None

def get_journal_rawid(value):
    """Extract a journal rawid from a URL string.

    Prefers the ``baseid`` query parameter; for "index"-style URLs it falls
    back to the last path segment.  Returns "" when neither form matches.
    """
    query = BaseUrl.urlQuery2Dict(value)
    if isinstance(query, dict) and "baseid" in query:
        return query["baseid"]
    # BUG FIX: the original tested ``result_raw.find("index")`` on a variable
    # that was always "" — str.find returns -1 (truthy) when the substring is
    # absent, so that branch always fired and always produced "".  Test the
    # original URL for "index" instead so the fallback actually works.
    if "index" in value:
        return value.split("/")[-1]
    return ""


def cnkithesis_cnkithesisclass_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Home/class-page callback of the CNKI (CMFD) thesis crawl.

    From the "1_1" parse result it (a) seeds one home-table row per
    pagination page — done only when processing page 1 so the fan-out is
    not repeated — and (b) emits one next-stage (list) row per school
    link found on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        # Total number of pages reported by the site (string from the parser).
        total_page = data["lblPageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed one row per page of this class listing (insert-ignore,
            # so re-running page 1 does not duplicate rows).
            info_dicts = {
                "list_rawid": sql_model.list_rawid,
                "task_name": sql_model.task_name,
                "task_tag": sql_model.task_tag,
                "sub_db_id": sql_model.sub_db_id,
                "home_json": sql_model.home_json,
            }
            d_i_model = DealInsertModel()
            d_i_model.insert_pre = CoreSqlValue.insert_ig_it
            for i in range(1, int(total_page) + 1):
                temp = info_dicts.copy()
                temp["page"] = total_page
                temp["page_index"] = i
                d_i_model.lists.append(temp)
            result.befor_dicts.insert.append(d_i_model)
        # Mark the current row processed and record the page count.
        result.befor_dicts.update.update({'is_active': 1, "page": total_page})
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # One next-stage (list) row per school found on this page; the
        # school's baseid is taken from the second query parameter of its
        # link (assumes the href shape "...&baseid=XXX..." — TODO confirm).
        for item in data["value"]["children"]:
            href = item["href"].strip()
            school_name = item["school_name"].strip()
            baseid = href.split('&')[1].replace("baseid=", "")
            if len(baseid) != 0:
                # Carry the home_json forward, enriched with the school info.
                home_json = json.loads(sql_model.home_json)
                home_json["school_name"] = school_name
                home_json["baseid"] = baseid
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": baseid,
                    # page/page_index are placeholders; the list callback
                    # computes the real pagination later.
                    "page": 1,
                    "page_index": "-1",
                    "list_json": json.dumps(home_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
                # print(home_json)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkithesis_cnkithesislist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """List callback of the CNKI thesis crawl.

    For every year bucket returned by the "1_1" parse, fans out one
    subclass row per 50-item result page.  Only the first page of each
    bucket is activated immediately; the others carry IsSearch=false and
    are activated later by the sublist callback once the search sqlcode
    is known.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    list_json = json.loads(sql_model.list_json)
    # sqlcode = para_dicts["data"]["1_2"].get("sqlcode", "")
    # sqlcode is currently always empty here; the real value is filled in
    # later by the sublist callback's JSON_SET update.
    sqlcode = ""
    if "1_1" in para_dicts["data"]:
        result.befor_dicts.update.update({'is_active': 1})
        list_years = para_dicts["data"]["1_1"].get("children", "")
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in list_years:
            # totalnum looks like "(123)"; skip buckets without a count.
            totalnum = item["totalnum"]
            if "(" not in totalnum:
                continue
            totalnum = int(totalnum.replace("(", "").replace(")", ""))
            # Ceiling division: 50 results per page.
            total_page = int((totalnum + 49) / 50)
            # NOTE(review): this is overwritten on every loop iteration, so
            # only the last year's max_page survives — confirm intended.
            result.code_dicts = {
                "1_1": {"max_page": total_page}
            }
            years = item['years']
            for page_index in range(1, total_page + 1):
                tmp_json = {
                    "years": years,
                    "page_index": page_index,
                    "baseid": list_json["baseid"],
                    "school_name": list_json["school_name"]
                }
                is_active = 0
                if page_index == 1:
                    # First page performs the search and is active right away.
                    tmp_json["sqlcode"] = sqlcode
                    tmp_json["HandlerId"] = "2"
                    tmp_json["IsSearch"] = "true"
                    is_active = 1
                else:
                    tmp_json["HandlerId"] = "2"
                    tmp_json["IsSearch"] = "false"
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": "{}_{}".format(list_json["baseid"], years),
                    "page": total_page,
                    "page_index": page_index,
                    "is_active": is_active,
                    "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkithesis_cnkithesissublist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Sublist (result-page) callback of the CNKI thesis crawl.

    Parses the result-table HTML, emits one article row per entry that has
    both a filename and a dbcode, and — when processing page 1 of a bucket —
    broadcasts the sqlcode into the sibling pages' subclass_json and
    activates them.
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    html = callmodel.para_dicts["data"]["1_1"].get("html", "")
    sel = Selector(html)
    # sqlcode = sel.xpath("//input[@id='sqlVal']/@value").extract()[0]
    # sqlcode extraction is currently disabled; an empty value is broadcast.
    sqlcode = ""
    tr_rs = sel.xpath("//table/tbody/tr")
    if tr_rs and len(tr_rs) > 0:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for new in tr_rs:
            dbcode = ""
            filename = ""
            # Article link; the "v" query parameter is a new-style id.
            # NOTE(review): dictss["v"] raises KeyError if "v" is absent —
            # confirm every row's href carries it.
            href = new.xpath("./td[@class='name']//a/@href").get("").strip()
            dictss = BaseUrl.urlQuery2Dict(href)
            value = dictss["v"]  # a new-style id
            # for hr in hrefs:
            #     if "dbcode" in hr:
            #         dbcode = hr.split("=")[1]
            #     elif "filename" in hr:
            #         filename = hr.split("=")[1]
            # filename/dbcode come from the "favorite" (收藏) button's data
            # attributes when present.
            shoucang_a = new.xpath("./td[@class='operat']/a[@title='收藏']")
            if checkExist(shoucang_a):
                filename = shoucang_a.xpath("./@data-filename").get("").strip().lower()
                dbcode = shoucang_a.xpath("./@data-dbname").get("").strip().lower()
            if filename != '' and dbcode != '':
                # Article row inherits the bucket's subclass_json (minus the
                # per-page page_index) plus the per-article identifiers.
                article_json = subclass_json.copy()
                tmp_json = {
                    "dbcode": dbcode.upper(),
                    "value": value
                }
                article_json.update(tmp_json)
                del article_json["page_index"]
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "rawid": filename,
                    "article_info_json": json.dumps(article_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
                if subclass_json["page_index"] == 1:
                    # Page 1 of a bucket: push the sqlcode into every sibling
                    # page's subclass_json and activate them.
                    # NOTE(review): this update is appended once per matching
                    # row, not once per callback — confirm intended.
                    du_model = DealUpdateModel()
                    duplicte = json_update({"sqlcode": sqlcode})
                    du_model.update_no_placeholder.update(
                        {
                            "subclass_json": f"JSON_SET(subclass_json, {duplicte})",
                            "is_active": "1"
                        })
                    oplist = []
                    op1 = OperatorSqlModel()
                    op1.key = "task_name"
                    op1.value = sql_model.task_name
                    op1.operator = "="
                    oplist.append(op1)
                    op2 = OperatorSqlModel()
                    op2.key = "task_tag"
                    op2.value = sql_model.task_tag
                    op2.operator = "="
                    oplist.append(op2)
                    op3 = OperatorSqlModel()
                    op3.key = "list_rawid"
                    op3.value = sql_model.list_rawid
                    op3.operator = "="
                    oplist.append(op3)
                    op4 = OperatorSqlModel()
                    op4.key = "page_index"
                    op4.value = subclass_json['page_index']
                    op4.operator = "!="
                    oplist.append(op4)
                    du_model.where = oplist
                    result.befor_dicts.update_list.append(du_model)
                # Refresh dbcode/value on the (possibly pre-existing)
                # article row via JSON_SET, since insert is insert-ignore.
                du_model = DealUpdateModel()
                duplicte = json_update(tmp_json)
                du_model.update_no_placeholder.update(
                    {"article_info_json": f"JSON_SET(article_info_json, {duplicte})"})
                du_model.where = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "rawid": filename
                }
                result.next_dicts.update_list.append(du_model)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkithesis_cnkithesisarticle_callback(callmodel: CallBackModel[OtherArticleSpeModel]) -> DealModel:
    """Article callback of the CNKI thesis crawl.

    Merges the parsed detail fields ("1_1") and the reference/citation
    counters ("1_2") into the row's article_info_json.  Sections "1_3"
    and "1_4" are currently unused.
    """
    result = DealModel()
    p_data = callmodel.para_dicts["data"]
    article_info_dict = json.loads(callmodel.sql_model.article_info_json)
    if "1_1" in p_data:
        dicts = p_data["1_1"]
        # Strip the query tail / marker prefixes from the link tokens.
        dicts["kcmslink"] = dicts["kcmslink"].split("&")[0].replace("v=", "")
        dicts["customlink"] = dicts["customlink"].split("&")[0].replace("doc-list-recVideo:v=", "")
        # The three self-assignments below are kept from the original as
        # early presence checks: they raise KeyError if the parser did not
        # emit the field.
        dicts["citationapivv"] = dicts["citationapivv"]
        dicts["citationapiclientId"] = dicts["citationapiclientId"]
        dicts["filename"] = dicts["filename"]
        dicts["size"] = 10  # required by the citation download step
        dicts["start"] = 1  # required by the citation download step
        dicts["type"] = ""  # required by the citation download step
        article_info_dict.update(dicts)
    if "1_2" in p_data:
        # The counter payload is a JSON string in either of two shapes:
        # a flat {"REFERENCE":…, "CITING":…} dict, or a {"data":[{name,value}]} list.
        info = json.loads(p_data["1_2"]["html"])
        REFERENCE = 0
        CITING = 0
        if "REFERENCE" in info:
            REFERENCE = info["REFERENCE"]
            CITING = info["CITING"]
        else:
            for item in info["data"]:
                if item["name"] == "references":
                    REFERENCE = item["value"]
                if item["name"] == "citations":
                    CITING = item["value"]
        article_info_dict.update({"refcount": REFERENCE, "citecount": CITING})
    result.befor_dicts.update["article_info_json"] = json.dumps(article_info_dict, ensure_ascii=False)
    return result


def cnkicdfdthesis_cnkicdfdthesisclass_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """CDFD (doctoral thesis) class callback; identical flow to the CMFD one."""
    return cnkithesis_cnkithesisclass_callback(callmodel)


def cnkicdfdthesis_cnkicdfdthesislist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """CDFD (doctoral thesis) list callback; identical flow to the CMFD one."""
    return cnkithesis_cnkithesislist_callback(callmodel)


def cnkicdfdthesis_cnkicdfdthesissublist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """CDFD (doctoral thesis) sublist callback; identical flow to the CMFD one."""
    return cnkithesis_cnkithesissublist_callback(callmodel)


def cnkicdfdthesis_cnkicdfdthesisarticle_callback(callmodel: CallBackModel[OtherArticleSpeModel]) -> DealModel:
    """CDFD (doctoral thesis) article callback; identical flow to the CMFD one."""
    return cnkithesis_cnkithesisarticle_callback(callmodel)


def cnkithesis_cnkithesisarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a CNKI (CMFD, masters thesis) article detail page.

    Combines the rule-parsed fields ("1_1"), the reference/citation counter
    payload ("1_2") and the downloaded reference pages ("1_3") into one
    ``other_latest`` row, scraping any remaining fields (title, authors,
    organ, keywords, pages, …) directly from the raw HTML.  When references
    were parsed, an additional ``other_ref_latest`` row is appended.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    # Propagate upstream rule-parse failures unchanged (code 7 = parse error).
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_info_json"])
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    data_refcnt = para_dicts["data"]["1_2"]
    data_ref = down_model["1_3"].dict()
    # data_cited = down_model["1_4"].dict()
    # Normalize the download date to YYYYMMDD.
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    # The counter payload comes in one of two shapes; see the article
    # callback above for the same dual handling.
    REFERENCE = 0
    CITING = 0
    if "REFERENCE" in data_refcnt.keys():
        REFERENCE = data_refcnt.get("REFERENCE", "0")
        CITING = data_refcnt.get("CITING", "0")
    else:
        for item in data_refcnt["data"]:
            if item["name"] == "references":
                REFERENCE = item["value"]
            if item["name"] == "citations":
                CITING = item["value"]
    data["ref_cnt"] = str(REFERENCE)
    # cited_cnt is stamped with the download date: "count@YYYYMMDD".
    cited_cnt = str(CITING)
    if cited_cnt.isdigit():
        cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed identity fields for the CMFD (masters thesis) sub-database.
    sub_db_id = "00075"
    product = "CNKI"
    sub_db = "CMFD"
    provider = "CNKI"
    source_type = "4"
    data["is_deprecated"] = "0"
    rawid = data["rawid"]
    data["rawid_mysql"] = rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    # data["provider_url"] = "http://kns.cnki.net/kcms/detail/detail.aspx?dbcode=" + data["dbcode"] + "&filename=" + rawid
    data["country"] = "CN"
    data["language"] = "ZH"
    data["degree"] = "硕士"
    sel = Selector(src_data.html)

    # provider_url is rebuilt from the hidden "copy" input; hard failure if
    # the page layout changed.
    v = sel.xpath('//input[@id="copy"]/@value').extract_first()
    if not v:
        raise Exception('provider_url 解析错误')
    data["provider_url"] = f'https://kns.cnki.net/kcms2/article/abstract?{v}'


    # Publication year comes from the crawl bucket, not the page itself.
    pub_year = article_json["years"]
    pub_date = ""
    if len(pub_year) == 4:
        pub_date = pub_year + "0000"
    # else:
    #     pub_year = "1900"
    #     pub_date = pub_year + "0000"
    data["pub_year"] = pub_year
    data["pub_date"] = pub_date
    # Title: strip decorated child tags from the <h1>, then fall back to the
    # page <title> when the <h1> route yields nothing.
    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    if checkExist(h1_title):
        # html of the child tags that must be stripped from the title
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        h1_use = tt_temp
        sel_h1 = Selector(h1_use)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())
    if len(data["title"]) < 1:
        title = sel.xpath("//title/text()").get()
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title
    # Authors: names joined with ";", author ids as "code@name;…".
    author = ""
    author_id = ""
    author_1st = ""
    list_au_span = sel.xpath("//div[@class='wx-tit']/h3[1]/span")

    if checkExist(list_au_span):
        for au_span in list_au_span:
            au_a = au_span.xpath("./a")
            au_code = au_span.xpath("./input[@class='authorcode']/@value").get()
            if checkExist(au_a):
                au_name = au_a.xpath("./text()").get().strip()
                au_href = au_a.xpath("./@href").get().strip()
                if "author" in au_href:
                    author += au_name + ";"
                    if not checkExist(au_code):
                        # Fall back to the id embedded in the onclick handler.
                        click_au = au_a.xpath("./@onclick").get()
                        if checkExist(click_au):
                            tmp_click_au = re.findall("^.*','(\d+)'.*\);$", click_au.strip())
                            if checkExist(tmp_click_au):
                                au_code = tmp_click_au[0].strip()
                    if checkExist(au_code):
                        author_id += au_code + "@" + au_name + ";"
            else:
                au_name = au_span.xpath("./text()").get().strip()
                # Plain-text spans may hold either a name or the school name;
                # skip the latter.
                if "大学" not in au_name:
                    author += au_name + ";"
    author = cleanSemicolon(author)
    author_id = cleanSemicolon(author_id)
    # 20250107: last-resort author-id lookup from the author_organ payload.
    if (not author_id) and src_data.author_organ:
        author_info = json.loads(src_data.author_organ).get('author_info',[])
        _ids = list()
        for aut in author.split(';'):
            for item in author_info:
                if aut != item['author_name']:
                    continue
                _ids.append(f"{item['author_code']}@{aut}")
        author_id = ';'.join(_ids)
    if len(author) > 0:
        vec = author.split(";")
        if len(vec):
            # First author, with any trailing "[1,2]" affiliation marker removed.
            author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])
    data["author"] = author
    data["author_id"] = author_id
    data["author_1st"] = author_1st
    # Emails are paired with authors via the setAUCommFlag('names','emails')
    # JS call embedded in the page.
    email = ""
    list_email = re.findall(r"setAUCommFlag\('([^']+)','([^']+)'\);", src_data.html)
    if checkExist(list_email):
        auLine = list_email[0][0]
        emailLine = list_email[0][1]
        auVec = auLine.split(";")
        emailVec = emailLine.split(";")
        if len(auVec) == len(emailVec):
            for i in range(0, len(emailVec)):
                email += emailVec[i].strip() + ":" + auVec[i].strip() + ";"
        email = cleanSemicolon(email)
    data["email"] = email
    # Organizations: "[n]Name;" list plus "code@Name;" id list.
    organ = ""
    organ_id = ""
    organ_1st = ""
    organ_h3 = sel.xpath("//div[@class='wx-tit']/h3[2]")
    if checkExist(organ_h3):
        list_og_a = organ_h3.xpath("./a")
        if not checkExist(list_og_a):
            list_og_a = organ_h3.xpath("./span/a")
        if checkExist(list_og_a):
            for a_og in list_og_a:
                og_code = organ_h3.xpath("./span/input[@class='authorcode']/@value").get()
                og_name = ''.join(a_og.xpath(".//text()").extract()).strip()
                click_og = a_og.xpath("./@onclick").get()
                og_href = a_og.xpath("./@href").get().strip()
                if "organ" in og_href:
                    if not checkExist(og_code) and checkExist(click_og) and "'in'" in click_og:
                        tmp_click_og = re.findall(r"^.*','(\d+)'.*\);$", click_og.strip())
                        if checkExist(tmp_click_og):
                            og_code = tmp_click_og[0].strip()
                    # NOTE(review): the "> 0" is redundant if checkExist
                    # returns a bool — confirm checkExist's return type.
                    if checkExist(og_code) > 0:
                        organ_id += og_code + "@" + re.sub(r"^((\d+)\.\s+)", "", og_name) + ";"
                    og_name = re.sub("(\d+)\.", r"[\1]", og_name + ";", count=1)
                    organ += og_name
        else:
            list_span_og = organ_h3.xpath("./span/text()")
            if checkExist(list_span_og):
                for span_og in list_span_og:
                    organ += span_og.get() + ";"
    organ = cleanSemicolon(organ)
    organ_id = cleanSemicolon(organ_id)
    if len(organ) > 0:
        vec = organ.split(";")
        if len(vec) > 0:
            # NOTE(review): the pattern ends in "\\]" (literal backslash +
            # "]") in a raw string — likely a typo for "\]", so the "[n]"
            # prefix is probably never stripped here.  Confirm and fix.
            organ_1st = re.sub(r"^\[[,\d+]*?\\]", "", vec[0])
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = organ_1st
    keyword = ""
    list_a_kw = sel.xpath("//p[@class='keywords']/a")
    if checkExist(list_a_kw):
        for item in list_a_kw:
            keyword += re.sub(r"[;；\s]+$", "", item.xpath("string(.)").get("")) + ";"
    data["keyword"] = cleanSemicolon(keyword)
    abstract_ = "".join(sel.xpath('//span[@id="ChDivSummary"]//text()').extract())
    data["abstract"] = abstract_
    # Supervisor ("导师") goes into contributor.
    contributor = ''.join(sel.xpath('//span[text()="导师："]/parent::div[1]/p//text()').extract()).strip().replace("；", ";")
    if contributor.endswith(";"):
        contributor = contributor[0:-1]
    data["contributor"] = contributor
    # Fund: prefer the rule-parsed value, fall back to scraping the page.
    fund = cleanSemicolon(data["fund"]).replace("；；", ";").replace("；", ";")
    if not fund:
        fund = ''.join(sel.xpath('//p[@class="funds"]//text()').extract()).strip()
        fund = cleanSemicolon(fund).replace("；；", ";").replace("；", ";")
    if fund.endswith(";"):
        fund = fund[0:-1]
    data["fund"] = fund
    clc_no = ''.join(sel.xpath('//span[text()="分类号："]/parent::li[1]/p//text()').extract()).strip()
    clc_no = cleanSemicolon(clc_no).replace("-", "")
    # Normalize full-width characters to their ASCII equivalents.
    clc_no = unicodedata.normalize('NFKC', clc_no)
    clc_no_1st = ""
    if clc_no == "+":
        clc_no = ""
    if len(clc_no) > 0:
        vec = clc_no.split(";")
        if len(vec) > 0:
            clc_no_1st = vec[0]
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    # NOTE(review): replacing "-" with ";" inside a DOI looks wrong (DOIs may
    # legitimately contain hyphens) — confirm intended.
    data["doi"] = ''.join(sel.xpath('//span[text()="DOI："]/parent::li[1]/p//text()').extract()).strip().replace("-", ";")
    subject = cleanSemicolon(''.join(sel.xpath('//span[text()="专题："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if subject.endswith(";"):
        subject = subject[0:-1]
    sub_db_class_name = cleanSemicolon(
        ''.join(sel.xpath('//span[text()="专辑："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if sub_db_class_name.endswith(";"):
        sub_db_class_name = sub_db_class_name[0:-1]
    data["subject"] = subject
    data["sub_db_class_name"] = sub_db_class_name
    # 20250317
    subject_major = ''.join(sel.xpath('//span[contains(text(),"学科专业：")]/following::p[1]//text()').extract()).replace("；；", ";").replace("；", ";").strip()
    # NOTE(review): the condition tests sub_db_class_name, not subject_major —
    # looks like a copy-paste slip; confirm which variable should gate the trim.
    data["subject_major"] = subject_major[0:-1] if sub_db_class_name.endswith(";") else subject_major
    # Page info: "a-b+c" means body pages a-b plus jump page c.
    page_info = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页码：")]//text()').extract()).strip()).replace("页码：", "")
    data["page_info"] = page_info
    if '+' in page_info:
        jump_page = page_info.split("+")[-1]
        begin_page = page_info.split("+")[0].split("-")[0]
        end_page = page_info.split("+")[0].split("-")[-1]
    else:
        jump_page = ''
        begin_page = page_info.split("-")[0]
        end_page = page_info.split("-")[-1]
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["page_cnt"] = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页数：")]//text()').extract()).strip()).replace("页数：", "")
    # down_cnt is date-stamped like cited_cnt above.
    down_cnt = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"下载：")]//text()').extract()).strip()).replace(
        "下载：", "")
    if down_cnt.isdigit():
        down_cnt = "{}@{}".format(down_cnt, down_date)
    else:
        down_cnt = ""
    data["down_cnt"] = down_cnt
    # Available full-text formats, detected from the download buttons.
    fulltext_type = ""
    pdf = sel.xpath("//li[@class='btn-dlpdf']/a[contains(@name, 'pdf')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//li[@class='btn-dlcaj']/a[contains(@name, 'caj')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//li[@class='btn-dlhtml']/a[contains(@name, 'html')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    # Minimal field validation: rawid, title and dbcode are mandatory.
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkithesis_cnkithesisarticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkithesis_cnkithesisarticle_etl_callback 解析title出错"
    elif len(data["dbcode"]) < 1:
        err_msg = "cnkithesis_cnkithesisarticle_etl_callback 解析dbcode出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    save_data = []
    save_data.append({"table": "other_latest", "data": data})
    # Collect all reference-page HTML blobs: the first page plus every
    # typed page, keyed "type_pageindex".
    ref_id = ""
    list_ref = []
    idx = 0  # NOTE(review): unused; overwritten results come from the parser
    repeat_cnt = 0
    allref = {"first": data_ref["first_page"]["html"]}
    refs = data_ref["type_page"]["type_code"]
    for key in refs.keys():
        page_html = refs[key]["page_html"]
        for key_page in page_html.keys():
            la_key = "_".join([key, key_page])
            html = page_html[key_page]["html"]
            allref[la_key] = html
    if len(allref) > 0:
        ref_down_date = data_ref["first_page"]["down_date"].split(" ")[0].replace("-", "")
        ref_id, list_ref, repeat_cnt = cnkiarticle_ref_info_parse(data, allref, ref_down_date)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = pub_year
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        ref_data["ref_id"] = ref_id[:-1]
        ref_data["refer_info"] = list_ref
        ref_data["repeat_cnt"] = repeat_cnt
        save_data.append({"table": "other_ref_latest", "data": ref_data})
    # NOTE(review): data["ref_cnt"] was set to str(REFERENCE) above, so this
    # comparison with int 0 can never be true — the ref_state branch looks
    # dead; probably should compare against "0" or int(data["ref_cnt"]).
    elif data["ref_cnt"] == 0:
        result.ref_state = {
            "lngid": lngid,
            "table": "other_ref_latest"
        }
    result.save_data = save_data
    return result


def getStrtype(tp, tpcn):
    """Map a CNKI database code (*tp*) or its Chinese label (*tpcn*) to a
    one-letter document-type code.

    The database code takes precedence over the Chinese label; unknown
    inputs fall back to "K" (bibliographic record).
    """
    type_letters = {
        "CJFQ": "J",
        "CJFD": "J",
        "SSJD": "J",
        "CDFD": "D",
        "CMFD": "D",
        "CPFD": "C",
        "IPFD": "C",
        "SCPD": "P",
        "SCSD": "S",
        "CBBD": "M",
        "期刊": "J",
        "国际期刊": "J",
        "硕士": "D",
        "博士": "D",
        "国内会议": "C",
        "国际会议": "C",
        "报纸": "N",
        "图书": "M",
        "中外文题录": "K",
    }
    # All mapped values are non-empty strings, so "or"-chaining is equivalent
    # to the sequential empty-string checks of the original implementation.
    return type_letters.get(tp, "") or type_letters.get(tpcn, "") or "K"


def get_strtype(tp):
    """Resolve a CNKI-style database code to its classification triple.

    Returns ``(letter, chinese_label, sub_db_id)`` where *letter* is the
    one-character document-type code (default "K"), *chinese_label* is the
    Chinese name of the source type (default ""), and *sub_db_id* is the
    internal sub-database id (default "").
    """
    chinese_labels = {
        "CJFD":"期刊",
        "CJFQ":"期刊",
        "CDFD":"博士",
        "CMFD":"硕士",
        "CPFD":"中国会议",
        "IPFD":"国际会议",
        "CCND":"报纸",
        "CYFD":"年鉴",
        "SCPD":"中国专利",
        "SOPD":"国外专利",
        "SCSF":"国家标准",
        "SCHF":"行业标准",
        "SCSD":"中国标准",
        "SOSD":"国外标准",
        "SNAD":"科技成果",
        "SSJD":"国际期刊",
        "WFBREF":"中外文题录",
        "CRLDENG":"中外文题录",
        "BOOK":"图书",
        "CBBD": "图书",
    }
    letters = {
        "CJFD": "J",
        "CJFQ": "J",
        "SSJD": "J",
        "JOURNAL": "J",
        "WWJD": "J",
        "QUOTATION_INT_JOURNAL": "J",
        "CDFD": "D",
        "CMFD": "D",
        "DISSERTATION_MD": "D",
        "DISSERTATION_PHD": "D",
        "CPFD": "C",
        "IPFD": "C",
        "CONFERENCE_INT": "C",
        "CONFERENCE_CHN": "C",
        "CCND": "N",
        "NEWSPAPER": "N",
        "SCPD": "P",
        "SOPD": "P",
        "PATENT_CHN": "P",
        "SCSF": "S",
        "SCHF": "S",
        "STANDARD_CHN": "S",
        "SCSD": "S",
        "SOSD": "S",
        "BOOK": "M",
        "CBBD": "M",
    }
    sub_db_ids = {
        "CJFD": "00002",
        "CJFQ": "00002",
        "CDFD": "00310",
        "CMFD": "00075",
        "CPFD": "00090",
        "IPFD": "00091",
        "CCND": "00080",
        "SCPD": "00003",
        "SCSF": "00085",
        "SCHF": "00086",
        "SCSD": "00083",
        "SOSD": "00084",
        "SNAD": "00275",
    }
    letter = letters.get(tp, "K")
    label = chinese_labels.get(tp, "")
    sub_db_id = sub_db_ids.get(tp, "")
    return letter, label, sub_db_id


def cnkiarticle_ref_info_parse(meta, allref, ref_down_date):
    """Parse the raw CNKI reference JSON blobs of one article.

    Groups references by (Chinese type, citation text), assigns each kept
    reference a sequential lngid derived from the citing article's lngid,
    and drops duplicates (same linked_id, or extra id-less entries with the
    same citation text).

    Args:
        meta: parsed article data; "rawid", "lngid" and "pub_year" are
            copied onto every reference as the citing-side fields.
        allref: mapping of "<DBCODE>REF..." keys to raw reference JSON
            strings (empty strings are skipped).
        ref_down_date: download date of the reference pages; currently
            unused, kept for interface compatibility with callers.

    Returns:
        (ref_id, list_ref, repeat_cnt): ';'-terminated concatenation of the
        kept reference lngids, the list of reference dicts, and the number
        of dropped duplicates as a string.
    """
    ref_id = ""
    list_ref = []
    idx = 0
    ref_dic = {}
    repeat_cnt = 0
    for key, raw in allref.items():
        if len(raw) == 0:
            continue
        payload = json.loads(raw)
        if "data" not in payload or "data" not in payload["data"]:
            continue
        rlist = payload["data"]["data"]
        # The part before "REF" is the CNKI database code of the cited docs.
        tp = key.split("REF")[0].upper()
        strtype, tpcn, ref_sub_db_id = get_strtype(tp)
        for entry in rlist:
            ref_one = {
                "sub_db_id": ref_sub_db_id,
                "cited_rawid": meta["rawid"],
                "cited_lngid": meta["lngid"],
                "cited_pub_year": meta["pub_year"],
                "strtype_raw": tp,
                "strtype_cn": tpcn,
                "strtype": strtype,
            }
            ref_one = cnkithesisarticle_ref_parse(ref_one, entry)
            ref_one.pop("sub_db_id")
            refer_text_site = ref_one["refer_text_site"]
            # Skip empty/placeholder citation texts.
            if refer_text_site == "." or len(refer_text_site) <= 1:
                continue
            keys = "{}_{}".format(tpcn, refer_text_site)
            bucket = ref_dic.setdefault(keys, {"has_id": {}, "no_id": {}})
            if len(ref_one["linked_id"]) > 0:
                bucket["has_id"][ref_one["linked_id"] + "_" + ref_one["ref_index"]] = ref_one
            else:
                bucket["no_id"][ref_one["ref_index"]] = ref_one
    for bucket in ref_dic.values():
        has_id_dic = bucket["has_id"]
        no_id_dic = bucket["no_id"]
        if len(has_id_dic) >= 1:
            seen_linked_ids = set()
            for ref_one in has_id_dic.values():
                if ref_one["linked_id"] in seen_linked_ids:
                    repeat_cnt = repeat_cnt + 1
                    continue
                # BUGFIX: the dedup set was never populated before, so
                # duplicate linked_ids were all emitted and repeat_cnt
                # could never grow here.
                seen_linked_ids.add(ref_one["linked_id"])
                idx += 1
                ref_lngid = "{}{}".format(meta["lngid"], str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_id = ref_id + ref_lngid + ";"
                list_ref.append(ref_one)
        if len(no_id_dic) >= 1:
            # Without linked ids we cannot distinguish entries sharing the
            # same citation text: keep the first, count the rest as repeats.
            is_first = True
            for ref_one in no_id_dic.values():
                if not is_first:
                    repeat_cnt = repeat_cnt + 1
                    continue
                idx += 1
                ref_lngid = "{}{}".format(meta["lngid"], str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_id = ref_id + ref_lngid + ";"
                list_ref.append(ref_one)
                is_first = False
    return ref_id, list_ref, str(repeat_cnt)


def cnkithesisarticle_ref_parse(data: dict, ref_json: dict) -> dict:
    """Fill one reference record from a raw CNKI reference JSON entry.

    Mutates and returns *data* (pre-seeded by the caller with "strtype",
    "sub_db_id" and the cited_* fields): resolves the linked article id,
    copies title/author/source/pages/DOI, and finally composes the
    human-readable citation string "refer_text_site".

    Args:
        data: partially filled reference dict; must contain "strtype" and
            "sub_db_id".
        ref_json: raw reference entry with "metadata" and optional "source".

    Returns:
        The same dict with all reference fields populated.
    """
    # "source" carries journal-style publication info; when absent, the
    # equivalent fields are read from "metadata" instead (else-branch below).
    if "source" in ref_json:
        source = ref_json["source"]
    else:
        source = {}
    strtype = data["strtype"]
    metadata = ref_json["metadata"]
    # FN is the raw id of the cited article; together with the sub-db id it
    # yields our internal linked_id.
    old_linked_id = get_json_val(metadata,"FN")
    data["old_linked_id"] = old_linked_id
    if len(old_linked_id) > 0 and len(data["sub_db_id"]) > 0:
        data["linked_id"] = BaseLngid().GetLngid(data["sub_db_id"], old_linked_id)
    else:
        data["linked_id"] = ""
    data["title"] = get_json_val(metadata,"TI")
    # Normalize author separators to single ";" and drop a trailing one.
    author = get_json_val(metadata,"AU").replace(";;",";").replace(",",";")
    if len(author) > 0 and author[-1] == ";":
        author = author[:-1]
    data["author"] = author
    site_source_name = ""
    pub_place = ""
    site_pub_year = ""

    if len(source) > 0:
        # Journal-style entry: title/year/volume/issue live in "source".
        site_source_name = get_json_val(source,"title")
        site_pub_year = get_json_val(source,"year")
        data["source_name"] = site_source_name
        data["pub_year"] = site_pub_year
        vol = get_json_val(source,"volume")
        if vol is None:
            vol = ""
        data["vol"] = vol
        data["num"] = get_json_val(source,"issue")
        if strtype == "N":
            # Newspapers: use the DT field for the citation's date part.
            site_pub_year = get_json_val(metadata, "DT")
    else:
        site_pub_year = get_json_val(metadata, "YE")
        data["pub_year"] = site_pub_year
        data["vol"] = ""
        data["num"] = get_json_val(metadata, "QI")
        if strtype == "M":
            # Books: LY is used as publisher, "出版地" as place of publication.
            site_source_name = get_json_val(metadata, "LY")
            pub_place = get_json_val(metadata, "出版地")
            data["publisher"] = site_source_name
        elif strtype == "P":
            # Patents: PD as date, DB as place, GKH as source name
            # (presumably publication date/region/publication number —
            # TODO confirm against CNKI field docs).
            site_pub_year = get_json_val(metadata, "PD")
            pub_place = get_json_val(metadata, "DB")
            site_source_name = get_json_val(metadata, "GKH")
            data["source_name"] = site_source_name
        elif strtype == "S":
            # Standards: the standard number doubles as the source name.
            site_source_name = get_json_val(metadata, "标准号")
            data["source_name"] = site_source_name
        else:
            site_source_name = get_json_val(metadata, "LY")
            data["source_name"] = site_source_name
    # PM carries page info like "12-34+56": begin-end plus a jump page.
    line = get_json_val(metadata,"PM")
    begin_page = ""
    end_page = ""
    jump_page = ""
    data["page_info"] = line
    idx = line.find('+')
    if idx > 0:
        jump_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the plus sign and everything after it
    idx = line.find('-')
    if idx > 0:
        end_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the hyphen and everything after it
    begin_page = line.strip()
    if len(end_page) < 1:
        end_page = begin_page
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["doi"] = get_json_val(metadata,"DOI")
    ref_index = get_json_val(metadata,"index")
    data["ref_index"] = ref_index
    # Keep the untouched source JSON for traceability.
    refer_text_raw = json.dumps(ref_json, ensure_ascii=False)
    data["refer_text_raw"] = refer_text_raw
    # Compose the citation string, e.g. "Au1,Au2.Title[J].Source,2020,12(3):45-67".
    refer_text_site = ""
    if strtype == "S":
        if len(site_source_name) > 0:
            refer_text_site += site_source_name + ","
        if len(data["title"]) > 0:
            refer_text_site += data["title"] + "[{}].".format(data["strtype"])
    else:
        if len(author) > 0:
            refer_text_site += author.replace(";",",") + "."
        if len(data["title"]) > 0:
            refer_text_site += data["title"] + "[{}].".format(data["strtype"])
        if len(pub_place) > 0:
            refer_text_site += pub_place + ":"
        if len(site_source_name) > 0:
            refer_text_site += site_source_name + ","
        if len(site_pub_year) > 0:
            refer_text_site += site_pub_year
        if len(refer_text_site) > 0 and refer_text_site[-1] == ",":
            refer_text_site = refer_text_site[:-1]
        if strtype == "J":
            if len(data["vol"]) > 0:
                refer_text_site += "," + data["vol"]
            if len(data["num"]) > 0:
                refer_text_site += "({})".format(data["num"])
        if strtype in ("J","M","D","C"):
            if len(data["begin_page"]) > 0:
                refer_text_site += ":" + data["begin_page"]
                if len(data["end_page"]) > 0:
                    refer_text_site += "-" + data["end_page"]
    data["refer_text_site"] = refer_text_site
    return data


def cnkicdfdthesis_cnkicdfdthesisarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for CNKI CDFD (doctoral thesis) article detail pages.

    Merges the rule-parsed fields with values re-extracted from the
    downloaded HTML, fills in the CDFD sub-database constants, normalizes
    the record for the "other_latest" table, and parses the downloaded
    reference pages into a companion "other_ref_latest" record.

    Args:
        callmodel: framework call model exposing para_dicts (rule output),
            down_model (raw downloads) and sql_model (the task row).

    Returns:
        EtlDealModel whose save_data holds the article record and, when any
        references were parsed, the reference record.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_info_json"])
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    data_refcnt = para_dicts["data"]["1_2"]
    data_ref = down_model["1_3"].dict()
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["ref_cnt"] = data_refcnt.get("REFERENCE", "")
    cited_cnt = data_refcnt.get("CITING", "")
    if cited_cnt.isdigit():
        # Time-stamp counters so batches taken on different days differ.
        cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Constants identifying the CDFD (doctoral thesis) sub database.
    sub_db_id = "00310"
    product = "CNKI"
    sub_db = "CDFD"
    provider = "CNKI"
    source_type = "4"
    data["is_deprecated"] = "0"
    rawid = data["rawid"]
    data["rawid_mysql"] = rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["country"] = "CN"
    data["language"] = "ZH"
    data["degree"] = "博士"
    sel = Selector(src_data.html)
    # The hidden "copy" input carries the canonical abstract-page query string.
    v = sel.xpath('//input[@id="copy"]/@value').extract_first()
    if not v:
        raise Exception('provider_url 解析错误')
    data["provider_url"] = f'https://kns.cnki.net/kcms2/article/abstract?{v}'

    pub_year = article_json["years"]
    pub_date = ""
    if len(pub_year) == 4:
        pub_date = pub_year + "0000"
    data["pub_year"] = pub_year
    data["pub_date"] = pub_date
    # Title: strip attribute-bearing child tags out of <h1> before taking
    # its text, so markers/superscripts don't leak into the title.
    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    if checkExist(h1_title):
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        sel_h1 = Selector(tt_temp)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())
    if len(data["title"]) < 1:
        # Fall back to the page <title>, minus the site suffix.
        title = sel.xpath("//title/text()").get()
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title
    author = ""
    author_id = ""
    author_1st = ""
    list_au_span = sel.xpath("//div[@class='wx-tit']/h3[1]/span")
    if checkExist(list_au_span):
        for au_span in list_au_span:
            au_a = au_span.xpath("./a")
            au_code = au_span.xpath("./input[@class='authorcode']/@value").get()
            if checkExist(au_a):
                au_name = au_a.xpath("./text()").get().strip()
                au_href = au_a.xpath("./@href").get().strip()
                if "author" in au_href:
                    author += au_name + ";"
                    if not checkExist(au_code):
                        # Author code may only appear inside the onclick JS call.
                        click_au = au_a.xpath("./@onclick").get()
                        if checkExist(click_au):
                            tmp_click_au = re.findall(r"^.*','(\d+)'.*\);$", click_au.strip())
                            if checkExist(tmp_click_au):
                                au_code = tmp_click_au[0].strip()
                    if checkExist(au_code):
                        author_id += au_code + "@" + au_name + ";"
            else:
                au_name = au_span.xpath("./text()").get().strip()
                # Plain-text spans carrying the university name are skipped.
                if "大学" not in au_name:
                    author += au_name + ";"
    author = cleanSemicolon(author)
    author_id = cleanSemicolon(author_id)
    # Fallback (20250107): pull author codes from the author_organ JSON blob.
    if (not author_id) and src_data.author_organ:
        author_info = json.loads(src_data.author_organ).get('author_info', [])
        _ids = list()
        for aut in author.split(';'):
            for item in author_info:
                if aut != item['author_name']:
                    continue
                _ids.append(f"{item['author_code']}@{aut}")
        author_id = ';'.join(_ids)
    if len(author) > 0:
        vec = author.split(";")
        if len(vec):
            # Drop trailing affiliation markers like "[1,2]".
            author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])
    data["author"] = author
    data["author_id"] = author_id
    data["author_1st"] = author_1st
    # Emails are paired with authors through the setAUCommFlag JS call.
    email = ""
    list_email = re.findall(r"setAUCommFlag\('([^']+)','([^']+)'\);", src_data.html)
    if checkExist(list_email):
        auLine = list_email[0][0]
        emailLine = list_email[0][1]
        auVec = auLine.split(";")
        emailVec = emailLine.split(";")
        if len(auVec) == len(emailVec):
            for i in range(0, len(emailVec)):
                email += emailVec[i].strip() + ":" + auVec[i].strip() + ";"
        email = cleanSemicolon(email)
    data["email"] = email
    organ = ""
    organ_id = ""
    organ_1st = ""
    organ_h3 = sel.xpath("//div[@class='wx-tit']/h3[2]")
    if checkExist(organ_h3):
        list_og_a = organ_h3.xpath("./a")
        if not checkExist(list_og_a):
            list_og_a = organ_h3.xpath("./span/a")
        if checkExist(list_og_a):
            for a_og in list_og_a:
                og_code = organ_h3.xpath("./span/input[@class='authorcode']/@value").get()
                og_name = ''.join(a_og.xpath(".//text()").extract()).strip()
                click_og = a_og.xpath("./@onclick").get()
                og_href = a_og.xpath("./@href").get().strip()
                if "organ" in og_href:
                    if not checkExist(og_code) and checkExist(click_og) and "'in'" in click_og:
                        tmp_click_og = re.findall(r"^.*','(\d+)'.*\);$", click_og.strip())
                        if checkExist(tmp_click_og):
                            og_code = tmp_click_og[0].strip()
                    # (was `checkExist(og_code) > 0` — same truth value, simplified)
                    if checkExist(og_code):
                        organ_id += og_code + "@" + re.sub(r"^((\d+)\.\s+)", "", og_name) + ";"
                    og_name = re.sub(r"(\d+)\.", r"[\1]", og_name + ";", count=1)
                    organ += og_name
        else:
            list_span_og = organ_h3.xpath("./span/text()")
            if checkExist(list_span_og):
                for span_og in list_span_og:
                    organ += span_og.get() + ";"
    organ = cleanSemicolon(organ)
    organ_id = cleanSemicolon(organ_id)
    if len(organ) > 0:
        vec = organ.split(";")
        if len(vec) > 0:
            # BUGFIX: pattern was r"^\[[,\d+]*?\\]" which required a literal
            # backslash before "]", so the leading "[1,2]" marker was never
            # stripped; mirror the author_1st pattern.
            organ_1st = re.sub(r"^\[[,\d+]*?\]", "", vec[0])
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = organ_1st
    keyword = ""
    list_a_kw = sel.xpath("//p[@class='keywords']/a")
    if checkExist(list_a_kw):
        for item in list_a_kw:
            keyword += re.sub(r"[;；\s]+$", "", item.xpath("string(.)").get("")) + ";"
    data["keyword"] = cleanSemicolon(keyword)
    abstract_ = "".join(sel.xpath('//span[@id="ChDivSummary"]//text()').extract())
    data["abstract"] = abstract_
    contributor = ''.join(sel.xpath('//span[text()="导师："]/parent::div[1]/p//text()').extract()).strip().replace("；", ";")
    if contributor.endswith(";"):
        contributor = contributor[0:-1]
    data["contributor"] = contributor
    fund = cleanSemicolon(data["fund"]).replace("；；", ";").replace("；", ";")
    if not fund:
        fund = ''.join(sel.xpath('//p[@class="funds"]//text()').extract()).strip()
        fund = cleanSemicolon(fund).replace("；；", ";").replace("；", ";")
    if fund.endswith(";"):
        fund = fund[0:-1]
    data["fund"] = fund
    clc_no = ''.join(sel.xpath('//span[text()="分类号："]/parent::li[1]/p//text()').extract()).strip()
    clc_no = cleanSemicolon(clc_no).replace("-", "")
    # Normalize full-width characters in classification numbers.
    clc_no = unicodedata.normalize('NFKC', clc_no)
    clc_no_1st = ""
    if clc_no == "+":
        clc_no = ""
    if len(clc_no) > 0:
        vec = clc_no.split(";")
        if len(vec) > 0:
            clc_no_1st = vec[0]
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    # NOTE(review): replacing "-" with ";" mangles standard DOIs — confirm
    # whether the site renders multiple DOIs joined by "-" here.
    data["doi"] = ''.join(sel.xpath('//span[text()="DOI："]/parent::li[1]/p//text()').extract()).strip().replace("-", ";")
    subject = cleanSemicolon(''.join(sel.xpath('//span[text()="专题："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if subject.endswith(";"):
        subject = subject[0:-1]
    sub_db_class_name = cleanSemicolon(''.join(sel.xpath('//span[text()="专辑："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if sub_db_class_name.endswith(";"):
        sub_db_class_name = sub_db_class_name[0:-1]
    data["subject"] = subject
    data["sub_db_class_name"] = sub_db_class_name
    subject_major = ''.join(sel.xpath('//span[contains(text(),"学科专业：")]/following::p[1]//text()').extract()).replace("；；", ";").replace("；", ";").strip()
    # BUGFIX: the trailing-";" test previously checked sub_db_class_name
    # (copy-paste), so subject_major never lost its own trailing semicolon.
    data["subject_major"] = subject_major[0:-1] if subject_major.endswith(";") else subject_major
    page_info = cleanSemicolon(
        ''.join(sel.xpath('//span[contains(text(),"页码：")]//text()').extract()).strip()).replace("页码：", "")
    data["page_info"] = page_info
    # Split "12-34+56" style page info into begin/end/jump pages.
    if '+' in page_info:
        jump_page = page_info.split("+")[-1]
        begin_page = page_info.split("+")[0].split("-")[0]
        end_page = page_info.split("+")[0].split("-")[-1]
    else:
        jump_page = ''
        begin_page = page_info.split("-")[0]
        end_page = page_info.split("-")[-1]
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["page_cnt"] = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页数：")]//text()').extract()).strip()).replace("页数：", "")
    down_cnt = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"下载：")]//text()').extract()).strip()).replace("下载：", "")
    if down_cnt.isdigit():
        down_cnt = "{}@{}".format(down_cnt, down_date)
    else:
        down_cnt = ""
    data["down_cnt"] = down_cnt
    # Detect which full-text download formats the page offers.
    fulltext_type = ""
    pdf = sel.xpath("//li[@class='btn-dlpdf']/a[contains(@name, 'pdf')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//li[@class='btn-dlcaj']/a[contains(@name, 'caj')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//li[@class='btn-dlhtml']/a[contains(@name, 'html')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    # Mandatory fields decide the record status.
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkicdfdthesis_cnkicdfdthesisarticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkicdfdthesis_cnkicdfdthesisarticle_etl_callback 解析title出错"
    elif len(data["dbcode"]) < 1:
        err_msg = "cnkicdfdthesis_cnkicdfdthesisarticle_etl_callback 解析dbcode出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    save_data = []
    save_data.append({"table": "other_latest", "data": data})
    # Collect every downloaded reference page: the first page plus each
    # typed page under type_page/type_code.
    ref_id = ""
    list_ref = []
    repeat_cnt = 0
    allref = {"first": data_ref["first_page"]["html"]}
    refs = data_ref["type_page"]["type_code"]
    for key in refs.keys():
        page_html = refs[key]["page_html"]
        for key_page in page_html.keys():
            la_key = "_".join([key, key_page])
            allref[la_key] = page_html[key_page]["html"]
    if len(allref) > 0:
        ref_down_date = data_ref["first_page"]["down_date"].split(" ")[0].replace("-", "")
        ref_id, list_ref, repeat_cnt = cnkiarticle_ref_info_parse(data, allref, ref_down_date)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = pub_year
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        ref_data["ref_id"] = ref_id[:-1]
        ref_data["repeat_cnt"] = repeat_cnt
        ref_data["refer_info"] = list_ref
        save_data.append({"table": "other_ref_latest", "data": ref_data})
    # BUGFIX: data["ref_cnt"] comes from the rule output as a string, so the
    # old `== 0` comparison could never be true and the empty-reference state
    # was never reported.
    elif str(data["ref_cnt"]) == "0":
        result.ref_state = {
            "lngid": lngid,
            "table": "other_ref_latest"
        }
    result.save_data = save_data
    return result


def cnkiconference_cnkiconferenceclass_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Home/class-stage callback for CNKI conference crawling.

    On the first page it fans the task out into one row per result page;
    for every proceedings entry on the current page it queues a next-stage
    (list) row carrying the proceedings codes inside list_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" not in para_dicts["data"]:
        return result
    page_data = para_dicts["data"]["1_1"]
    total_page = page_data["lblPageCount"]
    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        # Page 1 creates a sibling task row for every result page.
        base_row = {
            "list_rawid": sql_model.list_rawid,
            "task_name": sql_model.task_name,
            "task_tag": sql_model.task_tag,
            "sub_db_id": sql_model.sub_db_id,
            "home_json": sql_model.home_json,
        }
        page_insert = DealInsertModel()
        page_insert.insert_pre = CoreSqlValue.insert_ig_it
        for page_no in range(1, int(total_page) + 1):
            row = base_row.copy()
            row["page"] = total_page
            row["page_index"] = page_no
            page_insert.lists.append(row)
        result.befor_dicts.insert.append(page_insert)
    result.befor_dicts.update.update({'is_active': 1, "page": total_page})
    next_insert = DealInsertModel()
    next_insert.insert_pre = CoreSqlValue.insert_ig_it
    for child in page_data["value"]["children"]:
        result.befor_dicts.update.update({'page': total_page})
        record_name = child['meeting_record_name'].strip()
        if not record_name:
            continue
        # href ends with "...=<record_code>[,<meeting_code>]".
        code_fields = child['href'].strip().split("=")[-1].split(",")
        record_code = code_fields[0].strip()
        meeting_code = code_fields[1].strip() if len(code_fields) > 1 else ""
        home_json = json.loads(sql_model.home_json)
        home_json["meeting_record_code"] = record_code
        home_json["journal_raw_id"] = record_code
        home_json["meeting_code"] = meeting_code
        home_json["lwjcode"] = ""
        home_json["meeting_record_name"] = record_name
        next_insert.lists.append({
            "task_name": sql_model.task_name,
            "task_tag": task_info.task_tag_next,
            "sub_db_id": sql_model.sub_db_id,
            "list_rawid": record_code,
            "page": total_page,
            "page_index": "-1",
            "list_json": json.dumps(home_json, ensure_ascii=False)
        })
    result.next_dicts.insert.append(next_insert)
    return result


def cnkiconference_cnkiconferencelist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """List-stage callback for one CNKI conference proceedings.

    Handles up to three parse channels from para_dicts["data"]:
      * "1_1": resolve the proceedings' lwjcode from the navigation HTML
        and write it back into list_json via a MySQL JSON_SET update.
      * "1_2": scrape meeting metadata (name/date/society/place/publisher/
        publication date) into list_json, again via JSON_SET.
      * "1_3": fan each publication year out into per-page rows (50 records
        per page) for the next (sublist) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    list_json = json.loads(sql_model.list_json)
    # print(para_dicts["data"])
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"].get("html", "")
        sel = Selector(text=html)
        # The <li> whose lwjcode equals our record code carries the id we need.
        lwjcode = cleanSemicolon(sel.xpath("//li[@lwjcode='{}']/@id".format(list_json["meeting_record_code"])).get())
        du_model = DealUpdateModel()
        du_model.where.update({"list_rawid": sql_model.list_rawid,
                               "task_tag": task_info.task_tag,
                               "task_name": task_info.task_name,
                               })
        list_json["lwjcode"] = lwjcode
        # json_update() renders the key/value pairs for a JSON_SET expression.
        duplicte = json_update(list_json)
        du_model.update_no_placeholder.update({"list_json": f"JSON_SET(list_json, {duplicte})"})
        result.befor_dicts.update_list.append(du_model)
    if "1_2" in para_dicts["data"]:
        html = para_dicts["data"]["1_2"].get("html", "")
        sel = Selector(text=html)
        p_info = sel.xpath("//p[@class='hostUnit']")
        if checkExist(p_info):
            # Each <p class='hostUnit'> holds a label/value pair of meeting info.
            for p in p_info:
                txt_tag = p.xpath("./label/text()")
                if checkExist(txt_tag):
                    tag = txt_tag.extract()[0].strip()
                    val = cleanSemicolon(p.xpath("./span/text()").get().replace(" ", ""))
                    if "会议名称" in tag:
                        list_json["meeting_name"] = val
                    elif "会议时间" in tag:
                        list_json["meeting_date_raw"] = val
                    elif "学会名称" in tag:
                        list_json["society"] = val
                    elif "会议地点" in tag:
                        list_json["meeting_place"] = val
                    elif "出版单位" in tag:
                        list_json["publisher"] = val
                    elif "出版日期" in tag:
                        list_json["pub_date"] = val
        du_model = DealUpdateModel()
        du_model.where.update({"list_rawid": sql_model.list_rawid,
                               "task_tag": task_info.task_tag,
                               "task_name": task_info.task_name,
                               })
        duplicte = json_update(list_json)
        du_model.update_no_placeholder.update({"list_json": f"JSON_SET(list_json, {duplicte})"})
        result.befor_dicts.update_list.append(du_model)
    if "1_3" in para_dicts["data"]:
        # sqlcode = para_dicts["data"]["1_4"].get("sqlcode", "")
        sqlcode = ""
        list_years = para_dicts["data"]["1_3"].get("children", "")
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in list_years:
            # totalnum looks like "(123)"; skip entries without a count.
            totalnum = item["totalnum"]
            if "(" not in totalnum:
                continue
            totalnum = int(totalnum.replace("(", "").replace(")", ""))
            # Ceiling division: 50 records per result page.
            total_page = int((totalnum + 49) / 50)
            # NOTE(review): code_dicts is overwritten on every iteration, so
            # only the last year's max_page survives — confirm intended.
            result.code_dicts = {
                "1_1": {"max_page": total_page}
            }
            years = item['years']
            for page_index in range(1, total_page + 1):
                tmp_json = {
                    "years": years,
                    "page_index": page_index,
                    "journal_raw_id": list_json["meeting_record_code"],
                    "meeting_record_code": list_json["meeting_record_code"],
                    "meeting_record_name": list_json["meeting_record_name"]
                }
                # Only the first page of each year starts active and carries
                # the search parameters.
                is_active = 0
                if page_index == 1:
                    tmp_json["sqlcode"] = sqlcode
                    tmp_json["HandlerId"] = "18"
                    tmp_json["IsSearch"] = "true"
                    is_active = 1
                else:
                    tmp_json["HandlerId"] = "18"
                    tmp_json["IsSearch"] = "false"
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": "{}_{}".format(list_json["meeting_record_code"], years),
                    "page": total_page,
                    "page_index": page_index,
                    "is_active": is_active,
                    "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkiconference_cnkiconferencesublist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Sublist-stage callback: extract article rows from one result page.

    For each table row it reads the "v" query parameter (new-style article
    id) from the detail link and the filename from the favourite button,
    then queues one article-stage row per filename. On page 1 it also
    writes the sqlcode into the sibling page rows and activates them.
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    html = callmodel.para_dicts["data"]["1_1"].get("html", "")
    sel = Selector(html)
    # sqlcode = sel.xpath("//input[@id='sqlVal']/@value").extract()[0]
    sqlcode = ""
    tr_rs = sel.xpath("//table/tbody/tr")
    if tr_rs and len(tr_rs) > 0:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for new in tr_rs:
            # (older filename extraction kept for reference)
            # filename = ""
            # # extract the link
            # href = new.xpath("./td[@class='name']/a/@href").get("").strip().lower()
            # hrefs = href.split("?")[1].split("&")
            # for hr in hrefs:
            #     if "filename" in hr:
            #         filename = hr.split("=")[1]
            dbcode = ""
            filename = ""
            # extract the detail-page link
            href = new.xpath("./td[@class='name']//a/@href").get("").strip()
            dictss = BaseUrl.urlQuery2Dict(href)
            value = dictss["v"]  # a new-style article id
            # The favourite button ('收藏') carries the filename attribute.
            shoucang_a = new.xpath("./td[@class='operat']/a[@title='收藏']")
            if checkExist(shoucang_a):
                filename = shoucang_a.xpath("./@data-filename").get("").strip().lower()

            if filename != '':
                # print(filename)
                article_json = subclass_json.copy()
                tmp_json = {
                    "value": value
                }
                article_json.update(tmp_json)
                # page_index is a list-stage concern; drop it from article rows.
                del article_json["page_index"]
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "rawid": filename,
                    "article_info_json": json.dumps(article_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
                # NOTE(review): this update model is rebuilt and appended once
                # per matching row on page 1, producing repeated identical
                # updates — confirm whether once per page was intended.
                if subclass_json["page_index"] == 1:
                    du_model = DealUpdateModel()
                    duplicte = json_update({"sqlcode": sqlcode})
                    du_model.update_no_placeholder.update(
                        {
                            "subclass_json": f"JSON_SET(subclass_json, {duplicte})",
                            "is_active": "1"
                        }
                    )
                    # WHERE: same task/list row but a different page_index.
                    oplist = []
                    op1 = OperatorSqlModel()
                    op1.key = "task_name"
                    op1.value = sql_model.task_name
                    op1.operator = "="
                    oplist.append(op1)
                    op2 = OperatorSqlModel()
                    op2.key = "task_tag"
                    op2.value = sql_model.task_tag
                    op2.operator = "="
                    oplist.append(op2)
                    op3 = OperatorSqlModel()
                    op3.key = "list_rawid"
                    op3.value = sql_model.list_rawid
                    op3.operator = "="
                    oplist.append(op3)
                    op4 = OperatorSqlModel()
                    op4.key = "page_index"
                    op4.value = subclass_json['page_index']
                    op4.operator = "!="
                    oplist.append(op4)
                    du_model.where = oplist
                    result.befor_dicts.update_list.append(du_model)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkiconference_cnkiconferencearticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Article-stage callback for conference papers; delegates to the thesis
    article callback, which handles the shared detail-page processing."""
    return cnkithesis_cnkithesisarticle_callback(callmodel)


def cnkiconference_cnkiconferencearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL parser for a CNKI conference (CPFD) article detail page.

    Combines the rule-parse results (``callmodel.para_dicts``), the raw
    downloads (``callmodel.down_model``) and the task row
    (``callmodel.sql_model``) into an ``other_latest`` record, plus an
    ``other_ref_latest`` record when reference pages were downloaded.

    :param callmodel: pipeline callback model; ``para_dicts["data"]["1_1"]``
        is the pre-parsed article dict, ``down_model["1_1"]`` the article
        page download, ``down_model["1_3"]`` the reference-page downloads.
    :return: ``EtlDealModel`` with ``status``/``err_msg`` set and
        ``save_data`` holding the table rows to persist.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    # Propagate an upstream rule-parse failure without doing any work.
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_info_json"])
    journal_info = sql_model["journal_info"]
    # "1_1" = parsed article fields, "1_2" = reference/citation counters,
    # "1_3" = downloaded reference pages.
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    data_refcnt = para_dicts["data"]["1_2"]
    data_ref = down_model["1_3"].dict()
    # data_cited = down_model["1_4"].dict()
    # Download date normalized to YYYYMMDD.
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["ref_cnt"] = data_refcnt.get("REFERENCE", "")
    cited_cnt = data_refcnt.get("CITING", "")
    # Tag a numeric citation count with its download date; non-numeric
    # values are kept as-is.
    if cited_cnt.isdigit():
        cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed identifiers for the CNKI conference (CPFD) sub-database.
    sub_db_id = "00090"
    product = "CNKI"
    sub_db = "CPFD"
    provider = "CNKI"
    source_type = "6"
    data["is_deprecated"] = "0"
    rawid = data["rawid"].lower()
    data["rawid_mysql"] = rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    # data["provider_url"] = "https://kns.cnki.net/kcms/detail/detail.aspx?dbcode=cpfd&filename=" + rawid
    data["country"] = "CN"
    data["language"] = "ZH"
    # Strip a private-use glyph that breaks lxml parsing before building the selector.
    sel = Selector(src_data.html.replace(u"\ue37a",""))
    # The page's "copy link" input carries the canonical abstract-page query string.
    v = sel.xpath('//input[@id="copy"]/@value').extract_first()
    if not v:
        raise Exception('provider_url 解析错误')
    data["provider_url"] = f'https://kns.cnki.net/kcms2/article/abstract?{v}'
    data["meeting_name"] = journal_info.get("meeting_name", "")
    # A truncated ("...") meeting name from the list page is re-read from the detail page.
    if '...' in data["meeting_name"]:
        data["meeting_name"] = cleanSemicolon(''.join(sel.xpath('//span[text()="会议名称："]/parent::div[1]/p//text()').extract()).strip())
    data["meeting_record_name"] = journal_info.get("meeting_record_name", "")
    data["society"] = journal_info.get("society", "")
    data["meeting_code"] = journal_info.get("meeting_code", "")
    data["meeting_place"] = journal_info.get("meeting_place", "")
    data["meeting_record_code"] = journal_info.get("meeting_record_code", "")
    data["publisher"] = journal_info.get("publisher", "")
    pub_date = journal_info.get("pub_date", "").replace("-", "").strip()
    # NOTE(review): an 8-digit pub_date (YYYYMMDD) sets pub_year but then
    # falls into the else branch below and is wiped to "" — only 6-digit
    # values survive. If full dates should be kept, this second `if` was
    # probably meant to be nested under (or `elif` of) the first — confirm.
    if len(pub_date) > 4:
        pub_year = pub_date[0:4]
    if len(pub_date) == 6:
        pub_date = pub_date + "00"
    else:
        pub_date = ""
        pub_year = ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    meeting_date_raw = journal_info.get("meeting_date_raw", "")
    data["meeting_date_raw"] = meeting_date_raw
    # Derive accept_date from the raw meeting date range, zero-padding
    # partial dates to 8 digits (YYYY0000 / YYYYMM00).
    tmps_acc = meeting_date_raw.split("-")
    accept_date = ""
    if len(tmps_acc) > 0 and len(tmps_acc[0]) == 8:
        accept_date = tmps_acc[0]
    elif len(meeting_date_raw) > 0:
        accept_date = meeting_date_raw.replace("-", "")
        # Reject values that do not start with a plausible year digit.
        if accept_date[0] != "1" and accept_date[0] != "2":
            accept_date = ""
        if len(accept_date) < 4:
            accept_date = ""
        elif len(accept_date) == 4:
            accept_date = accept_date + "0000"
        elif len(accept_date) == 6:
            accept_date = accept_date + "00"
        if accept_date == "":
            accept_date = pub_date
    data["accept_date"] = accept_date
    # Fall back to accept_date when no publication date was parsed.
    if len(data["pub_date"]) == 0 and len(accept_date) > 0:
        data["pub_date"] = accept_date
        data["pub_year"] = accept_date[0:4]
    # Title: take the <h1>, drop child tags that carry attributes
    # (badges, icons), then fall back to <title> minus the site suffix.
    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    if checkExist(h1_title):
        # HTML of attribute-bearing child tags to strip from the title.
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        h1_use = tt_temp
        sel_h1 = Selector(h1_use)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())
    if len(data["title"]) < 1:
        title = sel.xpath("//title/text()").get()
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title
    # (Legacy author-span parsing removed; see VCS history.)

    # 20251029
    # Authors: one <span> per author; superscripts (affiliation markers)
    # are folded into "name[sup]" form.
    author_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
            if '<sup>' in span.extract():
                author_info = ''.join(span.xpath(".//text()[not(ancestor::sup)]").extract()).strip()
                sup = ''.join(span.xpath(".//sup/text()").extract()).strip()
                author = f'{author_info}[{sup}]'
            else:
                author = ''.join(span.xpath(".//text()").extract()).strip()
            author_list.append(author)
    author = ';'.join(author_list)
    author_id = ''
    # 20250107
    # Map author names to author codes from the separately downloaded
    # author/organ JSON, producing "code@name" pairs.
    if (not author_id) and src_data.author_organ:
        author_info = json.loads(src_data.author_organ).get('author_info', [])
        _ids = list()
        for aut in author.split(';'):
            for item in author_info:
                if aut != item['author_name']:
                    continue
                _ids.append(f"{item['author_code']}@{aut}")
        author_id = ';'.join(_ids)
    # NOTE(review): author_1st computed here is never used —
    # data["author_1st"] below recomputes it with a different regex.
    if len(author) > 0:
        vec = author.split(";")
        if len(vec):
            author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])

    data["author"] = author
    data["author_id"] = author_id
    data["author_1st"] = re.sub('\[.*?\]','', author.split(';')[0], 1)
    # Emails are embedded in a JS call as parallel name/email lists;
    # pair them up as "email:name;" entries when the lists align.
    email = ""
    list_email = re.findall(r"setAUCommFlag\('([^']+)','([^']+)'\);", src_data.html)
    if checkExist(list_email):
        auLine = list_email[0][0]
        emailLine = list_email[0][1]
        auVec = auLine.split(";")
        emailVec = emailLine.split(";")
        if len(auVec) == len(emailVec):
            for i in range(0, len(emailVec)):
                email += emailVec[i].strip() + ":" + auVec[i].strip() + ";"
        email = cleanSemicolon(email)
    data["email"] = email
    organ = ""
    organ_id = ""
    organ_1st = ""
    # (Legacy organ-anchor parsing removed; see VCS history.)
    # Organizations: one <span> each; a leading "1." index is rewritten
    # to "[1]" to match the author superscript form.
    organ_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
            organ_name = ''.join(span.xpath(".//text()").extract()).strip()
            og_name = re.sub("(\d+)\.", r"[\1]", organ_name, count=1)
            organ_list.append(og_name)
    if organ_list:
        organ = ';'.join(organ_list)
        organ_1st = organ_list[0]
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = re.sub('\[.*?\]','', organ.split(';')[0], 1)
    keyword = ""
    list_a_kw = sel.xpath("//p[@class='keywords']/a")
    if checkExist(list_a_kw):
        for item in list_a_kw:
            keyword += re.sub(r"[;；\s]+$", "", item.xpath("string(.)").get("")) + ";"
    data["keyword"] = cleanSemicolon(keyword)
    # Abstract: strip the trailing "expand/restore" UI captions and the
    # leading "<正>" body marker.
    abstract_ = "".join(sel.xpath("//span[@id='ChDivSummary']/text()").getall())
    abstract_ = cleanSemicolon(abstract_)
    if abstract_.endswith("更多还原"):
        abstract_ = abstract_[0:abstract_.find("更多还原")]
    elif abstract_.endswith("还原"):
        abstract_ = abstract_[0:abstract_.find("还原")]
    if "<正>" in abstract_:
        abstract_ = re.sub(r"^<正>~~", "", abstract_[len("<正>"):])
        abstract_ = re.sub(r"^<正>", "", abstract_).replace("~~", "")
    data["abstract"] = abstract_
    # Normalize full-width semicolons and separators in host_organ/fund.
    host_organ = cleanSemicolon(data["host_organ"]).replace("；；", ";").replace("；", ";")
    if host_organ.endswith(";"):
        host_organ = host_organ[0:-1]
    data["host_organ"] = cleanSemicolon(host_organ.replace("、", ";")).replace("\"", "")
    fund = cleanSemicolon(data["fund"]).replace("；；", ";").replace("；", ";")
    if not fund:
        fund = ''.join(sel.xpath('//p[@class="funds"]//text()').extract()).strip()
        fund = cleanSemicolon(fund).replace("；；", ";").replace("；", ";")
    if fund.endswith(";"):
        fund = fund[0:-1]
    data["fund"] = fund
    clc_no = ''.join(sel.xpath('//span[text()="分类号："]/parent::li[1]/p//text()').extract()).strip()
    clc_no = cleanSemicolon(clc_no).replace("-", "")
    clc_no_1st = ""
    if clc_no == "+":
        clc_no = ""
    if len(clc_no) > 0:
        vec = clc_no.split(";")
        if len(vec) > 0:
            clc_no_1st = vec[0]
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    # NOTE(review): replacing "-" with ";" mangles ordinary DOIs (which
    # contain hyphens) — confirm this substitution is intended.
    data["doi"] = cleanSemicolon(''.join(sel.xpath('//span[text()="DOI："]/parent::li[1]/p//text()').extract()).strip().replace("-", ";")).replace("-", ";")
    subject = cleanSemicolon(''.join(sel.xpath('//span[text()="专题："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if subject.endswith(";"):
        subject = subject[0:-1]
    sub_db_class_name = cleanSemicolon(''.join(sel.xpath('//span[text()="专辑："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if sub_db_class_name.endswith(";"):
        sub_db_class_name = sub_db_class_name[0:-1]
    data["subject"] = subject
    data["sub_db_class_name"] = sub_db_class_name
    # Page info like "12-15+20": part after "+" is the jump page, the
    # "begin-end" range precedes it.
    page_info = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页码：")]//text()').extract()).strip()).replace("页码：", "")
    data["page_info"] = page_info
    if '+' in page_info:
        jump_page = page_info.split("+")[-1]
        begin_page = page_info.split("+")[0].split("-")[0]
        end_page = page_info.split("+")[0].split("-")[-1]
    else:
        jump_page = ''
        begin_page = page_info.split("-")[0]
        end_page = page_info.split("-")[-1]
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["page_cnt"] = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页数：")]//text()').extract()).strip()).replace("页数：", "")
    down_cnt = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"下载：")]//text()').extract()).strip()).replace("下载：", "")
    # Unlike cited_cnt, a non-numeric download count is blanked out.
    if down_cnt.isdigit():
        down_cnt = "{}@{}".format(down_cnt, down_date)
    else:
        down_cnt = ""
    data["down_cnt"] = down_cnt
    # Available full-text formats, detected from the download buttons.
    fulltext_type = ""
    pdf = sel.xpath("//li[@class='btn-dlpdf']/a[contains(@name, 'pdf')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//li[@class='btn-dlcaj']/a[contains(@name, 'caj')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//li[@class='btn-dlhtml']/a[contains(@name, 'html')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    # Minimal validation: rawid and title are required for a usable record.
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkiconference_cnkiconferencearticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkiconference_cnkiconferencearticle_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    save_data = []
    save_data.append({"table": "other_latest", "data": data})
    # Collect all downloaded reference pages (first page plus every
    # type/page combination) and parse them into reference records.
    ref_id = ""
    list_ref = []
    repeat_cnt = 0
    allref = {"first": data_ref["first_page"]["html"]}
    refs = data_ref["type_page"]["type_code"]
    for key in refs.keys():
        page_html = refs[key]["page_html"]
        for key_page in page_html.keys():
            la_key = "_".join([key, key_page])
            html = page_html[key_page]["html"]
            allref[la_key] = html
    if len(allref) > 0:
        ref_down_date = data_ref["first_page"]["down_date"].split(" ")[0].replace("-", "")
        ref_id, list_ref,repeat_cnt = cnkiarticle_ref_info_parse(data, allref, ref_down_date)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = data["pub_year"]
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        # ref_id accumulates with a trailing separator; drop it.
        ref_data["ref_id"] = ref_id[:-1]
        ref_data["refer_info"] = list_ref
        ref_data["repeat_cnt"] = repeat_cnt
        save_data.append({"table": "other_ref_latest", "data": ref_data})
    # NOTE(review): data["ref_cnt"] is a string here, so `== 0` can never
    # match — likely meant `== "0"` or an int() conversion; confirm.
    elif data["ref_cnt"] == 0:
        result.ref_state = {
            "lngid": lngid,
            "table": "other_ref_latest"
        }
    result.save_data = save_data
    # print(data)
    return result


def cnkiipfdconference_cnkiipfdconferenceclass_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Delegate IPFD conference class-page handling to the shared conference callback."""
    return cnkiconference_cnkiconferenceclass_callback(callmodel)


def cnkiipfdconference_cnkiipfdconferencelist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate IPFD conference list-page handling to the shared conference callback."""
    return cnkiconference_cnkiconferencelist_callback(callmodel)


def cnkiipfdconference_cnkiipfdconferencesublist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Delegate IPFD conference sublist-page handling to the shared conference callback."""
    return cnkiconference_cnkiconferencesublist_callback(callmodel)


def cnkiipfdconference_cnkiipfdconferencearticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Delegate IPFD conference-article crawling to the shared CNKI thesis article handler."""
    return cnkithesis_cnkithesisarticle_callback(callmodel)


def cnkiipfdconference_cnkiipfdconferencearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL parser for a CNKI international conference (IPFD) article page.

    Near-duplicate of the CPFD conference ETL, with IPFD identifiers
    (sub_db_id "00091", sub_db "IPFD") and an extra language check that
    flags non-Chinese title/abstract as English.

    :param callmodel: pipeline callback model; ``para_dicts["data"]["1_1"]``
        is the pre-parsed article dict, ``down_model["1_1"]`` the article
        page download, ``down_model["1_3"]`` the reference-page downloads.
    :return: ``EtlDealModel`` with ``status``/``err_msg`` set and
        ``save_data`` holding the table rows to persist.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    # Propagate an upstream rule-parse failure without doing any work.
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_info_json"])
    journal_info = sql_model["journal_info"]
    # "1_1" = parsed article fields, "1_2" = reference/citation counters,
    # "1_3" = downloaded reference pages.
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    data_refcnt = para_dicts["data"]["1_2"]
    data_ref = down_model["1_3"].dict()
    # data_cited = down_model["1_4"].dict()
    # Download date normalized to YYYYMMDD.
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["ref_cnt"] = data_refcnt.get("REFERENCE", "")
    cited_cnt = data_refcnt.get("CITING", "")
    # Tag a numeric citation count with its download date; non-numeric
    # values are kept as-is.
    if cited_cnt.isdigit():
        cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed identifiers for the CNKI international conference (IPFD) sub-database.
    sub_db_id = "00091"
    product = "CNKI"
    sub_db = "IPFD"
    provider = "CNKI"
    source_type = "6"
    data["is_deprecated"] = "0"
    rawid = data["rawid"].lower()
    data["rawid_mysql"] = rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    # data["provider_url"] = "https://kns.cnki.net/kcms/detail/detail.aspx?dbcode=ipfd&filename=" + rawid
    data["country"] = "CN"
    data["language"] = "ZH"
    sel = Selector(src_data.html)
    # The page's "copy link" input carries the canonical abstract-page query string.
    v = sel.xpath('//input[@id="copy"]/@value').extract_first()
    if not v:
        raise Exception('provider_url 解析错误')
    data["provider_url"] = f'https://kns.cnki.net/kcms2/article/abstract?{v}'
    data["meeting_name"] = journal_info.get("meeting_name", "")
    # A truncated ("...") meeting name from the list page is re-read from the detail page.
    if '...' in data["meeting_name"]:
        data["meeting_name"] = cleanSemicolon(''.join(sel.xpath('//span[text()="会议名称："]/parent::div[1]/p//text()').extract()).strip())
    data["meeting_record_name"] = journal_info.get("meeting_record_name", "")
    data["society"] = journal_info.get("society", "")
    data["meeting_code"] = journal_info.get("meeting_code", "")
    data["meeting_place"] = journal_info.get("meeting_place", "")
    data["meeting_record_code"] = journal_info.get("meeting_record_code", "")
    data["publisher"] = journal_info.get("publisher", "")
    pub_date = journal_info.get("pub_date", "").replace("-", "").strip()
    # NOTE(review): an 8-digit pub_date (YYYYMMDD) sets pub_year but then
    # falls into the else branch below and is wiped to "" — only 6-digit
    # values survive. If full dates should be kept, this second `if` was
    # probably meant to be nested under (or `elif` of) the first — confirm.
    if len(pub_date) > 4:
        pub_year = pub_date[0:4]
    if len(pub_date) == 6:
        pub_date = pub_date + "00"
    else:
        pub_date = ""
        pub_year = ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    meeting_date_raw = journal_info.get("meeting_date_raw", "")
    data["meeting_date_raw"] = meeting_date_raw
    # Derive accept_date from the raw meeting date range, zero-padding
    # partial dates to 8 digits (YYYY0000 / YYYYMM00).
    tmps_acc = meeting_date_raw.split("-")
    accept_date = ""
    if len(tmps_acc) > 0 and len(tmps_acc[0]) == 8:
        accept_date = tmps_acc[0]
    elif len(meeting_date_raw) > 0:
        accept_date = meeting_date_raw.replace("-", "")
        # Reject values that do not start with a plausible year digit.
        if accept_date[0] != "1" and accept_date[0] != "2":
            accept_date = ""
        if len(accept_date) < 4:
            accept_date = ""
        elif len(accept_date) == 4:
            accept_date = accept_date + "0000"
        elif len(accept_date) == 6:
            accept_date = accept_date + "00"
        if accept_date == "":
            accept_date = pub_date
    data["accept_date"] = accept_date
    # Fall back to accept_date when no publication date was parsed.
    if len(data["pub_date"]) == 0 and len(accept_date) > 0:
        data["pub_date"] = accept_date
        data["pub_year"] = accept_date[0:4]
    # Title: take the <h1>, drop child tags that carry attributes
    # (badges, icons), then fall back to <title> minus the site suffix.
    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    if checkExist(h1_title):
        # HTML of attribute-bearing child tags to strip from the title.
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        h1_use = tt_temp
        sel_h1 = Selector(h1_use)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())
    if len(data["title"]) < 1:
        title = sel.xpath("//title/text()").get()
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title
    # (Legacy author-span parsing removed; see VCS history.)

    # 20251029
    # Authors: one <span> per author; superscripts (affiliation markers)
    # are folded into "name[sup]" form.
    author_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
            if '<sup>' in span.extract():
                author_info = ''.join(span.xpath(".//text()[not(ancestor::sup)]").extract()).strip()
                sup = ''.join(span.xpath(".//sup/text()").extract()).strip()
                author = f'{author_info}[{sup}]'
            else:
                author = ''.join(span.xpath(".//text()").extract()).strip()
            author_list.append(author)
    author = ';'.join(author_list)
    author_id = ''
    # 20250107
    # Map author names to author codes from the separately downloaded
    # author/organ JSON, producing "code@name" pairs.
    if (not author_id) and src_data.author_organ:
        author_info = json.loads(src_data.author_organ).get('author_info', [])
        _ids = list()
        for aut in author.split(';'):
            for item in author_info:
                if aut != item['author_name']:
                    continue
                _ids.append(f"{item['author_code']}@{aut}")
        author_id = ';'.join(_ids)
    # NOTE(review): author_1st computed here is never used —
    # data["author_1st"] below recomputes it with a different regex.
    if len(author) > 0:
        vec = author.split(";")
        if len(vec):
            author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])

    data["author"] = author
    data["author_id"] = author_id
    data["author_1st"] = re.sub('\[.*?\]', '', author.split(';')[0], 1)
    # Emails are embedded in a JS call as parallel name/email lists;
    # pair them up as "email:name;" entries when the lists align.
    email = ""
    list_email = re.findall(r"setAUCommFlag\('([^']+)','([^']+)'\);", src_data.html)
    if checkExist(list_email):
        auLine = list_email[0][0]
        emailLine = list_email[0][1]
        auVec = auLine.split(";")
        emailVec = emailLine.split(";")
        if len(auVec) == len(emailVec):
            for i in range(0, len(emailVec)):
                email += emailVec[i].strip() + ":" + auVec[i].strip() + ";"
        email = cleanSemicolon(email)
    data["email"] = email
    organ = ""
    organ_id = ""
    organ_1st = ""
    # (Legacy organ-anchor parsing removed; see VCS history.)
    # Organizations: one <span> each; a leading "1." index is rewritten
    # to "[1]" to match the author superscript form.
    organ_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
            organ_name = ''.join(span.xpath(".//text()").extract()).strip()
            og_name = re.sub("(\d+)\.", r"[\1]", organ_name, count=1)
            organ_list.append(og_name)
    if organ_list:
        organ = ';'.join(organ_list)
        organ_1st = organ_list[0]
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = re.sub('\[.*?\]','', organ.split(';')[0], 1)
    keyword = ""
    list_a_kw = sel.xpath("//p[@class='keywords']/a")
    if checkExist(list_a_kw):
        for item in list_a_kw:
            keyword += re.sub(r"[;；\s]+$", "", item.xpath("string(.)").get("")) + ";"
    data["keyword"] = cleanSemicolon(keyword)
    # Abstract: strip the trailing "expand/restore" UI captions and the
    # leading "<正>" body marker.
    abstract_ = "".join(sel.xpath("//span[@id='ChDivSummary']/text()").getall())
    abstract_ = cleanSemicolon(abstract_)
    if abstract_.endswith("更多还原"):
        abstract_ = abstract_[0:abstract_.find("更多还原")]
    elif abstract_.endswith("还原"):
        abstract_ = abstract_[0:abstract_.find("还原")]
    if "<正>" in abstract_:
        abstract_ = re.sub(r"^<正>~~", "", abstract_[len("<正>"):])
        abstract_ = re.sub(r"^<正>", "", abstract_).replace("~~", "")
    data["abstract"] = abstract_
    # IPFD-specific: mark the record as English when neither the title
    # nor the abstract contains Chinese characters.
    if not is_chinese(data["title"]):
        if len(data["abstract"]) == 0 or not is_chinese(data["abstract"]):
            data["language"] = "EN"
    # Normalize full-width semicolons and separators in host_organ/fund.
    host_organ = cleanSemicolon(data["host_organ"]).replace("；；", ";").replace("；", ";")
    if host_organ.endswith(";"):
        host_organ = host_organ[0:-1]
    data["host_organ"] = cleanSemicolon(host_organ.replace("、", ";")).replace("\"", "")
    fund = cleanSemicolon(data["fund"]).replace("；；", ";").replace("；", ";")
    if not fund:
        fund = ''.join(sel.xpath('//p[@class="funds"]//text()').extract()).strip()
        fund = cleanSemicolon(fund).replace("；；", ";").replace("；", ";")
    if fund.endswith(";"):
        fund = fund[0:-1]
    data["fund"] = fund
    clc_no = ''.join(sel.xpath('//span[text()="分类号："]/parent::li[1]/p//text()').extract()).strip()
    clc_no = cleanSemicolon(clc_no).replace("-", "")
    clc_no_1st = ""
    if clc_no == "+":
        clc_no = ""
    if len(clc_no) > 0:
        vec = clc_no.split(";")
        if len(vec) > 0:
            clc_no_1st = vec[0]
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    # NOTE(review): replacing "-" with ";" mangles ordinary DOIs (which
    # contain hyphens) — confirm this substitution is intended.
    data["doi"] = cleanSemicolon(''.join(sel.xpath('//span[text()="DOI："]/parent::li[1]/p//text()').extract()).strip().replace("-", ";")).replace("-", ";")
    subject = cleanSemicolon(''.join(sel.xpath('//span[text()="专题："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if subject.endswith(";"):
        subject = subject[0:-1]
    sub_db_class_name = cleanSemicolon(''.join(sel.xpath('//span[text()="专辑："]/parent::li[1]/p//text()').extract()).strip()).replace("；；", ";").replace("；", ";")
    if sub_db_class_name.endswith(";"):
        sub_db_class_name = sub_db_class_name[0:-1]
    data["subject"] = subject
    data["sub_db_class_name"] = sub_db_class_name
    # Page info like "12-15+20": part after "+" is the jump page, the
    # "begin-end" range precedes it.
    page_info = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页码：")]//text()').extract()).strip()).replace("页码：", "")
    data["page_info"] = page_info
    if '+' in page_info:
        jump_page = page_info.split("+")[-1]
        begin_page = page_info.split("+")[0].split("-")[0]
        end_page = page_info.split("+")[0].split("-")[-1]
    else:
        jump_page = ''
        begin_page = page_info.split("-")[0]
        end_page = page_info.split("-")[-1]
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["page_cnt"] = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页数：")]//text()').extract()).strip()).replace("页数：", "")
    down_cnt = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"下载：")]//text()').extract()).strip()).replace("下载：", "")
    # Unlike cited_cnt, a non-numeric download count is blanked out.
    if down_cnt.isdigit():
        down_cnt = "{}@{}".format(down_cnt, down_date)
    else:
        down_cnt = ""
    data["down_cnt"] = down_cnt
    # Available full-text formats, detected from the download buttons.
    fulltext_type = ""
    pdf = sel.xpath("//li[@class='btn-dlpdf']/a[contains(@name, 'pdf')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//li[@class='btn-dlcaj']/a[contains(@name, 'caj')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//li[@class='btn-dlhtml']/a[contains(@name, 'html')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    # Minimal validation: rawid and title are required for a usable record.
    # NOTE(review): the error messages below still name the
    # cnkiconference function (copy-paste) — runtime strings left
    # untouched here; confirm whether they should say cnkiipfdconference.
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkiconference_cnkiconferencearticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkiconference_cnkiconferencearticle_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    save_data = []
    save_data.append({"table": "other_latest", "data": data})
    # Collect all downloaded reference pages (first page plus every
    # type/page combination) and parse them into reference records.
    ref_id = ""
    list_ref = []
    repeat_cnt = 0
    allref = {"first": data_ref["first_page"]["html"]}
    refs = data_ref["type_page"]["type_code"]
    for key in refs.keys():
        page_html = refs[key]["page_html"]
        for key_page in page_html.keys():
            la_key = "_".join([key, key_page])
            html = page_html[key_page]["html"]
            allref[la_key] = html
    if len(allref) > 0:
        ref_down_date = data_ref["first_page"]["down_date"].split(" ")[0].replace("-", "")
        ref_id, list_ref,repeat_cnt = cnkiarticle_ref_info_parse(data, allref, ref_down_date)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = data["pub_year"]
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        # ref_id accumulates with a trailing separator; drop it.
        ref_data["ref_id"] = ref_id[:-1]
        ref_data["refer_info"] = list_ref
        ref_data["repeat_cnt"] = repeat_cnt
        save_data.append({"table": "other_ref_latest", "data": ref_data})
    # NOTE(review): data["ref_cnt"] is a string here, so `== 0` can never
    # match — likely meant `== "0"` or an int() conversion; confirm.
    elif data["ref_cnt"] == 0:
        result.ref_state = {
            "lngid": lngid,
            "table": "other_ref_latest"
        }
    result.save_data = save_data
    return result


def cnkiccndpaper_cnkipaperclass_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Class/home-stage callback for the CNKI CCND newspaper catalogue.

    On page 1 it fans out one task row per catalogue page (insert-ignore),
    then for every newspaper entry in the response it inserts a next-stage
    list row carrying the journal rawid/name inside the list_json payload.

    :param callmodel: framework callback context (parsed data, sql row, redis task info)
    :return: DealModel describing the inserts/updates to apply
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = data["lblPageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed one row per catalogue page so every page gets crawled.
            info_dicts = {
                "list_rawid": sql_model.list_rawid,
                "task_name": sql_model.task_name,
                "task_tag": sql_model.task_tag,
                "sub_db_id": sql_model.sub_db_id,
                "home_json": sql_model.home_json,
            }
            d_i_model = DealInsertModel()
            d_i_model.insert_pre = CoreSqlValue.insert_ig_it
            for i in range(1, int(total_page) + 1):
                temp = info_dicts.copy()
                temp["page"] = total_page
                temp["page_index"] = i
                d_i_model.lists.append(temp)
            result.befor_dicts.insert.append(d_i_model)
        # Mark the current row done and record the page total.
        result.befor_dicts.update.update({'is_active': 1, "page": total_page})
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["value"]["children"]:
            # (The original re-applied {'page': total_page} here on every
            # iteration; total_page is loop-invariant and already set above,
            # so the redundant per-iteration update was removed.)
            journal_name = item['papername'].strip()
            if len(journal_name) != 0:
                journal_rawid = get_journal_rawid(item["pykm"])
                home_json = json.loads(sql_model.home_json)
                home_json["journal_raw_id"] = journal_rawid
                home_json["journal_name"] = journal_name
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": journal_rawid,
                    "page": total_page,
                    "page_index": "-1",
                    "list_json": json.dumps(home_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkiccndpaper_cnkipaperlist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """List-stage callback for one CNKI CCND newspaper.

    Response "1_1" carries the paper's profile HTML: basic/contact metadata
    is scraped into list_json and written back via a JSON_SET update.
    Response "1_2" carries per-year article counts: one sub-list task row is
    inserted per 50-article page of every year; only page 1 (which re-runs
    the search) starts active.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    list_json = json.loads(sql_model.list_json)
    # CNKI search token required by the first page of each year's listing.
    sqlcode = para_dicts["data"]["1_3"].get("sqlcode", "")
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"].get("html", "")
        sel = Selector(html)
        # Basic-info block: <label>name</label><span>value</span> pairs.
        p_rs = sel.xpath("//ul[@id='NBaseInfo']/li/p")
        if checkExist(p_rs):
            for p in p_rs:
                txt = p.xpath("./label/text()").extract()[0].strip()
                # SQL-escape the value and drop inner spaces before storing.
                val = Mysql.escape(p.xpath("./span/text()").extract()[0].strip().replace(" ", "")).strip()
                if "主办单位" in txt:  # host organization
                    list_json["host_organ"] = val
                elif "主管单位" in txt:  # supervising department
                    list_json["director_dept"] = val
                elif "国内统一刊号" in txt:  # domestic unified issue number (CN no.)
                    list_json["cnno"] = val
                elif "开版" in txt:  # page format / book size
                    list_json["book_size"] = val
                elif "出版地" in txt:  # place of publication
                    list_json["pub_place"] = val
                elif "出版周期" in txt:  # publication frequency
                    list_json["type_name"] = val
        # Contact-info block, same <label>/<span> structure.
        p_rs = sel.xpath("//ul[@id='NContactInfo']/li/p")
        if checkExist(p_rs):
            for p in p_rs:
                txt = p.xpath("./label/text()").extract()[0].strip()
                val = Mysql.escape(p.xpath("./span/text()").extract()[0].strip().replace(" ", "")).strip()
                if "邮编" in txt:  # postcode
                    list_json["postcode"] = val
                elif "网址" in txt:  # website
                    # NOTE: "web_stie" is a long-standing key typo kept for
                    # schema compatibility (the article ETL reads it too).
                    list_json["web_stie"] = val
                elif "通信地址" in txt:  # mailing address
                    list_json["edit_office_addr"] = val
                elif "EMail" in txt:
                    list_json["email"] = val
                elif "电话" in txt:  # telephone
                    list_json["tel_code"] = val
        du_model = DealUpdateModel()
        # du_model.update.update({
        #     "sub_db_id": "00002",
        #     "is_active": "1"})
        du_model.where.update({"list_rawid": sql_model.list_rawid,
                               "task_tag": task_info.task_tag,
                               "task_name": task_info.task_name,
                               })
        # Merge the scraped profile into the stored list_json in-place.
        duplicte = json_update(list_json)
        du_model.update_no_placeholder.update({"list_json": f"JSON_SET(list_json, {duplicte})"})
        result.befor_dicts.update_list.append(du_model)
    if "1_2" in para_dicts["data"]:
        list_years = para_dicts["data"]["1_2"].get("children", "")
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in list_years:
            totalnum = item["totalnum"]
            # Counts look like "(123)"; skip anything unparseable.
            if "(" not in totalnum:
                continue
            totalnum = int(totalnum.replace("(", "").replace(")", ""))
            # 50 results per page, rounded up.
            total_page = int((totalnum + 49) / 50)
            # NOTE(review): overwritten on every year iteration, so only the
            # last year's max_page survives -- confirm this is intended.
            result.code_dicts = {
                "1_1": {"max_page": total_page}
            }
            years = item['years']
            for page_index in range(1, total_page + 1):
                tmp_json = {
                    "years": years,
                    "page_index": page_index,
                    "journal_raw_id": list_json["journal_raw_id"],
                    "journal_name": list_json["journal_name"]
                }
                is_active = 0
                # Page 1 runs a fresh search (and is activated immediately);
                # later pages reuse the stored search via another handler.
                if page_index == 1:
                    tmp_json["sqlcode"] = sqlcode
                    tmp_json["HandlerId"] = "4"
                    tmp_json["IsSearch"] = "true"
                    is_active = 1
                else:
                    tmp_json["HandlerId"] = "9"
                    tmp_json["IsSearch"] = "false"
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": "{}_{}".format(list_json["journal_raw_id"], years),
                    "page": total_page,
                    "page_index": page_index,
                    "is_active": is_active,
                    "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkiccndpaper_cnkipapersublist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Sub-list callback: one year-page of a newspaper's article table.

    Every result-row link yields an article task row keyed by its CNKI
    filename.  When this row is page 1, the freshly scraped sqlcode is
    propagated (via JSON_SET) into the sibling page rows (page_index != 1)
    and those rows are activated.
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    html = callmodel.para_dicts["data"]["1_1"].get("html", "")
    sel = Selector(html)
    # Search token needed by subsequent pages of the same result set.
    sqlcode = sel.xpath("//input[@id='sqlVal']/@value").extract()[0]
    tr_rs = sel.xpath("//table/tr")
    if tr_rs and len(tr_rs) > 0:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for new in tr_rs:
            dbcode = ""
            filename = ""
            # Pull dbcode/filename out of the detail-page link's query string.
            href = new.xpath("./td[@class='name']/a/@href").get("").strip().lower()
            hrefs = href.split("?")[1].split("&")
            for hr in hrefs:
                if "dbcode" in hr:
                    dbcode = hr.split("=")[1]
                elif "filename" in hr:
                    filename = hr.split("=")[1]
            if filename != '' and dbcode != '':
                article_json = subclass_json.copy()
                tmp_json = {
                    "dbcode": dbcode.upper()
                }
                article_json.update(tmp_json)
                # page_index is a listing concept; drop it from article_json.
                del article_json["page_index"]
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "rawid": filename,
                    "article_json": json.dumps(article_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
                # NOTE(review): this update model is appended once per table
                # row, producing duplicate (idempotent) updates; it looks
                # like it was meant to run once per page -- confirm.
                if subclass_json["page_index"] == 1:
                    du_model = DealUpdateModel()
                    duplicte = json_update({"sqlcode": sqlcode})
                    du_model.update_no_placeholder.update(
                        {
                            "subclass_json": f"JSON_SET(subclass_json, {duplicte})",
                            "is_active": "1"
                        }
                    )
                    # WHERE task_name = ? AND task_tag = ? AND list_rawid = ?
                    # AND page_index != <this page>
                    oplist = []
                    op1 = OperatorSqlModel()
                    op1.key = "task_name"
                    op1.value = sql_model.task_name
                    op1.operator = "="
                    oplist.append(op1)
                    op2 = OperatorSqlModel()
                    op2.key = "task_tag"
                    op2.value = sql_model.task_tag
                    op2.operator = "="
                    oplist.append(op2)
                    op3 = OperatorSqlModel()
                    op3.key = "list_rawid"
                    op3.value = sql_model.list_rawid
                    op3.operator = "="
                    oplist.append(op3)
                    op4 = OperatorSqlModel()
                    op4.key = "page_index"
                    op4.value = subclass_json['page_index']
                    op4.operator = "!="
                    oplist.append(op4)
                    du_model.where = oplist
                    result.befor_dicts.update_list.append(du_model)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkiccndpaper_cnkipaperarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """No-op article callback: nothing to schedule at this stage."""
    return DealModel()


def cnkiccndpaper_cnkipaperarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a CNKI CCND (newspaper) article detail page.

    Merges the pre-parsed article fields (``para_dicts``) with values
    scraped from the downloaded detail HTML and the journal metadata held
    on the sql row, then emits one ``paper_latest`` record.  Status is
    SUCCESS only when both ``rawid`` and ``title`` were resolved.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    # NOTE(review): used as a dict below; assumed to be deserialized
    # upstream -- confirm against the caller.
    journal_info = sql_model["journal_info"]
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)
    # "YYYY-MM-DD hh:mm:ss" -> "YYYYMMDD"
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    dbcode = article_json["dbcode"]
    rawid = sql_model["rawid"]
    data["rawid"] = rawid
    data["rawid_mysql"] = rawid
    data["batch"] = batch
    # Fixed identifiers for the CNKI CCND sub-database.
    sub_db_id = "00080"
    product = "CNKI"
    sub_db = "CCND"
    provider = "CNKI"
    source_type = "11"
    data["is_deprecated"] = "0"
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "http://kns.cnki.net/kcms/detail/detail.aspx?dbcode=" + dbcode + "&filename=" + rawid
    data["country"] = "CN"
    data["language"] = "ZH"
    # Copy the journal profile collected by the list-stage callback.
    # NOTE(review): "web_stie" is a long-standing key typo kept for schema
    # compatibility; postcode<-organ_area and tel_code<-tel_phone do not
    # match the list-stage keys -- verify against the producer.
    data["journal_raw_id"] = journal_info.get("pykm", "")
    data["journal_name"] = journal_info.get("journal_name", "")
    data["host_organ"] = journal_info.get("host_organ", "")
    data["director_dept"] = journal_info.get("director_dept", "")
    data["pub_place"] = journal_info.get("pub_place", "")
    data["type_name"] = journal_info.get("type_name", "")
    data["book_size"] = journal_info.get("book_size", "")
    data["cnno"] = journal_info.get("cnno", "")
    data["postcode"] = journal_info.get("organ_area", "")
    data["edit_office_addr"] = journal_info.get("edit_office_addr", "")
    data["web_stie"] = journal_info.get("web_stie", "")
    data["raw_type"] = journal_info.get("raw_type", "")
    data["email"] = journal_info.get("email", "")
    data["tel_code"] = journal_info.get("tel_phone", "")

    # Title: <h1> in the wx-tit block, with attribute-bearing child tags
    # (links, superscripts, ...) stripped from the raw HTML first.
    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    if checkExist(h1_title):
        # HTML fragments of the child tags that must be removed.
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        h1_use = tt_temp
        sel_h1 = Selector(h1_use)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())
    if len(data["title"]) < 1:
        # Fallback: page <title> minus the site suffix.
        # BUGFIX: default to "" so a page without <title> cannot raise a
        # TypeError on the `in` test below.
        title = sel.xpath("//title/text()").get("")
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title
    data["title_sub"] = cleanSemicolon(data["title_sub"])
    data["title_catalyst"] = cleanSemicolon(data["title_catalyst"])
    # Authors: each <span> under the h3 either wraps an <a> (with an author
    # code in a hidden input or an onclick handler) or bare text.
    author = ""
    author_id = ""
    author_1st = ""
    list_au_span = sel.xpath("//div[@class='wx-tit']/h3/span")
    for au_span in list_au_span:
        au_a = au_span.xpath("./a")
        au_code = au_span.xpath("./input[@class='authorcode']/@value").get("")
        if checkExist(au_a):
            au_name = au_a.xpath("./text()").get().strip()
            author += au_name + ";"
            if not checkExist(au_code):
                # The author code may only appear in the onclick handler.
                click_au = au_a.xpath("./@onclick").get()
                if checkExist(click_au):
                    tmp_click_au = re.findall(r"^.*','(\d+)'.*\);$", click_au.strip())
                    if checkExist(tmp_click_au):
                        au_code = tmp_click_au[0].strip()
            if len(au_code) > 0:
                author_id += au_code + "@" + au_name + ";"
        else:
            au_name = au_span.xpath("./text()").get("").strip()
            author += au_name + ";"
    author = cleanSemicolon(author)
    author_id = cleanSemicolon(author_id)
    if len(author) > 0:
        vec = author.split(";")
        if len(vec):
            # First author, with any trailing "[1,2]" affiliation marker cut.
            author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])
    data["author"] = author
    data["author_id"] = author_id
    data["author_1st"] = author_1st
    keyword = ""
    list_a_kw = sel.xpath("//p[@class='keywords']/a")
    if checkExist(list_a_kw):
        for item in list_a_kw:
            keyword += re.sub(r"[;；\s]+$", "", item.xpath("string(.)").get("")) + ";"
    data["keyword"] = cleanSemicolon(keyword)
    # CLC (Chinese Library Classification) numbers; "分类号" = class number.
    clc_no = ''.join(sel.xpath('//span[text()="分类号："]/parent::li[1]/p//text()').extract()).strip()
    clc_no = cleanSemicolon(clc_no).replace("-", "")
    clc_no_1st = ""
    if clc_no == "+":
        clc_no = ""
    if len(clc_no) > 0:
        vec = clc_no.split(";")
        if len(vec) > 0:
            clc_no_1st = vec[0]
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    abstract_ = cleanSemicolon("".join(sel.xpath('//span[@id="ChDivSummary"]//text()').extract()))
    if not checkExist(abstract_):
        abstract_ = cleanSemicolon(sel.xpath("//div[@class='abstract-text']/text()").get(""))
    # Drop the trailing "expand/restore" widget captions.
    if abstract_.endswith("更多还原"):
        abstract_ = abstract_[0:abstract_.find("更多还原")]
    elif abstract_.endswith("还原"):
        abstract_ = abstract_[0:abstract_.find("还原")]
    data["abstract"] = abstract_
    # Normalize pub_date to digits; a YYYYMM value is padded to YYYYMM00.
    pub_date = cleanSemicolon(data["pub_date"].replace("-", "").replace("/", "").replace(" ", ""))
    pub_year = ""
    if len(pub_date) > 4:
        pub_year = pub_date[0:4]
    if len(pub_date) == 6:
        pub_date = pub_date + "00"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["column_info"] = cleanSemicolon(data["column_info"].replace("-", ""))
    data["edition_no"] = cleanSemicolon(data["edition_no"].replace("-", ""))
    data["doi"] = cleanSemicolon(data["doi"].replace("-", ""))
    # "专题" = subject topic(s), full-width semicolons normalized.
    subject = cleanSemicolon(''.join(sel.xpath('//span[text()="专题："]/parent::li[1]/p//text()').extract()).strip().replace("；；", ";").replace("；", ";"))
    if subject.endswith(";"):
        subject = subject[0:-1]
    data["subject"] = subject
    # "专辑" = sub-database collection name.
    sub_db_class_name = cleanSemicolon(''.join(sel.xpath('//span[text()="专辑："]/parent::li[1]/p//text()').extract()).strip().replace("；；", ";").replace("；", ";"))
    if sub_db_class_name.endswith(";"):
        # BUGFIX: previously truncated `subject` here (copy-paste slip);
        # strip the trailing semicolon from sub_db_class_name itself.
        sub_db_class_name = sub_db_class_name[0:-1]
    data["sub_db_class_name"] = sub_db_class_name
    # Download count is stored as "<count>@<download-date>".
    down_cnt = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"下载：")]//text()').extract()).strip().replace("下载：", ""))
    if down_cnt.isdigit():
        down_cnt = "{}@{}".format(down_cnt, down_date)
    else:
        down_cnt = ""
    data["down_cnt"] = down_cnt
    page_cnt = cleanSemicolon(''.join(sel.xpath('//div[@id="DownLoadParts"]//span[contains(text(),"页数：")]//text()').extract()).strip().replace("页数：", ""))
    data["page_cnt"] = page_cnt
    # Available full-text formats, ";"-joined.
    fulltext_type = ""
    pdf = sel.xpath("//li[@class='btn-dlpdf']/a[contains(@name, 'pdf')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//li[@class='btn-dlcaj']/a[contains(@name, 'caj')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//li[@class='btn-dlhtml']/a[contains(@name, 'html')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    result.save_data = [{"table": "paper_latest", "data": data}]
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkiccndpaper_cnkipaperarticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkiccndpaper_cnkipaperarticle_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    return result


def cnkipatent_cnkizlhome_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Home-stage callback for CNKI patents.

    "1_2" supplies the search sqlcode and "1_1" the per-subject hit counts.
    One list-stage row is inserted per 50-result page of every subject;
    only page 1 (which re-runs the search) starts active.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    sqlcode = para_dicts["data"]["1_2"].get("sqlcode", "")
    list_subject = para_dicts["data"]["1_1"].get("children", "")
    if len(sqlcode) > 0 and len(list_subject) > 0:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in list_subject:
            totalnum = item["totalnum"]
            # Counts look like "(123)"; skip anything unparseable.
            if "(" not in totalnum:
                continue
            totalnum = int(totalnum.replace("(", "").replace(")", ""))
            # 50 results per page, rounded up.
            total_page = int((totalnum + 49) / 50)
            # NOTE(review): overwritten on every subject iteration, so only
            # the last subject's max_page survives -- confirm intended.
            result.code_dicts = {
                "1_1": {"max_page": total_page}
            }
            home_json = json.loads(sql_model.home_json)
            subject = item['subject']
            sub_code = item['sub_code']
            for page_index in range(1, total_page + 1):
                list_json = home_json.copy()
                tmp_json = {
                    "subject": subject,
                    "sub_code": sub_code,
                    "page_index": page_index
                }
                is_active = 0
                # Page 1 performs the search; later pages page through it.
                if page_index == 1:
                    tmp_json["sqlcode"] = sqlcode
                    tmp_json["HandlerId"] = "9"
                    tmp_json["IsSearch"] = "true"
                    is_active = 1
                else:
                    tmp_json["HandlerId"] = "14"
                    tmp_json["IsSearch"] = "false"
                list_json.update(tmp_json)
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": "{}_{}_{}".format(home_json["types"], home_json["start_date"], subject),
                    "page": total_page,
                    "page_index": page_index,
                    "is_active": is_active,
                    "list_json": json.dumps(list_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkipatent_cnkizllist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """List-stage callback: one page of the patent search result table.

    Each row link yields an article task row keyed by its CNKI filename,
    with the publication year captured from column 6.  When this row is
    page 1, the freshly scraped sqlcode is propagated (via JSON_SET) into
    the sibling page rows (page_index != 1).
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    list_json = json.loads(sql_model.list_json)
    html = callmodel.para_dicts["data"]["1_1"].get("html", "")
    sel = Selector(html)
    # Search token needed by subsequent pages of the same result set.
    sqlcode = sel.xpath("//input[@id='sqlVal']/@value").extract()[0]
    tr_rs = sel.xpath("//table/tr")
    if tr_rs and len(tr_rs) > 0:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for new in tr_rs:
            dbcode = ""
            filename = ""
            # Pull dbcode/filename out of the detail-page link's query string.
            href = new.xpath("./td[@class='name']/a/@href").get("").strip()
            hrefs = href.split("?")[1].split("&")
            for hr in hrefs:
                if "dbcode" in hr:
                    dbcode = hr.split("=")[1]
                elif "filename" in hr:
                    filename = hr.split("=")[1]
            # Column 6 holds the publication date; "YYYY-MM-DD" -> "YYYYMMDD".
            years = new.xpath("./td[6]/text()").get("").replace("-", "").strip()
            if filename != '' and dbcode != '' and len(years) == 8 and years.isdigit():
                article_json = list_json.copy()
                tmp_json = {
                    "dbcode": dbcode,
                    "years": years[0:4],
                }
                article_json.update(tmp_json)
                # page_index is a listing concept; drop it from article_json.
                del article_json["page_index"]
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "rawid": filename,
                    "article_json": json.dumps(article_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
                # NOTE(review): this update model is appended once per table
                # row, producing duplicate (idempotent) updates; it looks
                # like it was meant to run once per page -- confirm.
                if list_json["page_index"] == 1:
                    du_model = DealUpdateModel()
                    duplicte = json_update({"sqlcode": sqlcode})
                    du_model.update_no_placeholder.update(
                        {"list_json": f"JSON_SET(list_json, {duplicte})"})
                    # WHERE task_name = ? AND task_tag = ? AND list_rawid = ?
                    # AND page_index != <this page>
                    oplist = []
                    op1 = OperatorSqlModel()
                    op1.key = "task_name"
                    op1.value = sql_model.task_name
                    op1.operator = "="
                    oplist.append(op1)
                    op2 = OperatorSqlModel()
                    op2.key = "task_tag"
                    op2.value = sql_model.task_tag
                    op2.operator = "="
                    oplist.append(op2)
                    op3 = OperatorSqlModel()
                    op3.key = "list_rawid"
                    op3.value = sql_model.list_rawid
                    op3.operator = "="
                    oplist.append(op3)
                    op4 = OperatorSqlModel()
                    op4.key = "page_index"
                    op4.value = list_json['page_index']
                    op4.operator = "!="
                    oplist.append(op4)
                    du_model.where = oplist
                    result.befor_dicts.update_list.append(du_model)
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkipatent_cnkizlarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Extract the patent sqh from the page's SetLawState('...') call and
    persist it into the row's article_json via a JSON_SET update."""
    result = DealModel()
    sql_model = callmodel.sql_model
    page_html = callmodel.para_dicts["data"]["1_1"].get("html", "")
    matches = re.findall(r"SetLawState\(\'(.*?)\'\)", page_html)
    if matches:
        update_model = DealUpdateModel()
        duplicte = json_update({"sqh": matches[0]})
        update_model.update_no_placeholder.update(
            {"article_json": f"JSON_SET(article_json, {duplicte})"})
        update_model.where.update({
            "task_name": sql_model.task_name,
            "task_tag": sql_model.task_tag,
            "rawid": sql_model.rawid
        })
        result.befor_dicts.update_list.append(update_model)
    return result

def cleanSemicolon(text):
    """Normalize a semicolon-delimited string.

    Full-width '；' becomes ';', whitespace around semicolons is removed,
    runs of semicolons (and of spaces) collapse to one, and leading or
    trailing semicolons/whitespace are stripped.
    """
    # Full-width -> half-width, then drop the single space in "; ".
    cleaned = text.replace('；', ';').replace("; ", ";")
    substitutions = (
        (r"\s+;", ";"),   # whitespace before a semicolon
        (r";\s+", ";"),   # whitespace after a semicolon
        (r";+", ";"),     # collapse repeated semicolons
        (r" +", " "),     # collapse repeated spaces
        (r"^;", ""),      # leading semicolon
        (r";$", ""),      # trailing semicolon
    )
    for pattern, replacement in substitutions:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.strip()


def checkExist(obj):
    """Return True when *obj* is non-None and has a non-zero length.

    Used throughout this module to test parsel selector lists and
    extracted strings before dereferencing them.
    """
    # `is not None` (identity) instead of `!= None`: the Python idiom, and
    # it avoids invoking custom __eq__ implementations.
    return obj is not None and len(obj) > 0

def cnkipatent_cnkizlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a CNKI patent (SCPD) detail page.

    Normalizes the pre-parsed patent fields, scrapes title and available
    full-text formats from the detail HTML ("1_1") and the legal-status
    table from the second download ("1_2"), then emits one
    ``cnkipatent_latest`` record.  Status is SUCCESS only when both
    ``rawid`` and ``title`` were resolved.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)
    # "YYYY-MM-DD hh:mm:ss" -> "YYYYMMDD"
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed identifiers for the CNKI SCPD (patent) sub-database.
    sub_db_id = "00003"
    product = "CNKI"
    sub_db = "SCPD"
    provider = "CNKI"
    source_type = "7"
    data["is_deprecated"] = "0"
    # The publication number doubles as the raw record id.
    data["pub_no"] = data["pub_no"].strip().replace(" ", "")
    rawid = data["pub_no"]
    data["rawid"] = rawid
    data["rawid_mysql"] = rawid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://kns.cnki.net/kcms/detail/detail.aspx?dbcode=SCPD&filename=" + rawid
    data["country"] = "CN"
    data["language"] = "ZH"
    data["app_no"] = data["app_no"].replace("；", ";")
    data["app_date"] = data["app_date"].strip().replace("  ", "").replace("-", "")
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    pub_date = data["pub_date"].strip().replace("  ", "").replace("-", "")
    # BUGFIX: pub_year was previously unbound (NameError on the assignment
    # below) when pub_date was malformed AND article_json had no usable
    # "years"; default to "" instead.
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        # Fall back to the year captured at list time, if present.
        years = article_json.get("years", "")
        if len(years) == 4:
            pub_year = years
            pub_date = pub_year + "0000"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Title: <h1> in the wx-tit block, with attribute-bearing child tags
    # (links, superscripts, ...) stripped from the raw HTML first.
    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    if checkExist(h1_title):
        # HTML fragments of the child tags that must be removed.
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        h1_use = tt_temp
        sel_h1 = Selector(h1_use)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())
    if len(data["title"]) < 1:
        # Fallback: page <title> minus the site suffix.
        # BUGFIX: default to "" so a page without <title> cannot raise a
        # TypeError on the `in` test below.
        title = sel.xpath("//title/text()").get("")
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title
    # Strip inner spaces from the plain-text patent fields.
    data["applicant"] = data["applicant"].replace(" ", "")
    data["applicant_addr"] = data["applicant_addr"].replace(" ", "")
    data["author"] = data["author"].replace(" ", "")
    data["agency"] = data["agency"].replace(" ", "")
    data["agent"] = data["agent"].replace(" ", "")
    data["ipc_no_1st"] = data["ipc_no_1st"].replace(" ", "")
    data["ipc_no"] = data["ipc_no"].replace(" ", "")
    data["organ_area"] = data["organ_area"].replace(" ", "")
    data["page_cnt"] = data["page_cnt"].replace(" ", "")
    data["priority"] = data["priority"].replace(" ", "")
    data["pct_app_data"] = data["pct_app_data"].replace(" ", "")
    data["pct_pub_data"] = data["pct_pub_data"].replace(" ", "")
    data["claim"] = data["claim"].replace(" ", "")
    data["abstract"] = data["abstract"].replace(" ", "")
    # Available full-text formats, ";"-joined.
    fulltext_type = ""
    pdf = sel.xpath("//div[@id='DownLoadParts']/ul/li/a[contains(text(), 'PDF')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//div[@id='DownLoadParts']/ul/li/a[contains(text(), 'CAJ')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//div[@id='DownLoadParts']/ul/li/a[contains(text(), 'HTML')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    # Legal-status page ("1_2"): table rows formatted as
    # "[date]status" or "[date]status,detail", ";"-joined.
    legSel = Selector(text=down_model["1_2"].html)
    legal_status = ""
    tb_leg = legSel.xpath("//table[@class='zl-table']")
    if checkExist(tb_leg):
        trs = tb_leg.xpath("./tbody/tr")
        for tr in trs:
            tds = tr.xpath("./td")
            if checkExist(tds):
                if len(tds) == 2:
                    legal_status = legal_status + "[" + tds[0].xpath("./text()").get("").strip() + "]" + tds[1].xpath(
                        "./text()").get("").strip() + ";"
                elif len(tds) == 3:
                    legal_status = legal_status + "[" + tds[0].xpath("./text()").get("").strip() + "]" + tds[1].xpath(
                        "./text()").get("").strip() + "," + tds[2].xpath("./text()").get("").strip() + ";"
        legal_status = cleanSemicolon(legal_status)
        # "未查询到" means the legal-status lookup returned nothing.
        if "未查询到" in legal_status:
            legal_status = ""
    data["legal_status"] = legal_status
    data["raw_type"] = article_json.get("types_cn")
    result.save_data = [{"table": "cnkipatent_latest", "data": data}]
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkipatent_cnkizlarticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkipatent_cnkizlarticle_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    return result

def nstl_nstllist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """List-stage callback for NSTL search results.

    Each result row arrives as a list of {"f": field, "v": value} pairs and
    is flattened into an article_json row for the next stage.  On page 1
    the remaining result pages (10 hits per page) are fanned out as new
    active list rows.
    """
    result = DealModel()
    result_dicts = callmodel.para_dicts["data"]["1_1"]
    total = result_dicts["total"]
    data_lists = result_dicts["data"]

    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template row; task_tag_next is swapped into task_tag per article below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}

    # Re-write the current row's list_json unchanged.
    result.befor_dicts.update = {"list_json": callmodel.sql_model.list_json}

    di_model_bef = DealInsertModel()
    for item in data_lists:
        # Flatten [{"f": name, "v": value}, ...] into a plain dict.
        one_data_dict = {}
        for item1 in item:
            key = item1["f"]
            value = item1["v"]
            one_data_dict[key] = value
        one_data_dict["page"] = callmodel.sql_model.page_index
        rawid = one_data_dict["id"]
        sql_dict = info_dicts.copy()
        sql_dict["task_tag"] = sql_dict["task_tag_next"]
        del sql_dict["task_tag_next"]
        sql_dict["rawid"] = rawid
        sql_dict["article_json"] = json.dumps(one_data_dict, ensure_ascii=False)

        di_model_bef.lists.append(sql_dict.copy())

    di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
    result.next_dicts.insert.append(di_model_bef)
    if callmodel.sql_model.page_index == 1:
        # 10 hits per page; +1 so range(2, pageTotal) reaches the last page.
        # NOTE(review): `math` is not among the imports visible at the top
        # of this file -- verify it is imported.
        pageTotal = math.ceil(int(total) / 10) + 1
        di_model_bef = DealInsertModel()
        for i in range(2, pageTotal):
            sql_dict = info_dicts.copy()
            del sql_dict["task_tag_next"]
            sql_dict["list_rawid"] = callmodel.sql_model.list_rawid
            sql_dict["page_index"] = i
            sql_dict["list_json"] = callmodel.sql_model.list_json
            sql_dict["page"] = pageTotal
            sql_dict["is_active"] = 1
            di_model_bef.lists.append(sql_dict.copy())

        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        result.befor_dicts.insert.append(di_model_bef)

    return result

def nstl_nstlarticle_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """NSTL article stage requires no follow-up work; return an empty DealModel."""
    return DealModel()

def isticword_isticword_home_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Parse the ISTIC word home page: pull the JSON array embedded as
    ``var data = '...'`` and queue one next-stage list row per category."""
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # the category list is embedded in the page script as: var data = '[...]';
    html = callmodel.para_dicts["data"]["1_1"]["html"]
    raw_json = re.findall("var data = (.*?);", html)[0].strip("'")
    categories = json.loads(raw_json)

    empty_json = json.dumps({}, ensure_ascii=False)
    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for category in categories:
        row = {"task_name": task_info.task_name,
               "task_tag": task_info.task_tag_next,  # rows go to the next stage
               "list_rawid": category["id"],
               "sub_db_id": callmodel.sql_model.sub_db_id,
               "page_index": -1,
               "list_json": json.dumps(category, ensure_ascii=False),
               "is_active": 1,
               "other_dicts": empty_json,
               "null_dicts": empty_json,
               "page": 0}
        insert_model.lists.append(row)
    result.next_dicts.insert.append(insert_model)
    return result

def isticword_isticword_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Process one ISTIC category listing.

    Parent nodes are re-queued for further listing (first next tag); every
    node — plus the current node itself — is queued for subclass paging
    (second next tag).
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # task_tag_next carries "list-tag;subclass-tag"
    next_tags = task_info.task_tag_next.split(";")
    list_tag, subclass_tag = next_tags[0], next_tags[1]
    empty_json = json.dumps({}, ensure_ascii=False)

    def make_row(tag, rawid):
        # common skeleton shared by both insert targets
        return {"task_name": task_info.task_name,
                "task_tag": tag,
                "list_rawid": rawid,
                "sub_db_id": callmodel.sql_model.sub_db_id,
                "page_index": -1,
                "is_active": 1,
                "other_dicts": empty_json,
                "null_dicts": empty_json,
                "page": 0}

    parent_model = DealInsertModel()
    parent_model.insert_pre = CoreSqlValue.insert_ig_it
    subclass_model = DealInsertModel()
    subclass_model.insert_pre = CoreSqlValue.insert_ig_it

    for item in callmodel.para_dicts["data"]["1_1"]:
        # record who the parent of this node is before serialising it
        item["superior_id"] = callmodel.sql_model.list_rawid
        if item["isParent"] == "true":
            # parent categories need another listing round
            row = make_row(list_tag, item["id"])
            row["list_json"] = json.dumps(item, ensure_ascii=False)
            parent_model.lists.append(row)
        # every category (parent or leaf) is paged as a subclass
        row = make_row(subclass_tag, item["id"])
        row["subclass_json"] = json.dumps(item, ensure_ascii=False)
        row["page_index"] = 1
        subclass_model.lists.append(row)

    result.befor_dicts.insert.append(parent_model)
    # 将原来sql的数据放入下级 也需要进行分页下载
    row = make_row(subclass_tag, callmodel.sql_model.list_rawid)
    row["subclass_json"] = callmodel.sql_model.list_json
    row["page_index"] = 1
    subclass_model.lists.append(row)
    result.next_dicts.insert.append(subclass_model)
    return result

def isticword_isticword_subclass_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Parse one ISTIC subclass result page.

    Extracts the per-term records from the HTML, queues each as an article
    row for the next stage and, on the first page, persists the article
    count and fans out the remaining pages (50 records per page).
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next
                  }
    # "共N条" carries the total record count; "当前无记录" means zero
    article_count = callmodel.para_dicts["data"]["1_1"]["article_count"]
    if article_count.find("当前无记录") > -1:
        article_count = ["0"]
    else:
        article_count = re.findall("共(.*?)条", article_count)
    pageTotal = math.ceil(int(article_count[0]) / 50)
    html = callmodel.para_dicts["data"]["1_1"]["html_data"]
    html = html.replace("\t", "")
    list_result = []
    # entries are separated by <hr>; each has a name plus source /
    # english-name / classification lines
    for one_html in html.split("<hr>"):
        res = Selector(text=one_html)
        name = res.xpath("//h4/a/text()").get()
        if name is None:
            continue
        # NOTE(review): source / source_little may come back None, which
        # would raise on .strip() below — confirm the pages always have them
        source = res.xpath("//span/@title").get()
        source_little = res.xpath("//span/text()").get()
        all_p = res.xpath("//p/text()").getall()
        eng_name, class_ = "", ""
        for one_txt in all_p:
            if one_txt.find("【 英文 】") > -1:
                eng_name = one_txt.replace("【 英文 】", "").strip()
            if one_txt.find("【 分类 】") > -1:
                class_ = one_txt.replace("【 分类 】", "").strip()
        list_result.append((name.strip(), source.strip(), source_little.strip(), eng_name, class_))
    if callmodel.sql_model.page_index == 1:
        # first page: write the article count into the stored subclass_json ...
        items = {"article_count": str(article_count[0])}
        duplicte = json_update(items)
        du_model = DealUpdateModel()
        du_model.update_no_placeholder.update(
            {"subclass_json": f"JSON_SET(subclass_json, {duplicte})"})
        du_model.where = {
            "task_name": task_info.task_name,
            "task_tag": task_info.task_tag,
            "list_rawid": callmodel.sql_model.list_rawid
        }
        result.befor_dicts.update_list.append(du_model)
        # ... and queue every page of this subclass at the current stage
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        for page in range(1, pageTotal + 1):
            sql_dict = callmodel.sql_model.dict()
            sql_dict["page"] = pageTotal
            sql_dict["page_index"] = page
            json_dicts = json.loads(sql_dict["subclass_json"])
            json_dicts["article_count"] = str(article_count[0])
            sql_dict["subclass_json"] = json.dumps(json_dicts, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)
    class_id = callmodel.sql_model.list_rawid
    # (the original also built a second DealInsertModel here that was never
    # used anywhere — removed as dead code)
    dim2 = DealInsertModel()
    dim2.insert_pre = CoreSqlValue.insert_ig_it
    for name, source, source_little, eng_name, class_ in list_result:
        data_dicts = {
            "name": name,
            "source": source,
            "source_little": source_little,
            "english_name": eng_name,
            "class": class_,
            "class_id": class_id
        }
        temp = info_dicts.copy()
        # article rows go to the second next-stage tag
        temp["task_tag"] = temp["task_tag_next"].split(";")[1]
        del temp["task_tag_next"]
        temp["rawid"] = "_".join([name, str(class_id)])
        temp["article_json"] = json.dumps(data_dicts, ensure_ascii=False)
        temp["other_dicts"] = json.dumps({}, ensure_ascii=False)
        temp["null_dicts"] = json.dumps({}, ensure_ascii=False)
        temp["count"] = str(article_count[0])
        dim2.lists.append(temp.copy())
    result.next_dicts.insert.append(dim2)
    return result

def wanfangmedbs_wanfangmedbshomeinit_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Seed one home-page row per database child found on the init page.

    Note: rows carry the *next* task tag but are inserted via
    ``befor_dicts`` — behaviour preserved from the original flow.
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    children = callmodel.para_dicts["data"]["1_1"]['bs_info']['children']
    for child in children:
        row = {"task_name": callmodel.sql_model.task_name,
               "task_tag": task_info.task_tag_next,
               "sub_db_id": callmodel.sql_model.sub_db_id,
               "list_rawid": child['key_name'],
               "page_index": 1,
               "is_active": 1,
               "home_json": json.dumps({"url": child['value']}, ensure_ascii=False)}
        insert_model.lists.append(row)
    result.befor_dicts.insert.append(insert_model)
    return result

def wanfangmedbs_wanfangmedbshome_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Expand a database home page into one next-stage list row per page."""
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    data = callmodel.para_dicts["data"]["1_1"]
    # the page label looks like "共N页"
    page_count = int(data["page"].replace("共", "").replace("页", "").strip())
    url = json.loads(callmodel.sql_model.home_json)["url"]
    # every page row points at the same list url
    list_json = json.dumps({"url": url}, ensure_ascii=False)

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for page in range(1, page_count + 1):
        insert_model.lists.append({
            "task_name": callmodel.sql_model.task_name,
            "task_tag": task_info.task_tag_next,
            "sub_db_id": callmodel.sql_model.sub_db_id,
            "list_rawid": callmodel.sql_model.list_rawid,
            "page_index": page,
            "is_active": 1,
            "list_json": list_json,
        })
    result.next_dicts.insert.append(insert_model)
    return result

def wanfangmedbs_wanfangmedbslist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Turn every search-result entry into an article row for the next stage."""
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    entries = callmodel.para_dicts["data"]["1_1"]['bs_info']['children']
    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for entry in entries:
        # rawid = the detail URL stripped of its fixed prefix and suffix
        rawid = entry['href'].replace("http://med.wanfangdata.com.cn/Paper/Detail?id=DegreePaper_", "").replace(
            "&dbid=WF_XW", "")
        article = {"url": entry['href'], "title": entry['title']}
        insert_model.lists.append({
            "task_name": callmodel.sql_model.task_name,
            "task_tag": task_info.task_tag_next,
            "sub_db_id": callmodel.sql_model.sub_db_id,
            "rawid": rawid,
            "article_json": json.dumps(article, ensure_ascii=False),
        })
    result.next_dicts.insert.append(insert_model)
    return result

def wanfang_search_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Fan out a Wanfang search by its ROOT classification facet.

    Buckets with <= 6000 records are expanded straight into per-page rows
    (50 records per page) under the second next-stage tag; larger buckets
    are queued for further sub-classification under the first next-stage
    tag. When the facet listing itself has more pages (``isMore``), the
    current list row is re-queued with ``page_index + 10`` to pull the next
    facet page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    list_json = json.loads(sql_model.list_json)
    # task_tag_next is "subclass-tag;page-tag"
    task_tag_next = task_info.task_tag_next.split(";")
    if "1_1" in para_dicts["data"]:
        result.befor_dicts.update.update({'is_active': 1})
        rsinfo = para_dicts["data"]["1_1"].get("facetResultsList")[0]["result"]
        navList = rsinfo["bucketsList"]
        di_model_next1 = DealInsertModel()
        di_model_next2 = DealInsertModel()
        di_model_next1.insert_pre = CoreSqlValue.insert_ig_it
        di_model_next2.insert_pre = CoreSqlValue.replace_it
        # sentinel meaning "no year filter" at the ROOT level
        years = "9999"
        for item in navList:
            cname = item["text"]
            cvalue = item["val"]
            totalnum = item["count"]
            if totalnum <= 6000:
                # small bucket: page it directly, 50 records per page
                total_page = int((totalnum + 49) / 50)
                # NOTE(review): overwritten on every small bucket — only the
                # last bucket's max_page survives; confirm intended
                result.code_dicts = {
                    "1_1": {"max_page": total_page}
                }
                for page_index in range(1, total_page + 1):
                    tmp_json = list_json.copy()
                    tmp_json.pop("facetField")
                    tmp_json.update({
                        "class_fid": "ROOT",
                        "class_id": cvalue,
                        "years": years,
                        "class_name": cname
                    })
                    new_dict = {
                        "task_name": sql_model.task_name,
                        "task_tag": task_tag_next[1],
                        "sub_db_id": sql_model.sub_db_id,
                        "list_rawid": "{}_{}_{}".format(list_json["search_id"], cvalue, years),
                        "page": total_page,
                        "page_index": page_index,
                        "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                    }
                    di_model_next1.lists.append(new_dict)
            else:
                # oversized bucket: queue it for another classification round
                tmp_json = list_json.copy()
                tmp_json.update({
                    "page_size": 20,
                    "years": years,
                    "fid": cvalue,
                    "classid": "",
                    "class_fid": "ROOT",
                    "class_id": cvalue,
                    "class_name": cname,
                    "totalnum": totalnum
                })
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_tag_next[0],
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": "{}_{}_{}".format(list_json["search_id"], cvalue, years),
                    "page": 1,
                    "page_index": "1",
                    "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                }
                di_model_next2.lists.append(new_dict)
        if rsinfo.get("isMore", "") == True:  # ROOT分类有分页
            # facet list has more pages: re-queue this row 10 pages ahead
            di_model_before = DealInsertModel()
            di_model_before.insert_pre = CoreSqlValue.replace_it
            home_json = list_json.copy()
            new_dict = {
                "task_name": sql_model.task_name,
                "task_tag": task_info.task_tag,
                "sub_db_id": sql_model.sub_db_id,
                "list_rawid": list_json["search_id"],
                "page": 1,
                "page_index": sql_model.page_index + 10,
                "list_json": json.dumps(home_json, ensure_ascii=False)
            }
            di_model_before.lists.append(new_dict)
            result.befor_dicts.insert.append(di_model_before)
        result.next_dicts.insert.append(di_model_next1)
        result.next_dicts.insert.append(di_model_next2)
    return result

def wanfang_search_typelist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Expand one Wanfang classification node by its child-class facet.

    Children with <= 6000 records become per-page rows (50 per page, second
    next-stage tag); larger children are re-queued at the *current* tag for
    another round of sub-classification. If the facet service reports
    "聚类异常" (clustering failure) for an oversized node, the node is
    retried with a PublishYear facet under the first next-stage tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    task_tag_next = task_info.task_tag_next.split(";")
    if "1_1" in para_dicts["data"]:
        result.befor_dicts.update.update({'is_active': 1})
        pdata = para_dicts["data"]["1_1"]
        if pdata.get("message", "") == "聚类异常":
            # clustering failed: fall back to splitting by publish year
            if subclass_json.get("totalnum") > 6000:
                di_model_next0 = DealInsertModel()
                di_model_next0.insert_pre = CoreSqlValue.replace_it
                tmp_json = subclass_json.copy()
                tmp_json.update({
                    "page_size": "10",
                    "fid": "",
                    "class_fid": subclass_json["class_id"],
                    "facetField": "PublishYear"
                })
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_tag_next[0],
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": "{}_{}_{}".format(subclass_json["search_id"], tmp_json["class_id"],
                                                    tmp_json["years"]),
                    "page": 1,
                    "page_index": "1",
                    "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                }
                di_model_next0.lists.append(new_dict)
                result.next_dicts.insert.append(di_model_next0)
        else:
            rsinfo = pdata.get("facetResultsList")[0]["result"]
            navList = rsinfo["bucketsList"]
            di_model_next1 = DealInsertModel()
            di_model_next2 = DealInsertModel()
            # sentinel meaning "no year filter" at this level
            years = "9999"
            for item in navList:
                cname = item["text"]
                cvalue = item["val"]
                totalnum = item["count"]
                if totalnum <= 6000:
                    # small child: page it directly, 50 records per page
                    di_model_next1.insert_pre = CoreSqlValue.insert_ig_it
                    total_page = int((totalnum + 49) / 50)
                    # NOTE(review): overwritten per bucket — only the last
                    # small bucket's max_page survives; confirm intended
                    result.code_dicts = {
                        "1_1": {"max_page": total_page}
                    }
                    for page_index in range(1, total_page + 1):
                        tmp_json = subclass_json.copy()
                        tmp_json.pop("facetField")
                        tmp_json.update({
                            "class_fid": subclass_json["class_id"],
                            "class_id": cvalue,
                            "years": years,
                            "class_name": cname
                        })
                        new_dict = {
                            "task_name": sql_model.task_name,
                            "task_tag": task_tag_next[1],
                            "sub_db_id": sql_model.sub_db_id,
                            "list_rawid": "{}_{}_{}".format(subclass_json["search_id"], cvalue, years),
                            "page": total_page,
                            "page_index": page_index,
                            "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                        }
                        di_model_next1.lists.append(new_dict)
                else:
                    # oversized child: recurse at the current task tag
                    di_model_next2.insert_pre = CoreSqlValue.replace_it
                    tmp_json = subclass_json.copy()
                    tmp_json.update({
                        "page_size": 20,
                        "years": years,
                        "fid": cvalue,
                        "classid": cvalue,
                        "class_fid": subclass_json["class_id"],
                        "class_id": cvalue,
                        "class_name": cname,
                        "totalnum": totalnum
                    })
                    new_dict = {
                        "task_name": sql_model.task_name,
                        "task_tag": sql_model.task_tag,
                        "sub_db_id": sql_model.sub_db_id,
                        "list_rawid": "{}_{}_{}".format(subclass_json["search_id"], cvalue, years),
                        "page": 1,
                        "page_index": "1",
                        "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                    }
                    di_model_next2.lists.append(new_dict)
            result.next_dicts.insert.append(di_model_next1)
            result.next_dicts.insert.append(di_model_next2)
    return result

def wanfang_search_yearlist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Expand a classification node by publish-year facet into page rows.

    Each year bucket is paged at 50 records per page, capped at 6000
    records (120 pages) — presumably the search backend's depth limit
    (confirm). When the year facet itself is paged (``isMore``), the
    current row is re-queued with ``page_index + 10``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    if "1_1" in para_dicts["data"]:
        result.befor_dicts.update.update({'is_active': 1})
        rsinfo = para_dicts["data"]["1_1"].get("facetResultsList")[0]["result"]
        navList = rsinfo["bucketsList"]
        di_model_next1 = DealInsertModel()
        di_model_next1.insert_pre = CoreSqlValue.replace_it
        for item in navList:
            years = item["val"]
            totalnum = item["count"]
            if totalnum <= 6000:
                total_page = int((totalnum + 49) / 50)
            else:
                # cap at 6000 records (120 pages)
                total_page = int((6000 + 49) / 50)
            # NOTE(review): overwritten every bucket — only the last year's
            # max_page survives; confirm intended
            result.code_dicts = {
                "1_1": {"max_page": total_page}
            }
            for page_index in range(1, total_page + 1):
                tmp_json = subclass_json.copy()
                tmp_json.pop("facetField")
                tmp_json.pop("page_size")
                tmp_json.update({
                    "years": years,
                    "totalnum": totalnum
                })
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": "{}_{}_{}".format(subclass_json["search_id"], tmp_json["class_id"], years),
                    "page": total_page,
                    "page_index": page_index,
                    "subclass_json": json.dumps(tmp_json, ensure_ascii=False)
                }
                di_model_next1.lists.append(new_dict)
        if rsinfo.get("isMore", "") == True:  # ROOT分类有分页
            # year facet has more pages: re-queue this row 10 pages ahead
            di_model_before = DealInsertModel()
            di_model_before.insert_pre = CoreSqlValue.replace_it
            new_dict = {
                "task_name": sql_model.task_name,
                "task_tag": task_info.task_tag,
                "sub_db_id": sql_model.sub_db_id,
                "list_rawid": sql_model.list_rawid,
                "page": 1,
                "page_index": sql_model.page_index + 10,
                "subclass_json": json.dumps(subclass_json.copy(), ensure_ascii=False)
            }
            di_model_before.lists.append(new_dict)
            result.befor_dicts.insert.append(di_model_before)
        result.next_dicts.insert.append(di_model_next1)
    return result

def wanfangun_search_article_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Compute paging for an article's references ("1_2") and citations
    ("1_3") and report it via ``code_dicts`` so the remaining pages get
    fetched.

    :return: DealModel whose ``code_dicts`` maps each present key to its
        ``max_page`` (10 records per page; 0 when the block is empty).
    """
    dealmodel = DealModel()
    html_dicts = callmodel.para_dicts

    def page_count(key):
        # one helper for both keys — the two original branches were identical
        block = html_dicts[key]["data"]["1"]
        # raw_base64 is bulky payload we never need; drop it before testing
        # the block for emptiness
        block.pop("raw_base64", None)
        total = block["total"] if block else 0  # empty block -> zero pages
        return math.ceil(int(total) / 10)

    for key in ("1_2", "1_3"):
        if key in html_dicts:
            dealmodel.code_dicts.update({key: {"max_page": page_count(key)}})
    return dealmodel

def wanfangthesis_wanfangthesis_class_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Queue one next-stage search task per thesis school on the class page."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" not in para_dicts["data"]:
        return result
    result.befor_dicts.update.update({'is_active': 1})
    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for school in para_dicts["data"]["1_1"]["value"]["ThesisSchool"]["value"]:
        if school["pid"] == "d":
            continue
        school_id = school["id"]
        home_json = json.loads(sql_model.home_json)
        home_json.update({
            "search_id": school_id,
            "search_name": school["name"],
            "school_id": school_id,
            "school_fid": school["pid"],
            "school_name": school["name"],
            "facetField": "ClassCodeForSearch",
        })
        insert_model.lists.append({
            "task_name": sql_model.task_name,
            "task_tag": task_info.task_tag_next,
            "sub_db_id": sql_model.sub_db_id,
            "list_rawid": school_id,
            "page": 1,
            "page_index": "1",
            "list_json": json.dumps(home_json, ensure_ascii=False),
        })
    result.next_dicts.insert.append(insert_model)
    return result

def wanfangthesis_wanfangthesis_search_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Thesis search-list stage: delegate to the shared Wanfang handler."""
    return wanfang_search_list_callback(callmodel)

def wanfangthesis_wanfangthesis_typelist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Thesis type-list stage: delegate to the shared Wanfang handler."""
    return wanfang_search_typelist_callback(callmodel)

def wanfangthesis_wanfangthesis_yearlist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Thesis year-list stage: delegate to the shared Wanfang handler."""
    return wanfang_search_yearlist_callback(callmodel)

def wanfangthesis_wanfangthesis_search_sublist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Queue one article-stage row per thesis in a search sub-list page.

    Marks the current row active and, for every resource returned, inserts
    a next-stage row keyed on the thesis wanId, carrying the subclass
    context plus the record's uid and publish year.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    if "1_1" in para_dicts["data"]:
        result.befor_dicts.update.update({'is_active': 1})
        rslist = para_dicts["data"]["1_1"]["resource"]
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for rs in rslist:
            uid = rs["uid"]
            wanID = rs["thesis"]["wanId"]
            # BUG FIX: this was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only a missing/None field is
            # the expected failure here.
            try:
                years = rs["thesis"]["publishyear"]
            except (KeyError, TypeError):
                years = ""
            article_json = subclass_json.copy()
            article_json.update({
                "years": years,
                "uid": uid
            })
            new_dict = {
                "task_name": sql_model.task_name,
                "task_tag": task_info.task_tag_next,
                "sub_db_id": sql_model.sub_db_id,
                "rawid": wanID,
                "article_info_json": json.dumps(article_json, ensure_ascii=False)
            }
            di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def wanfangthesis_wanfangthesis_search_article_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Thesis article stage: delegate to the shared Wanfang paging handler."""
    return wanfangun_search_article_callback(callmodel)


def getJsonVal(data, key):
    """Fetch *key* from mapping *data* and normalise it to a clean string.

    List values are ";"-joined, scalars are stringified; both go through
    ``cleanSemicolon``. Missing keys and empty lists yield "".
    """
    raw = data.get(key, "")
    if not isinstance(raw, list):
        return cleanSemicolon(str(raw))
    return cleanSemicolon(";".join(raw)) if raw else ""


def get_json_val(dval, key):
    """Look up *key* in *dval* and normalise the result.

    *dval* may be a mapping (direct lookup with "" default) or a list of
    ``{"name": ..., "value": ...}`` records (first matching name wins).
    ``None`` and the literal string ``"null"`` both normalise to "".
    """
    def normalise(value):
        return "" if value is None or value == "null" else value

    if not isinstance(dval, list):
        return normalise(dval.get(key, ""))
    for record in dval:
        if record["name"] == key:
            return normalise(record.get("value"))
    return ""


def wanfangthesis_wanfangthesis_search_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL one Wanfang thesis detail record into save-ready rows.

    Download keys: 1_1 = detail record, 1_2 = references, 1_3 = citations.
    Builds the ``other_latest`` record from the detail JSON and, when any
    references parse, an ``other_ref_latest`` record as well.

    :param callmodel: carries ``down_model`` (raw downloads), ``sql_model``
        and ``para_dicts`` (upstream parse status).
    :return: EtlDealModel with ``save_data`` on success; FAILED with code 7
        on parse errors, code 2 when the detail JSON has no Resource entry.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    if "status" in para_dicts and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    src_data = down_model["1_1"]
    ref_raw = down_model["1_2"]
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    src_data = json.loads(src_data.html).get("Resource")
    if not src_data:
        result.status = "FAILED"
        result.code = 2
        result.err_msg = "wanfangthesis_wanfangthesis_article_callback 原始json无detail字段"
        return result
    src_data = src_data[0]["thesis"]
    data = {}
    rawid = src_data["wanId"]
    # cited count is stored as "<count>@<down_date>"; non-numeric -> 0
    cited_cnt = str(src_data.get("citedcount", ""))
    if not cited_cnt.isdigit():
        cited_cnt = "0"
    cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # fixed identity of this source (Wanfang degree-thesis database)
    sub_db_id = "00005"
    product = "WANFANG"
    sub_db = "CDDB"
    provider = "WANFANG"
    source_type = "4"
    data["is_deprecated"] = "0"
    data["rawid"] = rawid
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://d.wanfangdata.com.cn/thesis/" + rawid
    data["fulltext_type"] = "pdf"
    data["country"] = "CN"
    data["language"] = "ZH"

    # --- bibliographic fields ---
    title = ""
    tmps_title = src_data.get("titleList")
    if checkExist(tmps_title) > 0:
        title = tmps_title[0]
    data["title"] = cleanSemicolon(title)
    abstract_ = ""
    tmps_abs = src_data.get("abstractList")
    if checkExist(tmps_abs) > 0:
        abstract_ = tmps_abs[0]
    data["abstract"] = cleanSemicolon(abstract_)
    data["doi"] = cleanSemicolon(getJsonVal(src_data, "doi"))
    author = ""
    author_1st = ""
    tmps_au = src_data.get("creatorList")
    if checkExist(tmps_au) > 0:
        author = cleanSemicolon(";".join(tmps_au))
        author_1st = cleanSemicolon(tmps_au[0])
    data["author"] = author
    data["author_1st"] = author_1st
    data["subject_major"] = cleanSemicolon(getJsonVal(src_data, "major"))
    data["degree"] = cleanSemicolon(getJsonVal(src_data, "degree"))
    organ_1st = ""
    # prefer the normalised organization list, fall back to the original one
    organ = getJsonVal(src_data, "organizationnewList")
    if len(organ) == 0:
        organ = getJsonVal(src_data, "originalorganizationList")
    if len(organ) > 0:
        if ";" in organ:
            organ_1st = organ.split(";")[0]
        else:
            organ_1st = organ
    data["organ_1st"] = organ_1st
    data["organ"] = organ
    data["contributor"] = cleanSemicolon(getJsonVal(src_data, "tutorList"))
    # publish date: 4-digit year -> YYYY0000, else fall back to online date
    pub_year = cleanSemicolon(getJsonVal(src_data, "publishyear"))
    pub_date = ""
    if len(pub_year) == 4:
        pub_date = pub_year + "0000"
    else:
        pub_year = ""
    pub_date_alt = cleanSemicolon(getJsonVal(src_data, "metadataonlinedate")).replace("-", "").split(" ")[0]
    # BUG FIX: guard the empty string before indexing [0] — a missing
    # metadataonlinedate made this raise IndexError; dates must start
    # with "1" or "2" to count as plausible.
    if not pub_date_alt or (pub_date_alt[0] != "1" and pub_date_alt[0] != "2"):
        pub_date_alt = ""
    if len(pub_date_alt) > 0 and len(pub_date) == 0 and len(pub_year) == 0:
        pub_date = pub_date_alt
        pub_year = pub_date[0:4]
    data["pub_year"] = pub_year
    data["pub_date"] = pub_date
    data["pub_date_alt"] = pub_date_alt
    clc_no = ""
    clc_no_1st = ""
    tmps_cls = src_data.get("classcodeList")
    if checkExist(tmps_cls) > 0:
        clc_no = cleanSemicolon(";".join(tmps_cls))
        clc_no_1st = cleanSemicolon(tmps_cls[0])
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    data["keyword"] = cleanSemicolon(getJsonVal(src_data, "keywordsList"))
    data["keyword_machine"] = cleanSemicolon(getJsonVal(src_data, "machinedkeywordsList"))
    data["clc_machine"] = cleanSemicolon(getJsonVal(src_data, "machinedclasscodeList"))

    # --- validation ---
    status = "FAILED"
    err_msg = ""
    code = 7
    if len(data["rawid"]) < 1:
        err_msg = "wanfangthesis_wanfangthesis_article_etl_callback 解析rawid出错"
    # BUG FIX: "title_alt" is never written above, so data["title_alt"]
    # raised KeyError whenever the title was empty; default to "".
    elif len(data["title"]) < 1 and len(data.get("title_alt", "")) < 1:
        err_msg = "wanfangthesis_wanfangthesis_article_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "other_latest", "data": data})

    # --- references (download key 1_2) ---
    ref_id = ""
    list_ref = []
    idx = 0
    ref_raw = json.loads(ref_raw.json())
    # resource kind -> (reference type letter, sub database id)
    stdict = {
        "periodical": ["J", "00004"],
        "thesis": ["D", "00005"],
        "conference": ["C", "00105"],
        "patent": ["P", "00052"],
        "standard": ["S", "00030"],
        "book": ["M", ""]
    }
    for k, v in ref_raw["page_html"].items():
        if v and "Resource" in v.get("html"):
            detail_dict = json.loads(v["html"])["Resource"]
            for items in detail_dict:
                if len(items.keys()) == 0:
                    continue
                idx += 1
                ref_one = {}
                key = list(items.keys())[0]
                dicts_one = items[key]
                sub_info = stdict.get(key)
                if sub_info is None:
                    # unknown resource kind — skip it
                    continue
                ref_one["sub_db_id"] = sub_info[1]
                ref_one["cited_rawid"] = rawid
                ref_one["cited_lngid"] = lngid
                ref_lngid = "{}{}".format(lngid, str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_one["strtype"] = sub_info[0]
                ref_one = wanfangarticle_ref_parse(ref_one, dicts_one)
                ref_one.pop("sub_db_id")
                # keep only references that produced a usable citation text
                if len(ref_one["refer_text_site"]) > 1:
                    ref_id = ref_id + ref_lngid + ";"
                    list_ref.append(ref_one)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = pub_year
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        ref_data["ref_id"] = ref_id[:-1]  # strip the trailing ";"
        ref_data["refer_info"] = list_ref
        save_data.append({"table": "other_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def wanfangarticle_ref_parse(data, ref_json):
    """Fill *data* with the normalized fields of one wanfang reference.

    *data* must already carry "strtype" (J/D/C/P/S/M) and "sub_db_id";
    this function adds the bibliographic fields, page split, linked ids
    and the formatted citation string "refer_text_site", then returns
    the same dict.
    """
    strtype = data["strtype"]

    # Title: keep only the part before the first '%' marker, then clean.
    title = getJsonVal(ref_json, "titleList")
    cut = title.find('%')
    if cut > 0:
        title = title[:cut].strip()
    title = cleanSemicolon(title)

    source_name = getJsonVal(ref_json, "periodicaltitleList")
    # '%' separates authors in the raw feed; normalize to ';'.
    author = cleanSemicolon(getJsonVal(ref_json, "creatorList").replace("%", ";"))
    pub_year = str(ref_json.get("publishyear", ""))
    vol = getJsonVal(ref_json, "volum")
    num = getJsonVal(ref_json, "issue")
    publisher = getJsonVal(ref_json, "publisher")
    page_info = getJsonVal(ref_json, "page")
    doi = getJsonVal(ref_json, "doi")

    data["title"] = title
    data["source_name"] = source_name
    data["author"] = author
    data["pub_year"] = pub_year
    data["vol"] = vol
    data["num"] = num
    data["publisher"] = publisher
    data["doi"] = doi

    # Page string looks like "12-15,20": the part after ',' is the jump
    # page, '-' splits begin/end, and a missing end page falls back to
    # the begin page. Separators at position 0 are deliberately ignored.
    jump_page = ""
    end_page = ""
    rest = page_info
    pos = rest.find(',')
    if pos > 0:
        jump_page = rest[pos + 1:].strip()
        rest = rest[:pos].strip()
    pos = rest.find('-')
    if pos > 0:
        end_page = rest[pos + 1:].strip()
        rest = rest[:pos].strip()
    begin_page = rest.strip()
    if not end_page:
        end_page = begin_page
    data["page_info"] = page_info
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page

    # A '^' inside wanId marks an unusable id.
    old_linked_id = ref_json.get("wanId", "")
    if "^" in old_linked_id:
        old_linked_id = ""
    data["old_linked_id"] = old_linked_id
    linked_id = ""
    if old_linked_id and data["sub_db_id"]:
        linked_id = BaseLngid().GetLngid(data["sub_db_id"], old_linked_id)
    data["linked_id"] = linked_id
    data["refer_text_raw"] = json.dumps(ref_json, ensure_ascii=False)

    # Assemble the display citation string.
    citation = ""
    if strtype == "S":
        # Standards: "source,title[S]."
        if source_name:
            citation += source_name + ","
        if title:
            citation += title + "[{}].".format(strtype)
    else:
        if author:
            citation += author.replace(";", ",") + "."
        if title:
            citation += title + "[{}].".format(strtype)
        if source_name:
            citation += source_name + ","
        if pub_year:
            citation += pub_year
        # Drop a dangling comma left by an empty year.
        if citation and citation[-1] == ",":
            citation = citation[:-1]
        if strtype == "J":
            if vol:
                citation += "," + vol
            if num:
                citation += "({})".format(num)
        if strtype in ("J", "M", "D", "C"):
            if begin_page:
                citation += ":" + begin_page
                if end_page:
                    citation += "-" + end_page
    data["refer_text_site"] = citation
    return data


def wanfangconference_wanfangconference_class_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Handle one page of the conference class listing.

    Page 0 schedules the remaining list pages (50 rows per page); every
    page fans each meeting out to the next task tag with the meeting
    metadata merged into home_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = 0
        total = int(data["total"])
        if total > 0:
            # Ceiling division kept in integer arithmetic: the previous
            # int((total + 49) / 50) went through a float and can lose
            # precision for very large totals.
            total_page = (total + 49) // 50
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining list pages.
            info_dicts = {
                "list_rawid": sql_model.list_rawid,
                "task_name": sql_model.task_name,
                "task_tag": sql_model.task_tag,
                "sub_db_id": sql_model.sub_db_id,
                "home_json": sql_model.home_json,
            }
            if total_page > 1:
                d_i_model = DealInsertModel()
                d_i_model.insert_pre = CoreSqlValue.insert_ig_it
                for i in range(1, total_page):
                    temp = info_dicts.copy()
                    temp["page"] = total_page
                    temp["page_index"] = i * 50  # row offset, not a page number
                    d_i_model.lists.append(temp)
                result.befor_dicts.insert.append(d_i_model)
        result.befor_dicts.update.update({'is_active': 1, "page": total_page})
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["meetingModels"]:
            hy = item["modelMap"]["Conference"]["value"][0]
            meeting_id = hy["Id"]
            if len(meeting_id) != 0:
                meeting_date = hy["Date"].split(" ")[0].replace("-", "")
                home_json = json.loads(sql_model.home_json)
                home_json.update({
                    "search_id": meeting_id,
                    "search_name": hy["Title"][0],
                    "meeting_id": meeting_id,
                    "meeting_name": hy["Title"][0],
                    "meeting_date": meeting_date,
                    "meeting_year": meeting_date[0:4],
                    "meeting_venue": hy["Venue"],
                    "meeting_level": hy["Level"],
                    "facetField": "ClassCodeForSearch"
                })
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": meeting_id,
                    "page": 1,
                    "page_index": "1",
                    "list_json": json.dumps(home_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def wanfangconference_wanfangconference_search_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate conference search-list handling to the shared wanfang list parser."""
    return wanfang_search_list_callback(callmodel)


def wanfangconference_wanfangconference_typelist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate conference type-list handling to the shared wanfang type-list parser."""
    return wanfang_search_typelist_callback(callmodel)


def wanfangconference_wanfangconference_yearlist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate conference year-list handling to the shared wanfang year-list parser."""
    return wanfang_search_yearlist_callback(callmodel)


def wanfangconference_wanfangconference_search_sublist_callback(
        callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Fan one search-sublist page out into per-article download tasks.

    Every resource row becomes an insert for the next task tag carrying
    the article rawid (wanId) and the accumulated article json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    if "1_1" in para_dicts["data"]:
        result.befor_dicts.update.update({'is_active': 1})
        rslist = para_dicts["data"]["1_1"]["resource"]
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for rs in rslist:
            uid = rs["uid"]
            wanID = rs["conference"]["wanId"]
            # "meetingyear" is optional in the source json. The previous
            # bare except: hid every error; only the expected lookup
            # failures are swallowed now.
            try:
                years = rs["conference"]["meetingyear"]
            except (KeyError, TypeError):
                years = ""
            article_json = subclass_json.copy()
            article_json.update({
                "years": years,
                "uid": uid
            })
            new_dict = {
                "task_name": sql_model.task_name,
                "task_tag": task_info.task_tag_next,
                "sub_db_id": sql_model.sub_db_id,
                "rawid": wanID,
                "article_info_json": json.dumps(article_json, ensure_ascii=False)
            }
            di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def wanfangconference_wanfangconference_search_article_callback(
        callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Delegate article download handling to the shared wanfangun article parser."""
    return wanfangun_search_article_callback(callmodel)


def wanfangconference_wanfangconference_search_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one domestic-conference (CCPD) article.

    Download slots: 1_1 bibliographic record, 1_2 references, 1_3 citations.
    Builds an ``other_latest`` row from the raw article json and, when
    references exist, an ``other_ref_latest`` row. Returns FAILED (code 7)
    for rule errors / international meetings, FAILED (code 2) when the
    detail json is missing.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result

    # This pipeline keeps only domestic meetings; international ones are
    # handled by the wanfangunconference pipeline.
    meeting_level = sql_model["meeting_level"]
    if len(meeting_level.strip()) == 0 or "国际会议" in meeting_level:
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "该题录为国际会议"
        return result
    src_data = down_model["1_1"]
    ref_raw = down_model["1_2"]
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    src_data = json.loads(src_data.html).get("Resource")
    if not src_data or len(src_data) == 0:
        result.status = "FAILED"
        result.code = 2
        result.err_msg = "wanfangconference_wanfangconference_article_etl_callback 原始json无detail字段"
        return result
    src_data = src_data[0]["conference"]
    data = {}
    rawid = src_data["wanId"]
    # cited_cnt is stored as "<count>@<download date>".
    cited_cnt = str(src_data.get("citedcount", ""))
    if cited_cnt == "" or not cited_cnt.isdigit():
        cited_cnt = "0"
    cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed product metadata for the domestic-conference sub database.
    sub_db_id = "00105"
    product = "WANFANG"
    sub_db = "CCPD"
    provider = "WANFANG"
    source_type = "6"
    data["is_deprecated"] = "0"
    data["rawid"] = rawid
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://d.wanfangdata.com.cn/conference/" + rawid
    data["fulltext_type"] = "pdf"
    data["country"] = "CN"
    data["language"] = "ZH"

    title = ""
    tmps_title = src_data.get("titleList")
    if checkExist(tmps_title) > 0:
        title = tmps_title[0]
    data["title"] = cleanSemicolon(title)
    abstract_ = ""
    tmps_abs = src_data.get("abstractList")
    if checkExist(tmps_abs) > 0:
        abstract_ = tmps_abs[0]
    data["abstract"] = cleanSemicolon(abstract_)
    data["keyword"] = cleanSemicolon(getJsonVal(src_data, "keywordsList"))
    data["keyword_machine"] = cleanSemicolon(getJsonVal(src_data, "machinedkeywordsList"))
    clc_no = ""
    clc_no_1st = ""
    tmps_cls = src_data.get("classcodeList")
    if checkExist(tmps_cls) > 0:
        clc_no = cleanSemicolon(";".join(tmps_cls))
        clc_no_1st = cleanSemicolon(tmps_cls[0])
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st

    meeting_date_raw = cleanSemicolon(getJsonVal(src_data, "meetingdate"))
    if not meeting_date_raw:
        meeting_date_raw = cleanSemicolon(getJsonVal(src_data, "metadataonlinedate"))
    # meetingdate arrives in several shapes; normalize to YYYYMMDD.
    if re.match(r'^\d{1,2}-\d{1,2}', meeting_date_raw):
        # "12-15 June 2020" -> keep the first day of the range.
        accept_date = datetime.datetime.strptime(
            re.sub(r'-\d{1,2}', '', meeting_date_raw.replace(',', '')), "%d %B %Y").strftime("%Y%m%d")
    elif re.match('^[A-Z]', meeting_date_raw):
        # "June 12-15 2020" / "Jun 12-15 2020" -> drop the range tail.
        if len(meeting_date_raw.split(' ')[0]) == 3:
            accept_date = datetime.datetime.strptime(
                re.sub('-.* ', ' ', meeting_date_raw), "%b %d %Y").strftime("%Y%m%d")
        else:
            accept_date = datetime.datetime.strptime(
                re.sub('-.* ', ' ', meeting_date_raw), "%B %d %Y").strftime("%Y%m%d")
    elif re.match(r'^\d{1,2} ', meeting_date_raw):
        # "12 June-15 June, 2020" -> drop the range tail.
        accept_date = datetime.datetime.strptime(
            re.sub('-.*?, ', '', meeting_date_raw), "%d %B %Y").strftime("%Y%m%d")
    else:
        # Digits-only fallback: strip non-digits, pad to 8 with zeros; any
        # date not in the 19xx/20xx range is treated as unparseable.
        accept_date = re.sub(r'\D', '', meeting_date_raw)[:8].ljust(8, '0')
        if not accept_date.startswith(('19', '20')):
            raise Exception(meeting_date_raw)
    pub_date = accept_date
    pub_year = ""
    if len(pub_date) > 0:
        pub_year = pub_date[0:4]
    pub_date_alt = cleanSemicolon(getJsonVal(src_data, "metadataonlinedate")).replace("-", "").split(" ")[0]
    # BUGFIX: guard against an empty string before indexing [0]; only years
    # starting with 1/2 are considered plausible.
    if not pub_date_alt or (pub_date_alt[0] != "1" and pub_date_alt[0] != "2"):
        pub_date_alt = ""
    if len(pub_date_alt) > 0 and len(pub_date) == 0 and len(pub_year) == 0:
        pub_date = pub_date_alt
        pub_year = pub_date[0:4]
    data["meeting_date_raw"] = meeting_date_raw
    data["accept_date"] = accept_date
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["pub_date_alt"] = pub_date_alt
    data["host_organ"] = cleanSemicolon(getJsonVal(src_data, "sponsorList"))
    data["author"] = cleanSemicolon(getJsonVal(src_data, "creatorList"))
    data["author_1st"] = cleanSemicolon(getJsonVal(src_data, "firstcreator"))
    organ_1st = ""
    organ = getJsonVal(src_data, "originalorganizationList")
    if len(organ) > 0:
        if ";" in organ:
            organ_1st = organ.split(";")[0]
        else:
            organ_1st = organ
    data["organ_1st"] = organ_1st
    data["organ"] = organ
    meeting_name = ""
    tmps_mtitle = src_data.get("meetingtitleList")
    if checkExist(tmps_mtitle) > 0:
        meeting_name = cleanSemicolon(tmps_mtitle[0])
    data["meeting_name"] = meeting_name
    data["meeting_record_name"] = cleanSemicolon(getJsonVal(src_data, "meetingcorpus"))
    data["meeting_place"] = cleanSemicolon(getJsonVal(src_data, "meetingarea"))
    page_info = cleanSemicolon(getJsonVal(src_data, "page"))
    begin_page = ""
    end_page = ""
    if len(page_info) > 0 and "-" in page_info:
        tmps = page_info.split("-")
        begin_page = tmps[0]
        if len(tmps) > 1:
            end_page = tmps[1]
    data["page_info"] = page_info
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["clc_machine"] = cleanSemicolon(getJsonVal(src_data, "machinedclasscodeList"))
    status = "FAILED"
    err_msg = ""
    code = 7
    if len(data["rawid"]) < 1:
        err_msg = "wanfangconference_wanfangconference_article_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1 and len(data.get("title_alt", "")) < 1:
        # BUGFIX: "title_alt" is never set in this ETL, so data["title_alt"]
        # raised KeyError whenever the title was empty; .get keeps the check.
        err_msg = "wanfangconference_wanfangconference_article_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "other_latest", "data": data})
    # ---- references (slot 1_2) -> other_ref_latest ----
    ref_id = ""
    list_ref = []
    idx = 0
    ref_raw = json.loads(ref_raw.json())
    # Reference type -> single-letter document type code.
    stdict = {
        "periodical": "J",
        "thesis": "D",
        "conference": "C",
        "patent": "P",
        "standard": "S",
        "book": "M"
    }
    for k, v in ref_raw["page_html"].items():
        if v and "Resource" in v.get("html"):
            detail_dict = json.loads(v["html"])["Resource"]
            ref_down_date = v["down_date"].split(" ")[0].replace("-", "")
            for items in detail_dict:
                if len(items.keys()) == 0:
                    continue
                idx += 1
                key = list(items.keys())[0]
                ref_type = stdict.get(key)
                if ref_type is None:
                    # Unknown reference type: skip it, consistent with the
                    # other wanfang ref parsers, instead of raising KeyError.
                    continue
                dicts_one = items[key]
                ref_one = {}
                ref_one["is_deprecated"] = "0"
                ref_one["batch"] = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
                ref_one["sub_db_id"] = sub_db_id
                ref_one["product"] = product
                ref_one["sub_db"] = sub_db
                ref_one["provider"] = provider
                ref_one["down_date"] = ref_down_date
                ref_one["cited_rawid"] = rawid
                ref_one["cited_lngid"] = lngid
                # Reference ids are the article lngid plus a 4-digit ordinal.
                ref_lngid = "{}{}".format(lngid, str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_one["strtype"] = ref_type
                ref_one = wanfangarticle_ref_parse(ref_one, dicts_one)
                if len(ref_one["refer_text_site"]) > 1:
                    ref_id = ref_id + ref_lngid + ";"
                    list_ref.append(ref_one)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = pub_year
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        ref_data["ref_id"] = ref_id[:-1]  # drop trailing ';'
        ref_data["refer_info"] = list_ref
        save_data.append({"table": "other_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def wanfangunconference_wanfangunconference_class_callback(callmodel: CallBackModel[OtherHomeModel]) -> DealModel:
    """Handle one page of the (international) conference class listing.

    Page 0 schedules the remaining list pages (50 rows per page); every
    page fans each meeting out to the next task tag with the meeting
    metadata merged into home_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = 0
        total = int(data["total"])
        if total > 0:
            # Ceiling division kept in integer arithmetic: the previous
            # int((total + 49) / 50) went through a float and can lose
            # precision for very large totals.
            total_page = (total + 49) // 50
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining list pages.
            info_dicts = {
                "list_rawid": sql_model.list_rawid,
                "task_name": sql_model.task_name,
                "task_tag": sql_model.task_tag,
                "sub_db_id": sql_model.sub_db_id,
                "home_json": sql_model.home_json,
            }
            if total_page > 1:
                d_i_model = DealInsertModel()
                d_i_model.insert_pre = CoreSqlValue.insert_ig_it
                for i in range(1, total_page):
                    temp = info_dicts.copy()
                    temp["page"] = total_page
                    temp["page_index"] = i * 50  # row offset, not a page number
                    d_i_model.lists.append(temp)
                result.befor_dicts.insert.append(d_i_model)
        result.befor_dicts.update.update({'is_active': 1, "page": total_page})
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["meetingModels"]:
            hy = item["modelMap"]["Conference"]["value"][0]
            meeting_id = hy["Id"]
            if len(meeting_id) != 0:
                meeting_date = hy["Date"].split(" ")[0].replace("-", "")
                home_json = json.loads(sql_model.home_json)
                home_json.update({
                    "search_id": meeting_id,
                    "search_name": hy["Title"][0],
                    "meeting_id": meeting_id,
                    "meeting_name": hy["Title"][0],
                    "meeting_date": meeting_date,
                    "meeting_year": meeting_date[0:4],
                    "meeting_venue": hy["Venue"],
                    "meeting_level": hy["Level"],
                    "facetField": "ClassCodeForSearch"
                })
                new_dict = {
                    "task_name": sql_model.task_name,
                    "task_tag": task_info.task_tag_next,
                    "sub_db_id": sql_model.sub_db_id,
                    "list_rawid": meeting_id,
                    "page": 1,
                    "page_index": "1",
                    "list_json": json.dumps(home_json, ensure_ascii=False)
                }
                di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def wanfangunconference_wanfangunconference_search_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate un-conference search-list handling to the shared wanfang list parser."""
    return wanfang_search_list_callback(callmodel)


def wanfangunconference_wanfangunconference_typelist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate un-conference type-list handling to the shared wanfang type-list parser."""
    return wanfang_search_typelist_callback(callmodel)


def wanfangunconference_wanfangunconference_yearlist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate un-conference year-list handling to the shared wanfang year-list parser."""
    return wanfang_search_yearlist_callback(callmodel)


def wanfangunconference_wanfangunconference_search_sublist_callback(
        callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Fan one search-sublist page out into per-article download tasks.

    Every resource row becomes an insert for the next task tag carrying
    the article rawid (wanId) and the accumulated article json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    subclass_json = json.loads(sql_model.subclass_json)
    if "1_1" in para_dicts["data"]:
        result.befor_dicts.update.update({'is_active': 1})
        rslist = para_dicts["data"]["1_1"]["resource"]
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for rs in rslist:
            uid = rs["uid"]
            wanID = rs["conference"]["wanId"]
            # "meetingyear" is optional in the source json. The previous
            # bare except: hid every error; only the expected lookup
            # failures are swallowed now.
            try:
                years = rs["conference"]["meetingyear"]
            except (KeyError, TypeError):
                years = ""
            article_json = subclass_json.copy()
            article_json.update({
                "years": years,
                "uid": uid
            })
            # NOTE(review): this pipeline stores the payload under
            # "article_json" while the conference twin uses
            # "article_info_json" — confirm each consumer's schema.
            new_dict = {
                "task_name": sql_model.task_name,
                "task_tag": task_info.task_tag_next,
                "sub_db_id": sql_model.sub_db_id,
                "rawid": wanID,
                "article_json": json.dumps(article_json, ensure_ascii=False)
            }
            di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def wanfangunconference_wanfangunconference_search_article_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Delegate article download handling to the shared wanfangun article parser."""
    return wanfangun_search_article_callback(callmodel)


def convert_month(input_str):
    """Convert a date such as "March, 2020" into the string "YYYY-MM-00".

    The source only carries month granularity, so the day is emitted as
    the placeholder "00". Raises ValueError for any other input shape.
    """
    parsed = datetime.datetime.strptime(input_str, "%B, %Y")
    return f"{parsed.year:04d}-{parsed.month:02d}-00"


def find_month_to_number(text):
    """Return the two-digit month number ("01".."12") for the first English
    month name (full or three-letter abbreviation) found in *text*.

    Matching is case-insensitive and word-bounded; returns None when no
    month name occurs.
    """
    # Month names (full and abbreviated) mapped to zero-padded numbers.
    month_map = {
        'jan': '01', 'january': '01',
        'feb': '02', 'february': '02',
        'mar': '03', 'march': '03',
        'apr': '04', 'april': '04',
        'may': '05',
        'jun': '06', 'june': '06',
        'jul': '07', 'july': '07',
        'aug': '08', 'august': '08',
        'sep': '09', 'september': '09',
        'oct': '10', 'october': '10',
        'nov': '11', 'november': '11',
        'dec': '12', 'december': '12'
    }

    # One word-bounded alternation over every key; \b keeps e.g. "mayhem"
    # from matching "may".
    pattern = r'\b(?:' + '|'.join(month_map) + r')\b'
    hit = re.search(pattern, text, re.IGNORECASE)
    if hit is None:
        return None
    return month_map.get(hit.group().lower())


def wanfangunconference_wanfangunconference_search_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one international-conference (UNHY) article.

    Download slots: 1_1 bibliographic record, 1_2 references, 1_3 citations.
    Builds an ``other_latest`` row from the raw article json and, when
    references exist, an ``other_ref_latest`` row. Returns FAILED (code 7)
    for rule errors / domestic meetings, FAILED (code 2) when the detail
    json is missing.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result

    # This pipeline keeps only international meetings; domestic ones are
    # handled by the wanfangconference pipeline.
    meeting_level = sql_model["meeting_level"]
    if len(meeting_level.strip()) == 0 or "国内会议" in meeting_level:
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "该题录为国内会议"
        return result
    src_data = down_model["1_1"]
    ref_raw = down_model["1_2"]
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    src_data = json.loads(src_data.html).get("Resource")
    if not src_data or len(src_data) == 0:
        result.status = "FAILED"
        result.code = 2
        result.err_msg = "wanfangunconference_wanfangunconference_article_etl_callback 原始json无detail字段"
        return result
    src_data = src_data[0]["conference"]
    data = {}
    rawid = src_data["wanId"]
    # cited_cnt is stored as "<count>@<download date>".
    cited_cnt = str(src_data.get("citedcount", ""))
    if cited_cnt == "" or not cited_cnt.isdigit():
        cited_cnt = "0"
    cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed product metadata for the international-conference sub database.
    sub_db_id = "00411"
    product = "WANFANG"
    sub_db = "UNHY"
    provider = "WANFANG"
    source_type = "6"
    data["is_deprecated"] = "0"
    data["rawid"] = rawid
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://d.wanfangdata.com.cn/conference/" + rawid
    data["fulltext_type"] = "pdf"
    data["country"] = "CN"
    data["language"] = "ZH"

    title = ""
    tmps_title = src_data.get("titleList")
    if checkExist(tmps_title) > 0:
        title = tmps_title[0]
    data["title"] = cleanSemicolon(title)
    abstract_ = ""
    tmps_abs = src_data.get("abstractList")
    if checkExist(tmps_abs) > 0:
        abstract_ = tmps_abs[0]
    data["abstract"] = cleanSemicolon(abstract_)
    data["keyword"] = cleanSemicolon(getJsonVal(src_data, "keywordsList"))
    data["keyword_machine"] = cleanSemicolon(getJsonVal(src_data, "machinedkeywordsList"))
    clc_no = ""
    clc_no_1st = ""
    tmps_cls = src_data.get("classcodeList")
    if checkExist(tmps_cls) > 0:
        clc_no = cleanSemicolon(";".join(tmps_cls))
        clc_no_1st = cleanSemicolon(tmps_cls[0])
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st

    meeting_date_raw = cleanSemicolon(getJsonVal(src_data, "meetingdate"))
    # Ordinal-style dates ("4th", "10th", ...) are not parseable below;
    # fall back to the online date instead.
    if 'th' in meeting_date_raw:
        meeting_date_raw = ''
    if not meeting_date_raw:
        meeting_date_raw = cleanSemicolon(getJsonVal(src_data, "metadataonlinedate"))

    # Normalise "June5" -> "June 5" so the month/day regexes line up,
    # then reduce the various range shapes to a single YYYYMMDD.
    # NOTE(review): find_month_to_number returns None when no month name
    # matches, which would embed the literal "None" in accept_date in the
    # branches below — confirm upstream data always carries a month name.
    date_s = re.sub(r'([A-Za-z]+)(\d+)', r'\1 \2', meeting_date_raw)
    if re.search(r'[a-zA-Z]', date_s) and '-' in date_s:
        # e.g. "5-8 June 2020" / "June 5-8, 2020": keep the first day.
        month = find_month_to_number(date_s.lower())
        day = re.findall(r'(\d+).*?-', date_s)
        if not day:
            # No digits before the dash; use the day after it instead.
            # NOTE(review): raises IndexError when no digits follow the
            # dash either — confirm the source cannot produce that.
            day = re.findall(r'-(\d+)', date_s)[0].rjust(2, '0')
        else:
            day = day[0].rjust(2, '0')
        year = re.findall(r'\d{4}', date_s)[0]
        accept_date = f'{year}{month}{day}'
    elif re.search(r'[a-zA-Z]', date_s) and re.search(r'(\d+).*?,', date_s):
        # e.g. "June 5, 2020"
        month = find_month_to_number(date_s.lower())
        day = re.findall(r'(\d+).*?,', date_s)[0].rjust(2, '0')
        year = re.findall(r'\d{4}', date_s)[0]
        accept_date = f'{year}{month}{day}'
    elif re.search(r'[a-zA-Z]', date_s) and re.search(r'\d{4}', date_s):
        # Month + year only: day is unknown, use "00".
        month = find_month_to_number(date_s.lower())
        day = '00'
        year = re.findall(r'\d{4}', date_s)[0]
        accept_date = f'{year}{month}{day}'
    else:
        # Digits-only fallback: strip non-digits, pad to 8 with zeros; any
        # date not in the 19xx/20xx range is treated as unparseable.
        accept_date = re.sub(r'\D', '', date_s)[:8].ljust(8, '0')
        if not accept_date.startswith(('19', '20')):
            raise Exception(meeting_date_raw)
    pub_date = accept_date
    pub_year = ""
    if len(pub_date) > 0:
        pub_year = pub_date[0:4]
    pub_date_alt = cleanSemicolon(getJsonVal(src_data, "metadataonlinedate")).replace("-", "").split(" ")[0]
    # BUGFIX: guard against an empty string before indexing [0]; only years
    # starting with 1/2 are considered plausible.
    if not pub_date_alt or (pub_date_alt[0] != "1" and pub_date_alt[0] != "2"):
        pub_date_alt = ""
    if len(pub_date_alt) > 0 and len(pub_date) == 0 and len(pub_year) == 0:
        pub_date = pub_date_alt
        pub_year = pub_date[0:4]
    data["meeting_date_raw"] = meeting_date_raw
    data["accept_date"] = accept_date
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["pub_date_alt"] = pub_date_alt
    data["host_organ"] = cleanSemicolon(getJsonVal(src_data, "sponsorList"))
    data["author"] = cleanSemicolon(getJsonVal(src_data, "creatorList"))
    data["author_1st"] = cleanSemicolon(getJsonVal(src_data, "firstcreator"))
    organ_1st = ""
    organ = getJsonVal(src_data, "originalorganizationList")
    if len(organ) > 0:
        if ";" in organ:
            organ_1st = organ.split(";")[0]
        else:
            organ_1st = organ
    data["organ_1st"] = organ_1st
    data["organ"] = organ
    meeting_name = ""
    tmps_mtitle = src_data.get("meetingtitleList")
    if checkExist(tmps_mtitle) > 0:
        meeting_name = cleanSemicolon(tmps_mtitle[0])
    data["meeting_name"] = meeting_name
    data["meeting_record_name"] = cleanSemicolon(getJsonVal(src_data, "meetingcorpus"))
    data["meeting_place"] = cleanSemicolon(getJsonVal(src_data, "meetingarea"))
    page_info = cleanSemicolon(getJsonVal(src_data, "page"))
    begin_page = ""
    end_page = ""
    if len(page_info) > 0 and "-" in page_info:
        tmps = page_info.split("-")
        begin_page = tmps[0]
        if len(tmps) > 1:
            end_page = tmps[1]
    data["page_info"] = page_info
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["clc_machine"] = cleanSemicolon(getJsonVal(src_data, "machinedclasscodeList"))
    status = "FAILED"
    err_msg = ""
    code = 7
    if len(data["rawid"]) < 1:
        err_msg = "wanfangunconference_wanfangunconference_article_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1 and len(data.get("title_alt", "")) < 1:
        # BUGFIX: "title_alt" is never set in this ETL, so data["title_alt"]
        # raised KeyError whenever the title was empty; .get keeps the check.
        err_msg = "wanfangunconference_wanfangunconference_article_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "other_latest", "data": data})
    # ---- references (slot 1_2) -> other_ref_latest ----
    ref_id = ""
    list_ref = []
    idx = 0
    ref_raw = json.loads(ref_raw.json())
    # Reference type -> single-letter document type code.
    stdict = {
        "periodical": "J",
        "thesis": "D",
        "conference": "C",
        "patent": "P",
        "standard": "S",
        "book": "M"
    }
    for k, v in ref_raw["page_html"].items():
        if v and "Resource" in v.get("html"):
            detail_dict = json.loads(v["html"])["Resource"]
            ref_down_date = v["down_date"].split(" ")[0].replace("-", "")
            for items in detail_dict:
                if len(items.keys()) == 0:
                    continue
                idx += 1
                key = list(items.keys())[0]
                ref_type = stdict.get(key)
                if ref_type is None:
                    # Unknown reference type: skip it, consistent with the
                    # other wanfang ref parsers, instead of raising KeyError.
                    continue
                dicts_one = items[key]
                ref_one = {}
                ref_one["is_deprecated"] = "0"
                ref_one["batch"] = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
                ref_one["sub_db_id"] = sub_db_id
                ref_one["product"] = product
                ref_one["sub_db"] = sub_db
                ref_one["provider"] = provider
                ref_one["down_date"] = ref_down_date
                ref_one["cited_rawid"] = rawid
                ref_one["cited_lngid"] = lngid
                # Reference ids are the article lngid plus a 4-digit ordinal.
                ref_lngid = "{}{}".format(lngid, str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_one["strtype"] = ref_type
                ref_one = wanfangarticle_ref_parse(ref_one, dicts_one)
                if len(ref_one["refer_text_site"]) > 1:
                    ref_id = ref_id + ref_lngid + ";"
                    list_ref.append(ref_one)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = pub_year
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        ref_data["ref_id"] = ref_id[:-1]  # drop trailing ';'
        ref_data["refer_info"] = list_ref
        save_data.append({"table": "other_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def wanfangpatent_wanfangpatent_search_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Delegate Wanfang patent search-list handling to the shared Wanfang helper."""
    deal_result = wanfang_search_list_callback(callmodel)
    return deal_result


def wanfangpatent_wanfangpatent_typelist_callback(callmodel: CallBackModel[OtherSubclassModel]) -> DealModel:
    """Delegate Wanfang patent type-list handling to the shared Wanfang helper."""
    deal_result = wanfang_search_typelist_callback(callmodel)
    return deal_result


def samropenstdstandard_samropenstdstandard_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Parse one SAMR open-standard search-result page.

    On the first page, schedules the remaining list pages as "before" tasks;
    for every row of the result table, extracts the standard's rawid (from the
    ``showInfo('...')`` onclick handler) and std_no and queues a detail task.

    Fixes vs. original:
    * ``total_page`` was unbound (NameError) when the pagination span was
      missing from the page; it now defaults to 1.
    * ``re.findall(...)[0]`` raised IndexError on rows whose onclick did not
      match; such rows are now skipped, matching the intent of the existing
      ``len(rawid) == 0`` guard.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" in para_dicts["data"]:
        list_json = json.loads(sql_model.list_json)
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info = cleanSemicolon("".join(res.xpath("//span[contains(@class,'hidden-md')]//text()").extract()))
        # Default so total_page is always defined even when the pagination
        # span is absent (single-page result sets).
        total_page = 1
        if page_info:
            total_page = int(cleanSemicolon(page_info.split("/")[1]))
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(list_json, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        tr_list = res.xpath("//table[contains(@class,'result_list')]/tbody[2]/tr")
        for tr in tr_list:
            tds = tr.xpath("./td")
            article_json = dict()
            a_info = tds[1].xpath("./a")
            # Skip rows whose onclick handler does not carry a rawid; the
            # original indexed [0] unconditionally and could crash here.
            rawid_matches = re.findall(r"showInfo\('(.*?)'\)", a_info.xpath("./@onclick").get(""))
            if not rawid_matches or len(rawid_matches[0]) == 0:
                continue
            rawid = rawid_matches[0]
            std_no = a_info.xpath("./text()").get("")
            article_json["std_no"] = std_no
            article_json["std_type"] = list_json["std_type"]
            new_dict = {
                "task_name": sql_model.task_name,
                "task_tag": task_info.task_tag_next,
                "sub_db_id": sql_model.sub_db_id,
                "rawid": rawid,
                "article_json": json.dumps(article_json, ensure_ascii=False)
            }
            di_model_next.lists.append(new_dict)
        result.next_dicts.insert.append(di_model_next)
    return result


def samropenstdstandard_samropenstdstandard_article_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """No-op article callback: detail pages require no follow-up scheduling."""
    return DealModel()


def samropenstdstandard_samropenstdstandard_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL a SAMR open-standard detail page into an ``other_latest`` record.

    Extracts the standard's numbers, titles, classification codes, dates and
    replacement/abolition info from the HTML detail page. Reports SUCCESS
    only when both rawid and title were extracted.

    Fixes vs. original:
    * duplicate ``data["std_no"]`` assignment removed;
    * a 4-digit (year-only) pub_date now also yields ``pub_year``
      (condition was ``> 4``, excluding exactly 4 characters);
    * abolition-date regex lookup no longer crashes when the memo mentions
      废止 but the date pattern is absent;
    * unused ``para_dicts`` local and commented-out code removed.
    """
    result = EtlDealModel()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)
    # down_date arrives as "YYYY-MM-DD hh:mm:ss"; keep the date part compact.
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data = {}
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))

    rawid = sql_model["rawid"]
    data["rawid"] = rawid
    data["rawid_mysql"] = rawid
    data["batch"] = batch
    sub_db_id = "00668"
    product = "SAMROPENSTD"
    sub_db = "BZ"
    provider = "SAC"
    source_type = "5"
    data["is_deprecated"] = "0"
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://openstd.samr.gov.cn/bzgk/gb/newGbInfo?hcno=" + rawid
    data["country"] = "CN"
    data["language"] = "ZH"

    # 强制性国家标准 == mandatory national standard.
    if sql_model["std_type"] == "强制性国家标准":
        data["is_mandatory"] = "1"
    else:
        data["is_mandatory"] = "0"

    data["std_no"] = sql_model["std_no"]
    adopt_relation = ''.join(sel.xpath("//h1[contains(string(),'标准号')]/span/@title").extract())
    if len(adopt_relation) > 0:
        adopt_relation = adopt_relation.replace("该标准采用", "")
    data["adopt_relation"] = adopt_relation
    data["title"] = ''.join(sel.xpath("//td[contains(string(),'中文标准名称')]/b//text()").extract())
    data["title_alt"] = ''.join(sel.xpath("//td[contains(string(),'英文标准名称')]//text()").extract()).replace("英文标准名称：", "").strip()
    data["legal_status"] = ''.join(sel.xpath("//td[contains(string(),'标准状态')]//text()").extract()).replace("标准状态：", "").strip()
    data["ccs_no"] = ''.join(sel.xpath("//div[contains(text(),'中国标准分类号')]/following-sibling::div[1]//text()").extract()).strip()
    data["ccs_no_1st"] = data["ccs_no"]
    data["ics_no"] = ''.join(sel.xpath("//div[contains(text(),'国际标准分类号')]/following-sibling::div[1]//text()").extract()).strip()
    pub_date = ''.join(sel.xpath("//div[contains(text(),'发布日期')]/following-sibling::div[1]//text()").extract()).strip()
    pub_date = cleanSemicolon(pub_date.replace("-", "").replace("/", "").replace(" ", ""))
    pub_year = ""
    # >= 4 so a year-only date (exactly 4 digits) still yields pub_year.
    if len(pub_date) >= 4:
        pub_year = pub_date[0:4]
    if len(pub_date) == 6:
        # Year+month only: pad the day with "00" for a fixed-width date.
        pub_date = pub_date + "00"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    impl_date = ''.join(sel.xpath("//div[contains(text(),'实施日期')]/following-sibling::div[1]//text()").extract()).strip()
    data["impl_date"] = cleanSemicolon(impl_date.replace("-", "").replace("/", "").replace(" ", ""))
    data["host_organ"] = ''.join(sel.xpath("//div[contains(text(),'主管部门')]/following-sibling::div[1]//text()").extract()).strip().replace("、", ";")
    data["oran"] = ''.join(sel.xpath("//div[contains(text(),'归口')]/following-sibling::div[1]//text()").extract()).strip().replace("、", ";")
    data["publisher"] = ''.join(sel.xpath("//div[contains(text(),'发布单位')]/following-sibling::div[1]//text()").extract()).strip().replace("、", ";")
    memo = ''.join(sel.xpath("//div[contains(text(),'备注')]/following-sibling::div[1]//text()").extract()).strip().replace("、", ";")
    replace_standard = ""
    # "实施,代替" marks a list of standards this one replaces.
    if "实施,代替" in memo:
        tmps = memo.split("实施,代替")[1].split(",")
        replace_standard = ";".join(["{}@{}".format(data["impl_date"], item) for item in tmps])
        memo = ""
    data["replace_standard"] = replace_standard
    abol_date = ""
    # "该标准废止" marks an abolished standard; extract the abolition date.
    if "该标准废止" in memo:
        abol_matches = re.findall(r"自(.*?)年(.*?)月(.*?)日起", memo)
        if abol_matches:
            abol_date = "".join(abol_matches[0])
        memo = ""
    data["abol_date"] = abol_date

    data["fulltext_type"] = "pdf"
    result.save_data = [{"table": "other_latest", "data": data}]
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "samropenstdstandard_samropenstdstandard_article_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "samropenstdstandard_samropenstdstandard_article_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    return result


def nstrsreport_nstrsreport_list_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Parse one NSTRS report search-result JSON page.

    On the first page, schedules the remaining list pages (10 items per
    page); for every item in the result list with a non-empty id, queues a
    detail task carrying the item's title.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    if "1_1" not in para_dicts["data"]:
        return result
    list_json = json.loads(sql_model.list_json)
    payload = para_dicts["data"]["1_1"]
    total_page = math.ceil(payload["RESULT"]["count"] / 10)
    cur_page = int(sql_model.page_index)
    if cur_page == 1:
        base_dict = sql_model.dict()
        bef_model = DealInsertModel()
        bef_model.insert_pre = CoreSqlValue.insert_ig_it
        for page_no in range(cur_page + 1, total_page + 1):
            base_dict["page"] = total_page
            base_dict["page_index"] = page_no
            base_dict["list_json"] = json.dumps(list_json, ensure_ascii=False)
            bef_model.lists.append(base_dict.copy())
        result.befor_dicts.insert.append(bef_model)
    next_model = DealInsertModel()
    next_model.insert_pre = CoreSqlValue.insert_ig_it
    for item in payload["RESULT"]["list"]:
        rawid = item["id"]
        if len(rawid) == 0:
            continue
        next_model.lists.append({
            "task_name": sql_model.task_name,
            "task_tag": task_info.task_tag_next,
            "sub_db_id": sql_model.sub_db_id,
            "rawid": rawid,
            "article_json": json.dumps({"title": item["title"]}, ensure_ascii=False)
        })
    result.next_dicts.insert.append(next_model)
    return result


def nstrsreport_nstrsreport_article_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """No-op article callback: detail pages require no follow-up scheduling."""
    return DealModel()


def nstrsreport_nstrsreport_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL a NSTRS report detail JSON response into an ``other_latest`` record.

    Fixes vs. original:
    * ``down_date`` was read from the parsed JSON dict after ``src_data``
      had been reassigned, raising AttributeError; it is now taken from the
      raw download model before parsing.
    * four duplicate ``data["title"]`` assignments removed.
    * error messages named the wrong function (samropenstdstandard); fixed.
    * the final ``return result`` was commented out, so the callback
      returned None; restored.
    """
    result = EtlDealModel()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    raw = down_model["1_1"]
    # Read the download date from the raw model BEFORE parsing: the parsed
    # JSON dict has no .down_date attribute.
    down_date = raw.down_date.split(" ")[0].replace("-", "")
    src_data = json.loads(raw.html)["RESULT"]
    data = {}
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))

    rawid = sql_model["rawid"]
    data["rawid"] = rawid
    data["rawid_mysql"] = rawid
    data["batch"] = batch
    sub_db_id = "00669"
    product = "NSTRS"
    sub_db = "REPORT"
    provider = "ISTIC"
    source_type = "12"
    data["is_deprecated"] = "0"
    lngid = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://www.nstrs.cn/kjbg/detail?id=" + rawid
    data["country"] = "CN"
    data["language"] = "ZH"

    data["title"] = src_data.get("title", "")
    data["title_alt"] = src_data.get("alternativeTitle", "")
    data["keyword"] = src_data.get("keywordsCn", "")
    data["keyword_alt"] = src_data.get("keywordsEn", "")
    data["subject"] = src_data.get("classification", "")
    data["abstract"] = src_data.get("abstractCn", "")
    data["abstract_alt"] = src_data.get("abstractEn", "")

    result.save_data = [{"table": "other_latest", "data": data}]
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "nstrsreport_nstrsreport_article_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "nstrsreport_nstrsreport_article_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    return result


def rscbook_rscbooklist_callback(callmodel: CallBackModel[OtherListModel]) -> DealModel:
    """Parse one RSC books search-result page.

    On the first page, derives the total page count (20 items per page,
    falling back to a hard-coded maximum of 2099 items) and schedules every
    list page; for each result item, extracts the book URL/rawid/title/DOI
    and queues a detail task.

    Fixes vs. original:
    * the regex pattern is now a raw string (``\\d`` in a plain string is a
      SyntaxWarning on modern Python);
    * the bare ``raise Exception`` now carries a diagnostic message
      (exception type unchanged for existing handlers).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            max_count = re.findall(r' of (\d+) *<', para_dicts["data"]["1_1"]['html'])
            # Fall back to a known site maximum when the count is not shown.
            max_count = int(max_count[0]) if max_count else 2099
            total_page = math.ceil(max_count / 20)
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB bookkeeping columns before re-inserting list tasks.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="item-info"]')
        if not li_list:
            # An empty result container means the page layout changed or the
            # download failed; surface it instead of silently queuing nothing.
            raise Exception("rscbook list page contains no item-info nodes")
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('.//h4/a/@href').extract_first()
            base_url = f'https://books.rsc.org/books/search-results'
            url = parse.urljoin(base_url, href)
            doi = "".join(li.xpath('.//span[contains(text(),"DOI:")]/parent::div[1]/text()').extract()).strip()
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = "00430"
            article_json["url"] = url
            article_json["title"] = "".join(li.xpath('.//h4/a//text()').extract())
            article_json["doi"] = doi
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def rscbook_rscbookarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """No-op article callback: detail pages require no follow-up scheduling."""
    return DealModel()


def rscbook_rscbookarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an RSC book detail page into an ``other_latest`` record.

    Extracts title, subtitle, series, ISBNs, abstract, authors, pricing and
    publication date from the HTML detail page.

    Fixes vs. original: publication-date parsing crashed (AttributeError /
    IndexError) when the ``citation_publication_date`` meta tag was missing
    or had fewer than three ``/``-separated parts; missing month/day are now
    padded with "00" (consistent with the other ETL callbacks in this file).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    title = "".join(res.xpath('//h1[@class="book-info__title"]/text()').extract()).strip()
    pub_date_info = res.xpath('//meta[@name="citation_publication_date"]/@content').extract_first()
    pub_date = ""
    pub_year = ""
    if pub_date_info:
        # Expected form "YYYY/M/D"; pad missing month/day with "0" so the
        # result is a fixed-width YYYYMMDD (e.g. "2020/5" -> "20200500").
        parts = pub_date_info.split('/')
        parts += ['0'] * (3 - len(parts))
        pub_date = parts[0] + parts[1].rjust(2, '0') + parts[2].rjust(2, '0')
        pub_year = pub_date[:4]

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "00430"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["provider"] = "ROYALSOCIETY"
    data["zt_provider"] = "rscbook"
    data["product"] = "RSC"
    data["sub_db_id"] = sub_db_id
    data["sub_db"] = "TS"
    data["source_type"] = "1"
    data["vision"] = "1"
    data["is_deprecated"] = "0"
    data["country"] = "GB"
    data["language"] = "EN"
    data["batch"] = down_date_str
    data["latest_date"] = down_date_str[:8]
    data["down_date"] = down_date_str[:8]
    data["rawid"] = rawid
    data['rawid_mysql'] = rawid
    data["rawid_alt"] = ''
    data["keyid"] = lngid
    data["lngid"] = lngid
    data["doi"] = article_json['doi'].strip()
    data["url"] = ""
    data["provider_url"] = provider_url

    data["title"] = title
    data["title_alt"] = ""
    data["title_sub"] = "".join(res.xpath('//h1[@class="book-info__title"]/span[@class="subtitle"]/text()').extract()).strip()
    data["title_series"] = ";".join(res.xpath('//span[text()="Series:"]/parent::div/a/text()').extract()).strip()
    data["isbn"] = "".join(res.xpath('//div[text()="Hardback ISBN:"]/following::div[1]/text()').extract()).strip()
    data["eisbn"] = "".join(res.xpath('//div[text()="EPUB ISBN:"]/following::div[1]/text()').extract()).strip()
    data["pdf_isbn"] = "".join(res.xpath('//div[text()="PDF ISBN:"]/following::div[1]/text()').extract()).strip()
    data["keyword"] = ''
    data["keyword_alt"] = ""
    data["keyword_machine"] = ""
    data["subject_word"] = ""
    data["clc_no_1st"] = ''
    data["clc_no"] = ''
    data["clc_machine"] = ""
    data["subject_edu_code"] = ""
    data["subject"] = "".join(res.xpath('//div[@class="book-series left-flag"]/text()').extract()).strip()
    data["abstract"] = "".join(res.xpath('//section[@class="abstract"]//text()').extract()).strip()
    data["abstract_alt"] = ""
    data["catalog"] = ""
    data["process_date"] = ""
    data["page_cnt"] = "".join(res.xpath('//div[text()="No. of Pages:"]/following::div[1]/text()').extract()).strip()
    data["word_cnt"] = ""
    data["price"] = ";".join(res.xpath('//div[@class="display_price_buy_resource"]/text()').extract()).strip()
    data["other_info"] = ""
    data["cover_path"] = ""
    data["collection"] = ";".join(res.xpath('//span[text()="Special Collection:"]/parent::div/a/text()').extract()).strip()
    data["pdf_size"] = ""
    data["fulltext_type"] = ""
    data["fulltext_addr"] = ""
    data["fund"] = ""
    data["fund_alt"] = ""
    data["fund_id"] = ""
    data["author_id"] = ""
    data["author"] = ";".join(res.xpath('//div[@class="book-info__authors"]//div[contains(@class,"al-author-name")]/a/text()').extract()).strip()
    data["author_nochange"] = ''
    data["author_alt"] = ""
    data["email"] = ""
    data["relation"] = ""
    data["author_intro"] = ""
    data["organ_id"] = ""
    data["organ"] = ""
    data["organ_alt"] = ""
    data["organ_area"] = ""
    data["publisher"] = ''
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["pub_place"] = ''
    data["book_size"] = ""
    data["revision"] = ""
    data["impressions"] = ""
    data["citation"] = ''
    save_data.append({'table': 'other_latest', 'data': data})

    result.save_data = save_data
    return result


def cambridgebook_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Cambridge book detail page into an ``other_latest`` record.

    Extracts title, subtitle, series, ISBN, abstract, authors/affiliations,
    pricing, dimensions and publication date from the HTML detail page.

    Fixes vs. original:
    * leftover debug ``print(lngid)`` removed;
    * ``strptime`` was called on a possibly-empty date string (ValueError
      when neither "Date Published" nor "Publication planned for" is on the
      page); pub_date/pub_year now fall back to empty strings.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['href']
    res = Selector(text=html)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "00147"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["provider"] = "CAMBRIDGE"
    data["zt_provider"] = "cambridgebook"
    data["product"] = "CAMBRIDGE"
    data["sub_db_id"] = sub_db_id
    data["sub_db"] = "TS"
    data["source_type"] = "1"
    data["vision"] = "1"
    data["is_deprecated"] = "0"
    data["country"] = "GB"
    data["language"] = "EN"
    data["batch"] = down_date_str
    data["latest_date"] = down_date_str[:8]
    data["down_date"] = down_date_str[:8]
    data["rawid"] = rawid
    data['rawid_mysql'] = rawid
    data["rawid_alt"] = ''
    data["keyid"] = lngid
    data["lngid"] = lngid
    data["doi"] = ''
    data["url"] = ""
    data["provider_url"] = provider_url

    title = "".join(res.xpath('//div[contains(@class,"bookDetailsWrap")]/h1/text()').extract()).strip()
    pub_date_info = ''.join(res.xpath('//div[contains(@class,"bookDetailsWrap")]//li[text()="Date Published: "]/span/text()').extract()).strip()
    if not pub_date_info:
        # Unpublished titles carry a planned date instead.
        pub_date_info = ''.join(res.xpath('//div[contains(@class,"bookDetailsWrap")]//li[text()="Publication planned for: "]/span/text()').extract()).strip()
    pub_date = ""
    pub_year = ""
    if pub_date_info:
        # Page shows e.g. "March 2021"; pad the missing day with zeros.
        d = datetime.datetime.strptime(pub_date_info, "%B %Y").strftime("%Y%m")
        pub_date = d.ljust(8, '0')
        pub_year = pub_date[:4]
    data["title"] = title
    data["title_alt"] = ""
    data["title_sub"] = "".join(res.xpath('//div[contains(@class,"bookDetailsWrap")]/h1/span[@class="subTitle"]//text()').extract()).strip()
    data["title_series"] = ";".join(res.xpath('//div[contains(@class,"bookDetailsWrap")]//p[text()="Part of "]/a/text()').extract()).strip()
    data["isbn"] = "".join(res.xpath('//div[contains(@class,"bookDetailsWrap")]//li[text()="isbn: "]/span/text()').extract()).strip()
    data["eisbn"] = ""
    data["keyword"] = ''
    data["keyword_alt"] = ""
    data["keyword_machine"] = ""
    data["subject_word"] = ""
    data["clc_no_1st"] = ''
    data["clc_no"] = ''
    data["clc_machine"] = ""
    data["subject_edu_code"] = ""
    data["subject"] = ""
    data["abstract"] = "".join(res.xpath('//li[@id="descriptionTab"]/p[1]//text()').extract()).strip()
    data["abstract_alt"] = ""
    data["catalog"] = ""
    data["process_date"] = ""
    data["page_cnt"] = "".join(res.xpath('//li[text()="length: "]/span/text()').extract()).strip()
    data["word_cnt"] = ""
    data["price"] = "".join(res.xpath('//div[@class="priceContainer"]/h2/*[not(@class="priceFormatLegend")]//text()').extract()).strip()
    data["other_info"] = ""
    data["cover_path"] = ""
    data["collection"] = ""
    data["pdf_size"] = ""
    data["fulltext_type"] = ""
    data["fulltext_addr"] = ""
    data["fund"] = ""
    data["fund_alt"] = ""
    data["fund_id"] = ""
    data["author_id"] = ""
    author_list = list()
    organ_list = list()
    author_intro_list = list()
    # Each <p> in the authors tab holds name (<strong>), affiliation (<em>)
    # and a free-text bio; collect the three in parallel lists.
    for p in res.xpath('//li[@id="authorsTab"]/p'):
        author_list.append("".join(p.xpath('strong//text()').extract()).strip())
        organ_list.append("".join(p.xpath('em//text()').extract()).strip())
        author_intro_list.append(" ".join(p.xpath('.//text()').extract()).strip())
    data["author"] = ";".join(author_list).strip()
    data["author_nochange"] = ''
    data["author_alt"] = ""
    data["email"] = ""
    data["relation"] = ""
    data["author_intro"] = ";".join(author_intro_list).strip()
    data["organ_id"] = ""
    data["organ"] = ";".join(organ_list).strip()
    data["organ_alt"] = ""
    data["organ_area"] = ""
    data["publisher"] = ''
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["pub_place"] = ''
    data["book_size"] = "".join(res.xpath('//li[text()="dimensions: "]/span/text()').extract()).strip()
    data["revision"] = ""
    data["impressions"] = ""
    data["citation"] = ''
    save_data.append({'table': 'other_latest', 'data': data})

    result.save_data = save_data
    return result


def ieeeconference_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL an IEEE conference-paper detail response into ``other_latest`` and
    ``other_ref_latest`` records.

    The download payload is a JSON wrapper with the page ``html`` (from
    which the embedded ``.document.metadata=`` JSON is extracted) and a
    ``references`` JSON string.

    Fixes vs. original:
    * the author loop built the "Name[1][2]" string but never appended it to
      ``author_list``, so ``author`` / ``author_1st`` were always empty;
    * leftover debug ``print(lngid)`` removed;
    * regex patterns made raw strings (escape sequences like ``\\.`` in a
      plain string warn on modern Python).
    """
    result = EtlDealModel()
    save_data = list()

    html_s = callmodel.para_dicts['data']['1_1']['html']
    html_j = json.loads(html_s)
    html = html_j['html']
    # The page embeds its metadata as a JS assignment; re-append the closing
    # brace the non-greedy match consumed.
    info_s = re.findall(r'\.document\.metadata=(.*?)};', html)[0] + '}'
    info_json = json.loads(info_s)
    references_json = json.loads(html_j['references'])

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '00371'
    product = 'IEEE'
    provider = 'IEEE'
    sub_db = 'HY'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'ieeeconference'
    data['source_type'] = '6'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "US"
    data["language"] = "EN"
    data['title'] = info_json['displayDocTitle']
    data['title_alt'] = ''
    data['title_sub'] = ""
    data['provider_url'] = f'https://ieeexplore.ieee.org/document/{info_json["articleId"]}'
    data['down_cnt'] = ''
    issn = ''
    isbn = ''
    if info_json.get('issn', ''):
        issn = info_json['issn'][0]['value']
    if info_json.get('isbn', ''):
        isbn = info_json['isbn'][0]['value']
    data['issn'] = issn
    data['isbn'] = isbn
    data['doi'] = info_json['doi']
    keyword_list = list()
    for keyword_i in info_json['keywords']:
        keyword_list.extend(keyword_i['kwd'])
    data['keyword'] = ';'.join(keyword_list)
    data['keyword_alt'] = ""
    data['keyword_machine'] = ""
    data['subject_word'] = ""
    data['clc_no_1st'] = ""
    data['clc_no'] = ""
    data['clc_machine'] = ""
    data['subject_edu_code'] = ""
    data['subject_edu'] = ""
    data['subject'] = ""
    data['sub_db_class_name'] = ""
    data['research_field'] = ""
    data['abstract'] = info_json['abstract']
    data['abstract_alt'] = ""
    data['begin_page_sort'] = ""
    data['begin_page'] = info_json['startPage']
    data['end_page'] = info_json['endPage']
    data['jump_page'] = ""
    data['page_info'] = ""
    data['doc_code'] = ""
    data['doc_no'] = ""
    data['process_date'] = ""
    data['recv_date'] = ""
    data['page_cnt'] = ""
    data['fulltext_type'] = ""
    data['pdf_size'] = ""
    data['fund'] = ""
    data['fund_alt'] = ""
    data['fund_id'] = ""
    author_list = list()
    organ_list = list()
    # Build "Name[1][2]" strings where [n] is the 1-based index of each
    # affiliation in the de-duplicated organ_list.
    for author_info in info_json['authors']:
        s = f'{author_info["name"]}'
        for organ in author_info['affiliation']:
            if organ not in organ_list:
                organ_list.append(organ)
            s = s + f'[{organ_list.index(organ) + 1}]'
        # BUG FIX: the original never stored s, leaving author fields empty.
        author_list.append(s)
    data['author_id'] = ""
    data['author_1st'] = re.sub(r'\[.*?\]', '', author_list[0]) if author_list else ''
    data['author'] = ';'.join(author_list)
    data['author_alt'] = ""
    data['corr_author'] = ""
    data['corr_author_id'] = ""
    data['email'] = ""
    data['author_intro'] = ""
    data['organ_id'] = ""
    data['organ_1st'] = ""
    data['organ'] = ';'.join([f'[{i+1}]{v}' for i, v in enumerate(organ_list)])
    data['organ_alt'] = ""
    data['preferred_organ'] = ""
    data['organ_area'] = ""
    data['meeting_name'] = info_json['publicationTitle']
    data['meeting_name_alt'] = ""
    data['meeting_record_name'] = ""
    data['meeting_record_name_alt'] = ""
    data['meeting_intro'] = ""
    data['meeting_place'] = info_json['confLoc']
    data['meeting_counts'] = ""
    data['accept_date'] = ""
    data['meeting_date_raw'] = ""
    data['publisher'] = info_json['publisher']
    pub_date_info = info_json['publicationDate']
    # Dates come as either "June 2020" or "15 June 2020"; pad to YYYYMMDD.
    try:
        d = datetime.datetime.strptime(pub_date_info, "%B %Y").strftime("%Y%m")
    except ValueError:
        d = datetime.datetime.strptime(pub_date_info, "%d %B %Y").strftime("%Y%m%d")
    pub_date = d.ljust(8, '0')
    pub_year = pub_date[:4]
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_place'] = ""
    data['host_organ'] = ""
    data['host_organ_id'] = ""
    data['meeting_code'] = ""
    data['sponsor'] = ""
    data['society'] = ""
    data['meeting_level'] = ""
    data['edit_group'] = ""
    data['chief_editor'] = ""
    data['book_id'] = ""
    # NOTE(review): compares against the string 'true'; if the metadata ever
    # carries a JSON boolean this stays '' — confirm against live payloads.
    data['is_oa'] = 1 if info_json.get('isOpenAccess', '') == 'true' else ''
    data['vol'] = ""
    data['num'] = ""
    data['is_suppl'] = ""

    save_data.append({'table': 'other_latest', 'data': data})

    # Build the companion reference record; each reference gets a derived
    # lngid of parent-lngid + 4-digit sequence number.
    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    num = 0
    for item in references_json['references']:
        num += 1
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        # Strip inline HTML tags from the citation text.
        refer_text = re.sub(r'<.*?>', '', item['text'])
        ref_one["refer_text_raw"] = refer_text
        ref_one["refer_text_site"] = refer_text
        ref_one["author"] = ''
        ref_one["title"] = item.get('title', '')
        ref_one["strtype"] = ''
        ref_one["source_name"] = ''
        ref_one["pub_year"] = ''
        ref_one["vol"] = ''
        ref_one["num"] = ''
        ref_one["doi"] = ''
        ref_one["begin_page"] = ''
        ref_one["end_page"] = ''
        ref_one["page_info"] = ''
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'other_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def initlanguageMap():
    """Build a lookup from language name/code variants to canonical 2-letter codes.

    The embedded table is a ";"-separated list of "CODE★VARIANT" pairs where
    VARIANT may be the 2-letter code itself, a 3-letter ISO 639-2 code, or an
    English language name. Keys are uppercased so callers can look up
    case-insensitively.

    Returns:
        dict: {UPPERCASED_VARIANT: two_letter_code}
    """
    # NOTE: originally this local was named ``str``, shadowing the builtin.
    raw = "AA★AA;AB★AB;AE★AE;AF★AF;AK★AK;AM★AM;AN★AN;AR★AR;AS★AS;AV★AV;AY★AY;AZ★AZ;BA★BA;BE★BE;BG★BG;BH★BH;BI★BI;BM★BM;BN★BN;BO★BO;BR★BR;BS★BS;CA★CA;CE★CE;CH★CH;CO★CO;CR★CR;CS★CS;CU★CU;CV★CV;CY★CY;DA★DA;DE★DE;DV★DV;DZ★DZ;EE★EE;EL★EL;EN★EN;EO★EO;ES★ES;ET★ET;EU★EU;FA★FA;FF★FF;FI★FI;FJ★FJ;FO★FO;FR★FR;FY★FY;GA★GA;GD★GD;GL★GL;GN★GN;GU★GU;GV★GV;HA★HA;HE★HE;HI★HI;HO★HO;HR★HR;HT★HT;HU★HU;HY★HY;HZ★HZ;IA★IA;ID★ID;IE★IE;IG★IG;II★II;IK★IK;IO★IO;IS★IS;IT★IT;IU★IU;JA★JA;JV★JV;KA★KA;KG★KG;KI★KI;KJ★KJ;KK★KK;KL★KL;KM★KM;KN★KN;KO★KO;KR★KR;KS★KS;KU★KU;KV★KV;KW★KW;KY★KY;LA★LA;LB★LB;LG★LG;LI★LI;LN★LN;LO★LO;LT★LT;LU★LU;LV★LV;MD★MD;MG★MG;MH★MH;MI★MI;MK★MK;ML★ML;MN★MN;MO★MO;MR★MR;MS★MS;MT★MT;MY★MY;NA★NA;NB★NB;ND★ND;NE★NE;NG★NG;NL★NL;NN★NN;NO★NO;NR★NR;NV★NV;NY★NY;OC★OC;OJ★OJ;OM★OM;OR★OR;OS★OS;PA★PA;PI★PI;PL★PL;PS★PS;PT★PT;QU★QU;RM★RM;RN★RN;RO★RO;RU★RU;RW★RW;SA★SA;SC★SC;SD★SD;SE★SE;SG★SG;SH★SH;SI★SI;SK★SK;SL★SL;SM★SM;SN★SN;SO★SO;SQ★SQ;SR★SR;SS★SS;ST★ST;SU★SU;SV★SV;SW★SW;TA★TA;TE★TE;TG★TG;TH★TH;TI★TI;TK★TK;TL★TL;TN★TN;TO★TO;TR★TR;TS★TS;TT★TT;TW★TW;TY★TY;UG★UG;UK★UK;UR★UR;UZ★UZ;VE★VE;VI★VI;VO★VO;WA★WA;WO★WO;XH★XH;YI★YI;YO★YO;ZA★ZA;ZH★ZH;ZU★ZU;AA★AAR;AA★Afar;AB★ABK;AB★Abkhazian;AE★AVE;AE★Avestan;AF★AFR;AF★Afrikaans;AK★AKA;AK★AKA + 2;AK★Akan;AM★AMH;AM★Amharic;AN★Aragonese;AN★ARG;AR★ARA;AR★ARA + 30;AR★Arabic;AS★ASM;AS★Assamese;AV★AVA;AV★Avaric;AY★AYM;AY★AYM + 2;AY★Aymara;AZ★AZE;AZ★AZE + 2;AZ★Azerbaijani;BA★BAK;BA★Bashkir;BE★BEL;BE★Belarusian;BG★BUL;BG★Bulgarian;BH★BIH;BH★Bihari;BH★None;BI★BIS;BI★Bislama;BM★BAM;BM★Bambara;BN★BEN;BN★Bengali;BO★BOD;BO★TIB;BO★Tibetan;BR★BRE;BR★Breton;BS★BOS;BS★Bosnian;CA★CAT;CA★Catalan;CE★CHE;CE★Chechen;CH★CHA;CH★Chamorro;CO★Corsican;CO★COS;CR★CRE;CR★CRE + 6;CR★Cree;CS★CES;CS★CZE;CS★Czech;CU★CHU;CU★Church Slavic;CV★Chuvash;CV★CHV;CY★CYM;CY★WEL;CY★Welsh;DA★DAN;DA★Danish;DE★DEU;DE★GER;DE★German;DV★DIV;DV★Divehi;DZ★DZO;DZ★Dzongkha;EE★EWE;EL★ELL;EL★GRE;EL★Greek;EN★ENG;EN★English;EO★EPO;EO★Esperanto;ES★Castilian;ES★SPA;ES★Spanish;ET★EST;ET★Estonian;EU★BAQ;EU★Basque;EU★EUS;FA★FAS;FA★FAS + 1;FA★FAS + 2;FA★PER;FA★Persian;FF★FUL;FF★FUL + 9;FF★Fulah;FI★FIN;FI★Finnish;FJ★FIJ;FJ★Fijian;FO★FAO;FO★Faroese;FR★FRA;FR★FRE;FR★French;FY★FRY;FY★FRY + 3;FY★Western Frisian;GA★GLE;GA★Irish;GD★GLA;GD★Scottish Gaelic;GL★Galician;GL★GLG;GN★GRN;GN★GRN + 5;GN★Guaraní;GU★GUJ;GU★Gujarati;GV★GLV;GV★Manx;HA★HAU;HA★Hausa;HE★HEB;HE★Hebrew;HI★HIN;HI★Hindi;HO★Hiri Motu;HO★HMO;HR★Croatian;HR★HRV;HR★SCR;HT★Haitian Creole;HT★HAT;HU★HUN;HU★Hungarian;HY★ARM;HY★Armenian;HY★HYE;HZ★HER;HZ★Herero;IA★INA;IA★Interlingua;IA★International Auxiliary Language Association;ID★IND;ID★Indonesian;IE★ILE;IE★Interlingue;IG★IBO;IG★Igbo;II★III;II★Sichuan Yi;IK★Inupiaq;IK★IPK;IK★IPK + 2;IO★IDO;IS★ICE;IS★Icelandic;IS★ISL;IT★ITA;IT★Italian;IU★IKU;IU★IKU + 2;IU★Inuktitut;JA★Japanese;JA★JPN;JV★JAV;JV★Javanese;KA★GEO;KA★Georgian;KA★KAT;KG★KON;KG★KON + 3;KG★Kongo;KI★KIK;KI★Kikuyu;KJ★KUA;KJ★Kwanyama;KK★KAZ;KK★Kazakh;KL★KAL;KL★Kalaallisut;KM★KHM;KM★Khmer;KN★KAN;KN★Kannada;KO★KOR;KO★Korean;KR★Kanuri;KR★KAU;KR★KAU + 3;KS★KAS;KS★Kashmiri;KU★KUR;KU★KUR + 3;KU★Kurdish;KV★KOM;KV★KOM + 2;KV★Komi;KW★COR;KW★Cornish;KY★KIR;KY★Kirghiz;LA★LAT;LA★Latin;LB★LTZ;LB★Luxembourgish;LG★Ganda;LG★LUG;LI★LIM;LI★Limburgish;LN★LIN;LN★Lingala;LO★LAO;LT★LIT;LT★Lithuanian;LU★LUB;LU★Luba-Katanga;LV★Latvian;LV★LAV;MD★Moldovan;MG★Malagasy;MG★MLG;MG★MLG + 10;MH★MAH;MH★Marshallese;MI★MAO;MI★Māori;MI★MRI;MK★MAC;MK★Macedonian;MK★MKD;ML★MAL;ML★Malayalam;MN★MON;MN★MON + 2;MN★Mongolian;MO★MOL;MO★Moldavian;MR★MAR;MR★Marathi;MS★Malay;MS★MAY;MS★MSA;MS★MSA + 12;MS★MSA + 13;MT★Maltese;MT★MLT;MY★BUR;MY★Burmese;MY★MYA;NA★NAU;NA★Nauru;NB★NOB;NB★Norwegian Bokmål;ND★NDE;ND★North Ndebele;NE★NEP;NE★Nepali;NG★NDO;NG★Ndonga;NL★DUT;NL★Dutch;NL★NLD;NN★NNO;NN★Norwegian Nynorsk;NO★NOR;NO★NOR + 2;NO★Norwegian;NR★NBL;NR★South Ndebele;NV★NAV;NV★Navajo;NY★Chichewa;NY★NYA;OC★Occitan;OC★OCI;OJ★OJI;OJ★OJI + 7;OJ★Ojibwa;OM★ORM;OM★ORM + 4;OM★Oromo;OR★ORI;OR★Oriya;OS★OSS;OS★Ossetian;PA★PAN;PA★Panjabi;PI★Pāli;PI★PLI;PL★POL;PL★Polish;PS★Pashto;PS★PUS;PS★PUS + 3;PT★POR;PT★Portuguese;QU★QUE;QU★QUE + 44;QU★Quechua;RM★Raeto-Romance;RM★ROH;RN★Kirundi;RN★RUN;RO★Romanian;RO★RON;RO★RUM;RU★RUS;RU★Russian;RW★KIN;RW★Kinyarwanda;SA★SAN;SA★Sanskrit;SC★Sardinian;SC★SRD;SC★SRD + 4;SD★Sindhi;SD★SND;SE★Northern Sami;SE★SME;SG★SAG;SG★Sango;SH★HBS ;SH★HBS + 3;SH★Serbo-Croatian;SI★SIN;SI★Sinhalese;SK★SLK;SK★SLO;SK★Slovak;SL★Slovenian;SL★SLV;SM★Samoan;SM★SMO;SN★Shona;SN★SNA;SO★SOM;SO★Somali;SQ★ALB;SQ★Albanian;SQ★SQI;SQ★SQI + 3;SQ★SQI + 4;SR★SCC;SR★Serbian;SR★SRP;SS★SSW;SS★Swati;ST★SOT;ST★Sotho;SU★SUN;SU★Sundanese;SV★SWE;SV★Swedish;SW★SWA;SW★SWA + 2;SW★Swahili;TA★TAM;TA★Tamil;TE★TEL;TE★Telugu;TG★Tajik;TG★TGK;TH★THA;TH★Thai;TI★Tigrinya;TI★TIR;TK★TUK;TK★Turkmen;TL★Tagalog;TL★TGL;TN★TSN;TN★Tswana;TO★TON;TO★Tonga;TR★TUR;TR★Turkish;TS★TSO;TS★Tsonga;TT★TAT;TT★Tatar;TW★TWI;TY★TAH;TY★Tahitian;UG★UIG;UG★Uyghur;UK★UKR;UK★Ukrainian;UR★URD;UR★Urdu;UZ★UZB;UZ★UZB + 2;UZ★Uzbek;VE★VEN;VE★Venda;VI★VIE;VI★Vietnamese;VO★VOL;VO★Volapük;WA★Walloon;WA★WLN;WO★WOL;WO★Wolof;XH★XHO;XH★Xhosa;YI★YID;YI★YID + 2;YI★Yiddish;YO★YOR;YO★Yoruba;ZA★ZHA;ZA★ZHA + 2;ZA★Zhuang;ZH★CHI;ZH★Chinese;ZH★ZHO;ZH★ZHO + 12;ZH★ZHO + 13;ZU★ZUL;ZU★Zulu"
    codelanguageMap = {}
    for entry in raw.split(";"):
        entry = entry.strip()
        # shortest meaningful entry is "X★Y"
        if len(entry) < 3:
            continue
        vec = entry.split("★")
        if len(vec) < 2:
            continue
        code = vec[0].strip()
        lan = vec[1].upper().strip()
        if not lan or not code:
            continue
        codelanguageMap[lan] = code
    return codelanguageMap


def get_wosderwentpatent_country(text):
    """Return the leading alphabetic prefix of a patent number.

    E.g. "CN117288748-A" -> "CN". Falls back to "UN" (unknown) when the
    string does not start with a letter.
    """
    head = re.match(r'[a-zA-Z]+', text)
    return head.group(0) if head else "UN"


def parse_wosderwentpatent_article(src_data):
    """Flatten a raw Derwent patent record into the internal patent schema.

    Args:
        src_data: dict keyed by Derwent export field codes
            (GA, TI, PN, AU, AE, AB, PD, AD, PI, DC, MC, TF, EA, FS, RG,
            RI, DN, MN, FD, DS).

    Returns:
        dict: one mapped record destined for ``oversea_other_latest``.
    """
    codelanguageMap = initlanguageMap()
    data = {}
    rawid = src_data["GA"]
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    sub_db_id = "01235"
    product = "DERWENT"
    sub_db = "ZL"
    provider = "CLARIVATE"
    source_type = "2"
    data["is_deprecated"] = "0"
    data["rawid"] = rawid
    # alternate form: a dash after the first four characters of the rawid
    data["rawid_alt"] = "{}-{}".format(rawid[0:4], rawid[4:])
    data["rawid_mysql"] = rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://webofscience.clarivate.cn/wos/alldb/full-record/DIIDW:" + rawid

    data["title"] = cleanSemicolon(src_data["TI"])
    pub_no = cleanSemicolon(src_data["PN"])
    data["country"] = get_wosderwentpatent_country(pub_no)
    data["pub_no"] = pub_no
    data["author"] = cleanSemicolon(src_data["AU"])
    data["applicant"] = cleanSemicolon(src_data["AE"])
    # NOTE(review): organ comes from the lower-case "applicant" key while every
    # other field uses a two-letter Derwent code — confirm against the producer.
    data["organ"] = cleanSemicolon(src_data["applicant"])
    data["abstract"] = cleanSemicolon(src_data["AB"])

    # PD layout (triple-space separated), e.g.:
    # "CN117288748-A   26 Dec 2023   G01N-021/78   202405   Chinese"
    #  [0] pub number  [1] pub date  [2] first IPC  [3] week  [4] language
    pd_info = src_data["PD"].strip().split("   ")
    pub_date = ""
    if len(pd_info) > 1:
        pub_date = datetime.datetime.strptime(pd_info[1], "%d %b %Y").strftime("%Y%m%d")
    data["pub_date"] = pub_date

    ipc_no_1st = ""
    # NOTE(review): ipc_no is built from the whole PD string; it looks like it
    # should come from a dedicated IPC-list field — confirm with raw samples.
    ipc_no = src_data["PD"].strip().replace("; ", ";")
    if len(pd_info) > 2:
        ipc_no_1st = pd_info[2]
    if len(ipc_no) == 0:
        ipc_no = ipc_no_1st
    data["ipc_no_1st"] = ipc_no_1st
    data["ipc_no"] = ipc_no

    week = ""
    page_cnt = ""
    if len(pd_info) > 3:
        # BUG FIX: week/page info lives in the guarded element pd_info[3];
        # the old code inspected pd_info[2], which holds the first IPC code
        # and can never contain "Pages".
        if "Pages" in pd_info[3]:
            tmps = pd_info[3].split("Pages:")
            week = tmps[0].strip()
            page_cnt = tmps[1].strip()
    data["week"] = week
    data["page_cnt"] = page_cnt

    language = ""
    if len(pd_info) > 4:
        lgstr = pd_info[4].strip()
        if len(lgstr) > 0:
            lgStrings = lgstr.upper().split(";")
            hashSet = set()
            if len(lgStrings) > 1:
                # several languages: map each to its 2-letter code, de-duplicated
                for lg in lgStrings:
                    tmp = codelanguageMap.get(lg, "")
                    if tmp and len(tmp) > 0:
                        hashSet.add(tmp)
                language = ";".join(hashSet)
            else:
                language = codelanguageMap.get(lgstr.upper(), "")
            if not language:
                language = ""
        else:
            language = ""
    if len(language) == 0:
        language = "UN"  # unknown language
    data["language"] = language

    # AD is triple-space separated like PD; the guarded elements below are
    # assumed to be [1]=application number, [2]=application date — TODO confirm
    # against raw AD samples.
    ad_info = src_data["AD"].strip().split("   ")
    app_no = ""
    if len(ad_info) > 1:
        # BUG FIX: previously read pd_info[1] (the publication date) here
        app_no = ad_info[1]
    data["app_no"] = app_no
    app_date = ""
    if len(ad_info) > 2:
        # BUG FIX: previously parsed pd_info[1] despite guarding on ad_info
        app_date = datetime.datetime.strptime(ad_info[2], "%d %b %Y").strftime("%Y%m%d")
    data["app_date"] = app_date
    data["priority"] = cleanSemicolon(src_data["PI"])
    data["dc_no"] = cleanSemicolon(src_data["DC"])
    data["mc_no"] = cleanSemicolon(src_data["MC"])
    data["research_field"] = cleanSemicolon(src_data["TF"])
    data["detail_abstract"] = cleanSemicolon(src_data["EA"])
    data["sub_db_class_name"] = cleanSemicolon(src_data["FS"])
    data["register_no"] = cleanSemicolon(src_data["RG"])
    data["ri_no"] = cleanSemicolon(src_data["RI"])
    data["dcr_no"] = cleanSemicolon(src_data["DN"])
    data["markush_no"] = cleanSemicolon(src_data["MN"])

    data["FD"] = cleanSemicolon(src_data["FD"])  # further patent application details
    data["DS"] = cleanSemicolon(src_data["DS"])  # designated states (national / regional)
    return data


def wosderwentpatent_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Derwent patent records.

    Parses the downloaded JSON into the patent schema and, on success,
    queues one row for the ``oversea_other_latest`` table; a missing rawid
    or title yields a FAILED result with code 7.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts  # kept for parity with sibling callbacks
    down_model = callmodel.down_model.down_dict
    src_data = down_model["down_dict"]["json"]
    down_date = down_model.down_date

    data = parse_wosderwentpatent_article(src_data)
    data["down_date"] = down_date
    data["latest_date"] = down_date

    err_msg = ""
    if not data["rawid"]:
        err_msg = "wosderwentpatent_article_etl_callback 解析rawid出错"
    elif not data["title"]:
        err_msg = "wosderwentpatent_article_etl_callback 解析title出错"
    if err_msg:
        result.status = "FAILED"
        result.err_msg = err_msg
        result.code = 7
        return result

    result.save_data = [{"table": "oversea_other_latest", "data": data}]
    return result

def getLanguage(lan):
    """Map a semicolon-separated language string to a 2-letter code.

    Multiple languages, or a single language missing from ``mapLanguage``,
    default to "EN".
    """
    parts = lan.split(";")
    if len(parts) > 1:
        return "EN"
    return mapLanguage.get(parts[0].strip(), "EN")

def getShowOrgan(C1):
    """Format an affiliation string for display.

    Bracketed author groups ("[...]") are stripped. A single affiliation is
    returned as-is (trimmed); several are rendered with 1-based indices,
    e.g. "[1]Org A;[2]Org B".
    """
    cleaned = re.sub(r"\[.+?\]", "", C1)  # drop "[author, ...]" groups
    if ";" not in cleaned:  # zero or one affiliation
        return cleaned.strip()
    rendered = "".join(
        "[{}]{};".format(pos, organ.strip())
        for pos, organ in enumerate(cleaned.split(";"), 1)
    )
    return re.sub(r";+$", "", rendered)  # drop trailing semicolons


def getWriterMap(C1):
    """Index the bracketed author groups of an affiliation string.

    "[A; B] Org1; [C] Org2" -> {"1": "A; B", "2": "C"}

    Keys are 1-based positions as strings, matching the indices produced by
    ``getShowOrgan``. (Also drops an unused accumulator list from the old
    implementation and replaces the manual counter with ``enumerate``.)
    """
    writerMap = {}
    for idx, group in enumerate(re.findall(r"\[(.+?)\]", C1), 1):
        writerMap[str(idx)] = group
    return writerMap


def getShowWriter(AF, C1):
    """Attach affiliation indices to each author in AF.

    An author that appears in one or more bracketed groups of C1 gets a
    suffix like "[1,3]" whose numbers reference the corresponding organs in
    the show-organ string; unmatched authors are emitted unchanged.
    """
    writerMap = getWriterMap(C1)
    rendered = []
    for writer in AF.split(";"):
        writer = writer.strip()
        matches = []
        for key, group in writerMap.items():
            # a group lists its authors ";"-separated
            if any(writer == cand.strip() for cand in group.split(";")):
                matches.append(key)
        matches.sort()
        if matches:
            rendered.append("{}[{}]".format(writer, ",".join(matches)))
        else:
            rendered.append(writer)
    return re.sub(r";+$", "", ";".join(rendered))


async def getQkInfo(redis_conn, issn, eissn, journal_name):
    """Look up journal info from the "qk_info" redis hash.

    Tries the issn first, then the eissn, then the empty-string fallback key.
    The stored value is a JSON object; with a single entry that entry is
    returned directly, otherwise the entry is selected by the normalized
    (alphanumeric-only) journal name.

    Returns the journal-info value, or None when nothing matches.
    """
    # BUG FIX: ``json`` is not imported at module level in this file, so
    # json.loads below raised NameError at runtime; import locally.
    import json

    # normalize the lookup name to alphanumerics only
    journal_name = re.sub(r'[^a-zA-Z0-9]', '', journal_name)
    qk_info = None
    if len(issn) > 0:
        qk_info = await redis_conn.hget("qk_info", issn)
    if qk_info is None and len(eissn) > 0:
        qk_info = await redis_conn.hget("qk_info", eissn)
    if qk_info is None:
        qk_info = await redis_conn.hget("qk_info", "")
    if qk_info:
        qk_info = json.loads(qk_info)
        if len(qk_info) == 1:
            qk_info = list(qk_info.values())[0]
        elif len(qk_info) > 1:
            qk_info = qk_info.get(journal_name)
    return qk_info


# Uppercase three-letter English month abbreviation -> zero-padded month number.
mapMonth = {
    "JAN": "01",
    "FEB": "02",
    "MAR": "03",
    "APR": "04",
    "MAY": "05",
    "JUN": "06",
    "JUL": "07",
    "AUG": "08",
    "SEP": "09",
    "OCT": "10",
    "NOV": "11",
    "DEC": "12"
}


# Raw library/edition tag -> canonical edition code (e.g. "SCI" -> "SCIE").
# NOTE(review): these look like WOS citation-index edition codes — confirm.
mapLib = {
    "SCI": "SCIE",
    "SSCI": "SSCI",
    "AHCI": "AHCI",
    "ISTP": "CPCIS",
    "ISSHP": "CPCISSH",
    "ESCI": "ESCI",
    "CCR": "CCRE",
    "IC": "IC"
}

# English language name -> two-letter language code, used by getLanguage().
# FIX: the literal previously listed "Croatian" twice ("HR" then "SH");
# the first entry was dead (the later duplicate key won), so only the
# effective "SH" mapping is kept. Runtime contents are unchanged.
mapLanguage = {
    "English": "EN",
    "Arabic": "AR",
    "Byelorussian": "BE",
    "Bulgarian": "BG",
    "Catalan": "CA",
    "Czech": "CS",
    "Danish": "DA",
    "German": "DE",
    "Greek": "EL",
    "Spanish": "ES",
    "Estonian": "ET",
    "Finnish": "FI",
    "French": "FR",
    "Magyar": "HU",
    "Icelandic": "IS",
    "Italian": "IT",
    "Hebrew": "IW",
    "Japanese": "JA",
    "Korean": "KO",
    "Lithuanian": "LT",
    "Latvian": "LV",
    "Macedonian": "MK",
    "Dutch": "NL",
    "Norwegian": "NO",
    "Polish": "PL",
    "Portuguese": "PT",
    "Rumanian": "RO",
    "Russian": "RU",
    "Croatian": "SH",
    "Slovak": "SK",
    "Slovene": "SL",
    "Albanian": "SQ",
    "Serbian": "SR",
    "Swedish": "SV",
    "Thai": "TH",
    "Turkish": "TR",
    "Ukrainian": "UK",
    "Chinese": "ZH",
}


def wosjournal_woskeywordsitem_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for WOS journal keyword-item article records.

    Parses the downloaded JSON and, on success, queues one row for the
    ``oversea_meta_latest`` table. The parser signals errors via negative
    integer sentinels (-1..-4), which are mapped to FAILED results (code 7).
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    redis_conn = para_dicts["redis"]
    down_model = callmodel.down_model.down_dict
    src_data = down_model["down_dict"]["json"]
    down_date = down_model.down_date
    src_data["down_date"] = down_date
    data = parse_wosjournal_woskeywordsitem_article(src_data, redis_conn)
    status = "FAILED"
    err_msg = ""
    code = 7
    # BUG FIX: the sentinel checks previously read ``len(data == -1)`` which
    # raises TypeError (len() of a bool); compare the sentinels directly, as
    # the sibling scopus/ei callbacks in this file do.
    if data == -1:
        err_msg = "wosjournal_woskeywordsitem_article_etl_callback 解析原始UT字段没有WOS"
    elif data == -2:
        err_msg = "wosjournal_woskeywordsitem_article_etl_callback 解析解析子库类别mapLib出错"
    elif data == -3:
        err_msg = "wosjournal_woskeywordsitem_article_etl_callback 该条原始数据不是期刊资源类型"
    elif data == -4:
        err_msg = "wosjournal_woskeywordsitem_article_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "wosjournal_woskeywordsitem_article_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    data["down_date"] = down_date
    data["latest_date"] = down_date
    save_data = []
    save_data.append({"table": "oversea_meta_latest", "data": data})
    result.save_data = save_data
    return result


def wosjournal_woskeywordsitem_article_ref_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback building the reference (citation) record for a WOS
    journal article and queueing it for ``oversea_ref_latest``.

    Identity fields are carried over from the already-parsed article metadata
    (``para_dicts["meta_info"]``); the reference list itself is parsed by
    ``parse_wosjournal_ref``.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    down_dict = down_model["down_dict"]
    down_date = down_model.down_date
    meta_info = para_dicts["meta_info"]
    source_type = "3"
    sub_db_id = "00854"
    # BUG FIX: batch format used "%H%I%S" (12-hour hour where the minutes
    # belong); this file's convention elsewhere is "%Y%m%d_%H%M%S".
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    ref_data = {}
    ref_data["batch"] = batch
    # carried over from the article metadata record
    ref_data["lngid"] = meta_info["lngid"]
    ref_data["keyid"] = meta_info["keyid"]
    ref_data["down_date"] = down_date
    ref_data["rawid"] = "WOS:" + meta_info["rawid"]
    ref_data["pub_year"] = meta_info["pub_year"]
    ref_data["is_deprecated"] = "0"
    ref_data["source_type"] = source_type
    ref_data["sub_db_id"] = sub_db_id

    ref_data = parse_wosjournal_ref(ref_data, down_dict)
    status = "FAILED"
    err_msg = ""
    code = 7
    # parse_wosjournal_ref only sets "ref_cnt" when references were extracted
    if "ref_cnt" not in ref_data:
        err_msg = "wosjournal_woskeywordsitem_article_ref_etl_callback 解析错误，没有解析出引文"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "oversea_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def woscoreconference_article_ref_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback building the reference (citation) record for a WOS core
    conference article and queueing it for ``oversea_ref_latest``.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    down_dict = down_model["down_dict"]
    down_date = down_model.down_date
    meta_info = para_dicts["meta_info"]
    source_type = "6"
    sub_db_id = "00855"
    # BUG FIX: batch format used "%H%I%S" (12-hour hour where the minutes
    # belong); this file's convention elsewhere is "%Y%m%d_%H%M%S".
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    ref_data = {}
    ref_data["batch"] = batch
    # carried over from the article metadata record
    ref_data["lngid"] = meta_info["lngid"]
    ref_data["rawid"] = "WOS:" + meta_info["rawid"]
    ref_data["down_date"] = down_date
    ref_data["keyid"] = meta_info["keyid"]
    ref_data["is_deprecated"] = "0"
    ref_data["source_type"] = source_type
    ref_data["sub_db_id"] = sub_db_id
    ref_data["pub_year"] = meta_info["pub_year"]
    ref_data = parse_wosjournal_ref(ref_data, down_dict)
    status = "FAILED"
    err_msg = ""
    code = 7
    # parse_wosjournal_ref only sets "ref_cnt" when references were extracted
    if "ref_cnt" not in ref_data:
        err_msg = "woscoreconference_article_ref_etl_callback 解析错误，没有解析出引文"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "oversea_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def scopusjournal_scopuskeywordsitem_article_ref_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback building the reference (citation) record for a Scopus
    journal article and queueing it for ``oversea_ref_latest``.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    down_dict = down_model["down_dict"]
    down_date = down_model.down_date
    meta_info = para_dicts["meta_info"]
    source_type = "3"
    sub_db_id = "00164"
    # BUG FIX: batch format used "%H%I%S" (12-hour hour where the minutes
    # belong); this file's convention elsewhere is "%Y%m%d_%H%M%S".
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    ref_data = {}
    ref_data["batch"] = batch
    # carried over from the article metadata record
    ref_data["lngid"] = meta_info["lngid"]
    ref_data["keyid"] = meta_info["keyid"]
    ref_data["down_date"] = down_date
    ref_data["rawid"] = meta_info["rawid"]
    ref_data["pub_year"] = meta_info["pub_year"]
    ref_data["is_deprecated"] = "0"
    ref_data["source_type"] = source_type
    ref_data["sub_db_id"] = sub_db_id

    ref_data = parse_scopusjournal_ref(ref_data, down_dict)
    status = "FAILED"
    err_msg = ""
    code = 7
    # parse_scopusjournal_ref only sets "ref_cnt" when references were extracted
    if "ref_cnt" not in ref_data:
        err_msg = "scopusjournal_scopuskeywordsitem_article_ref_etl_callback 解析错误，没有解析出引文"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "oversea_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def scopusconference_article_ref_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback building the reference (citation) record for a Scopus
    conference article and queueing it for ``oversea_ref_latest``.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    down_dict = down_model["down_dict"]
    down_date = down_model.down_date
    meta_info = para_dicts["meta_info"]
    source_type = "6"
    sub_db_id = "00165"
    # BUG FIX: batch format used "%H%I%S" (12-hour hour where the minutes
    # belong); this file's convention elsewhere is "%Y%m%d_%H%M%S".
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    ref_data = {}
    ref_data["batch"] = batch
    # carried over from the article metadata record
    ref_data["lngid"] = meta_info["lngid"]
    ref_data["keyid"] = meta_info["keyid"]
    ref_data["rawid"] = meta_info["rawid"]
    ref_data["down_date"] = down_date
    ref_data["pub_year"] = meta_info["pub_year"]
    ref_data["is_deprecated"] = "0"
    ref_data["source_type"] = source_type
    ref_data["sub_db_id"] = sub_db_id

    ref_data = parse_scopusjournal_ref(ref_data, down_dict)
    status = "FAILED"
    err_msg = ""
    code = 7
    # parse_scopusjournal_ref only sets "ref_cnt" when references were extracted
    if "ref_cnt" not in ref_data:
        # FIX: the message previously named a nonexistent
        # "scopusconference_article_article_ref_etl_callback"
        err_msg = "scopusconference_article_ref_etl_callback 解析错误，没有解析出引文"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "oversea_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def scopusjournal_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Scopus journal article records parsed from CSV rows.

    Negative integer sentinels from the parser are mapped to FAILED results
    (code 7); on success one row is queued for ``oversea_meta_latest``.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    redis_conn = para_dicts["redis"]
    down_model = callmodel.down_model.down_dict
    src_data_csv = down_model["down_dict"]["csv"]
    down_date = down_model.down_date
    src_data_csv["down_date"] = down_date
    data = parse_scopusjournal_article_csv(src_data_csv, redis_conn)
    status = "FAILED"
    err_msg = ""
    code = 7
    if data == -1:
        err_msg = "scopusjournal_article_etl_callback 该条原始数据不是期刊资源类型"
    elif data == -2:
        err_msg = "scopusjournal_article_etl_callback 解析rawid出错"
    # BUG FIX: the -3 sentinel must be tested BEFORE subscripting — the old
    # order ``len(data["title"]) < 1 or data == -3`` raised TypeError when the
    # parser returned the int -3.
    elif data == -3 or len(data["title"]) < 1:
        err_msg = "scopusjournal_article_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    data["down_date"] = down_date
    data["latest_date"] = down_date
    save_data = []
    save_data.append({"table": "oversea_meta_latest", "data": data})
    result.save_data = save_data
    return result


def eijournal_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for EI journal article records parsed from CSV rows.

    The optional "txt" payload is carried as rawid_alt. Parser sentinels
    -1/-2 and an empty title yield FAILED results (code 7); otherwise one
    row is queued for ``oversea_meta_latest``.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    redis_conn = para_dicts["redis"]
    down_model = callmodel.down_model.down_dict
    down_dict = down_model["down_dict"]
    src_data_csv = down_dict["csv"]
    down_date = down_model.down_date
    src_data_csv["down_date"] = down_date
    src_data_csv["rawid_alt"] = down_dict.get("txt", "")

    data = parse_scopusjournal_article_csv(src_data_csv, redis_conn)
    err_msg = ""
    if data == -1:
        err_msg = "eijournal_article_etl_callback 该条原始数据不是期刊资源类型"
    elif data == -2:
        err_msg = "eijournal_article_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "eijournal_article_etl_callback 解析title出错"
    if err_msg:
        result.status = "FAILED"
        result.err_msg = err_msg
        result.code = 7
        return result

    data["down_date"] = down_date
    data["latest_date"] = down_date
    result.save_data = [{"table": "oversea_meta_latest", "data": data}]
    return result


def eiconference_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for EI conference article records parsed from CSV rows.

    Mirrors ``eijournal_article_etl_callback``: the optional "txt" payload is
    carried as rawid_alt, parser sentinels -1/-2 and an empty title fail the
    record (code 7), and successes are queued for ``oversea_meta_latest``.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    redis_conn = para_dicts["redis"]
    down_model = callmodel.down_model.down_dict
    down_dict = down_model["down_dict"]
    src_data_csv = down_dict["csv"]
    down_date = down_model.down_date
    src_data_csv["down_date"] = down_date
    src_data_csv["rawid_alt"] = down_dict.get("txt", "")

    data = parse_scopusjournal_article_csv(src_data_csv, redis_conn)
    err_msg = ""
    if data == -1:
        err_msg = "eiconference_article_etl_callback 该条原始数据不是期刊资源类型"
    elif data == -2:
        err_msg = "eiconference_article_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "eiconference_article_etl_callback 解析title出错"
    if err_msg:
        result.status = "FAILED"
        result.err_msg = err_msg
        result.code = 7
        return result

    data["down_date"] = down_date
    data["latest_date"] = down_date
    result.save_data = [{"table": "oversea_meta_latest", "data": data}]
    return result


