import datetime
import json
import math
import re
import time
import traceback
from copy import deepcopy

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basefile import BaseFile
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.baselibrary.utils.core.mdeprecated import retry_func_async, try_except2_async
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_callback.err_callback import all_client_back_callback
from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, JournalListModel, JournalIssueModel, \
    JournalArticleModel, DealInsertModel, JournalHomeModel, DealUpdateModel, OperatorSqlModel, EtlDealModel, HtmlModel

# Public API of this module: CNKI journal crawler parse/ETL callbacks plus a
# few download helpers.  Keep this list in sync with the definitions below —
# it controls what `from ... import *` exposes to the task dispatcher.
__all__ = ["para_key_callback",
           "cnkijournal_cnkijournallist_callback",
           "cnkijournal_cnkiissue_callback",
           "cnkijournal_cnkinet1st_callback",
           "cnkijournal_cnkinet1stall_callback",
           "cnkijournal_cnkinet1styear_callback",
           "cnkijournal_cnkinet1starticle_callback",
           "cnkijournal_cnkiarticle_callback",
           "cnkijournal_cnkiclasshome_callback",
           "cnkijournal_cnkiqkhome_callback",
           "cnkijournal_cnkiqkclasshome_callback",
           "cnkijournal_cnkijclasshome_callback",
           "cnkijournal_cnkijqkclasshome_callback",
           "cnkijournal_cnkiqkhomeinit_callback",
           "cnkijournal_cnkinet1starticle_etl_callback",
           "cnkijournal_cnkiarticle_ref_etl_callback",
           "cnkicfjd_cnkicfjdhome_callback",
           "cnkicjfx_cnkicjfxhome_callback",
           "cnkicjfr_cnkicjfrhome_callback",
           "cnkijournal_cnkiarticle_etl_callback",
           "down_cnki_article_html",
           "down_ref_requests",
           "deal_nxgpdata_request"]


def deal_nxgpdata_request():
    """Extract the citation-request ids from the cached nxgpdata-request.js.

    The JS file is downloaded (about once a day) by down_ref_requests(); this
    function pulls out every id captured by the
    ``param:'"references", "<id>", `` marker.

    Returns:
        list[str]: the captured ids (at least one).

    Raises:
        Exception: when no marker matches — the stale cache file is removed
            first so the next run re-downloads a fresh copy.
    """
    # Single source of truth for the cache-file name (also written by
    # down_ref_requests).
    js_cache = r"cnkiarticle_nxgpdata-request_1_3_ref_need.txt"
    request_js = BaseFile.single_read_file(js_cache)
    # Raw string: the pattern contains backslash-free quoting but raw keeps
    # it safe against future edits; uses the module-level `re` import.
    pattern = r"param:'\"references\", \"(.*?)\", "
    matches = re.findall(pattern, request_js)
    if matches:
        return matches
    # No match: bad download or the site changed its JS layout — drop the
    # cache so the next attempt fetches it again.
    BaseFile.remove_file(js_cache)
    raise Exception("js 下载错误，或者解析规则错误，请检查")


@try_except2_async(callback=all_client_back_callback, is_print=True)
@retry_func_async(retry_times=3, sleep_time=1)
async def down_ref_requests(self):
    """Download the static nxgpdata-request.js carrying the parameters
    required by citation ("references") requests.

    The file is static on the CDN, so one fetch per day is enough; on
    success the body is cached to a local text file consumed by
    deal_nxgpdata_request().

    Returns:
        tuple[bool, dict]: (success flag, middleware result dict).
    """
    from re_common.baselibrary.tools.all_requests.aiohttp_request import AioHttpRequest
    print("*************************")
    this_header = {
        "Accept": "*/*",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        "Host": "piccache.cnki.net",
        "Referer": "https://kns.cnki.net/kcms2/article/abstract?"
    }
    url = "https://piccache.cnki.net/2022/nxgp/kcms/20230625r/js/apidata/nxgpdata-request.js"
    rrq = AioHttpRequest()
    rrq.set_url(url) \
        .set_header(this_header) \
        .set_timeout(30) \
        .set_allow_resp_text(True) \
        .set_allow_resp_bytes(False)
    pm = self.pm
    rrq.set_proxy(pm.proxy)
    lists_middler = []
    middler_para = {"status_code_middlerwares": {"status_code": 200}}
    rrq.set_middler_para(middler_para)
    for name in middler_para:
        # getattr replaces the previous eval("rrq." + name): the same
        # attribute lookup without executing arbitrary strings.
        lists_middler.append(getattr(rrq, name))
    # Response must contain the "lazyLoadObj" marker to count as valid.
    rrq.set_marks(["lazyLoadObj"])
    lists_middler.append(rrq.marks_middlerwares)
    rrq.set_middler_list(lists_middler)
    bools, dicts = await rrq.run("get")
    print(bools, dicts)
    if bools:
        html = rrq.html
        BaseFile.single_write_file(r"cnkiarticle_nxgpdata-request_1_3_ref_need.txt", html)
        return bools, dicts
    return False, dicts


@try_except2_async(callback=all_client_back_callback, is_print=True)
@retry_func_async(retry_times=3, sleep_time=1)
async def down_cnki_article_html(self):
    """Download the article detail page and extract the hidden "listv" value.

    Deprecated since the 20230711 site redesign; kept for reference.

    Returns:
        tuple[bool, dict]: (True, parsed {"vl": ...} dict) on success;
        otherwise the middleware failure info, after rotating the proxy so
        the retry decorator re-invokes with a fresh one.
    """
    from re_common.baselibrary.tools.all_requests.aiohttp_request import AioHttpRequest
    from apps.crawler_platform.util.proxyhelper import ProxyHelper
    rrq = AioHttpRequest()
    pm = self.pm
    article_info = json.loads(pm.sql_model.article_info_json)
    dbcode = article_info["dbcode"]
    dbname = article_info["dbname"]
    filename = article_info["filename"]
    # Fix: `filename` was extracted but never interpolated into the URL.
    url = f"https://kns.cnki.net/kcms/detail/detail.aspx?dbcode={dbcode}&dbname={dbname}&filename={filename}&uniplatform=NZKPT&v="
    this_header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Host': 'kns.cnki.net',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    rrq.set_url(url) \
        .set_timeout(30) \
        .set_header(this_header) \
        .set_allow_resp_text(True) \
        .set_allow_resp_bytes(False)
    rrq.set_proxy(pm.proxy)
    lists_middler = []
    # Require HTTP 200 and a complete document (closing </html>).
    middler_para = {"status_code_middlerwares": {"status_code": 200}, "have_end_middlerwares": {"havestring": "</html>"}}
    rrq.set_middler_para(middler_para)
    for name in middler_para:
        # getattr replaces eval("rrq." + name): same lookup, no code execution.
        lists_middler.append(getattr(rrq, name))
    rrq.set_marks(["head-main"])
    lists_middler.append(rrq.marks_middlerwares)
    rrq.set_middler_list(lists_middler)

    bools, dicts = await rrq.run("get")
    if bools:
        html = rrq.html
        pm.down_model.down_dict["1_1"].state_dict["search_html"] = html
        from re_common.baselibrary.tools.myparsel import MParsel
        mc = MParsel(html=html)

        # CSS rule: the hidden input carrying the list "v" token.
        cnki_pare_dicts_4 = {
            'vl': 'input[id="listv"]::attr(value)'
        }

        cnki_css_pare_4 = {
            "type": "CSS",
            "rules": cnki_pare_dicts_4
        }
        new_dict = mc.css_parsel(sel=mc.sel, css_selector=cnki_css_pare_4)
        from apps.core.callback import default_call_back
        BaseDicts.get_recursive_dict(new_dict, None, default_call_back)
        pm.cookies = rrq.cookies
        return True, new_dict
    else:
        # Rotate the proxy before the retry decorator tries again.
        await ProxyHelper.get_proxy(self)
        print(bools, dicts)
        return bools, dicts


def para_key_callback(dicts):
    """Recursively normalize every scalar "key_name" value in place.

    Walks nested dicts/lists/tuples; for each non-container value stored
    under the key "key_name", removes full-width and half-width colons,
    strips surrounding whitespace, then drops an "(N版)" edition marker.
    Containers of other shapes are ignored; the structure itself is never
    reordered or replaced.
    """
    if isinstance(dicts, (list, tuple)):
        for element in dicts:
            para_key_callback(element)
    elif isinstance(dicts, dict):
        for key, value in dicts.items():
            if isinstance(value, (list, tuple, dict)):
                para_key_callback(value)
            elif key == "key_name":
                cleaned = value.replace("：", "").replace(":", "").strip()
                dicts[key] = re.sub(r"\(\d+版*\)", "", cleaned)

def cnkijournal_cnkijournallist_callback(callmodel: CallBackModel[JournalListModel]) -> DealModel:
    """Journal-list callback: fold the parsed journal detail page into
    journal_json and emit insert/update rows for every year/issue found.

    Payload layout (callmodel.para_dicts["data"]):
      * "1_1" - journal description fields keyed by Chinese page labels.
      * "1_2" - year/issue tree; records the newest collected year and
        seeds the next-stage issue table.
    """
    result = DealModel()
    journal_json_dicts = {}
    # Chinese field labels on the page -> canonical journal_json key names.
    dicts = {
        "目前状态": "state",
        "曾用刊名": "journal_name_used",
        "主办单位": "publisher",
        "出版周期": "type_name",
        "ISSN": "issn",
        "CN": "cnno",
        "出版地": "pub_place",
        "语种": "language",
        "开本": "book_size",
        "邮发代号": "sem_code",
        "创刊时间": "create_date",
        "专辑名称": "special_name",
        "专题名称": "subject_name",
        "来源数据库": "source_db",
        "出版文献量": "article_count",
        "总下载次数": "down_cnt",
        "总被引次数": "cited_cnt",
        "复合影响因子": "cnki_impact_fh_1",
        "综合影响因子": "cnki_impact_zh_1",
        "journal_name_alt": "journal_name_alt",
        "evaluate_info": "evaluate_info"
    }
    base_time = BaseTime().get_beijin_date_strins()
    # Batch id: YYYYMMDD_HHMMSS slice of the Beijing timestamp string.
    batch = base_time[0:8] + "_" + base_time[-6:]
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info

    journal_rawid = callmodel.sql_model.journal_rawid
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag,
                  "journal_rawid": journal_rawid,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    para_dicts = para_dicts["data"]
    # NOTE(review): this local shadows the module-level `time` import for
    # the rest of the function.
    time = para_dicts["1_1"]["time"]
    journal_json_dicts["time"] = time

    # Normalize every "key_name" value (strip colons / "(N版)" markers).
    para_key_callback(para_dicts)

    if "1_1" in para_dicts:

        pCode = para_dicts["1_1"]["pCode"]
        # print(para_dicts)
        journalType_str = ""
        for k, v in para_dicts["1_1"].items():
            if k in ["cnki_exclusive", "individual_issue", "first_launch"]:
                # The three flag fields are normalized to 0/1.
                if k == "individual_issue":
                    if v == "":
                        v = 0
                    else:
                        if v.find("个刊发行") > -1:
                            v = 1
                        else:
                            v = 0
                if k == "first_launch":
                    if v == "":
                        v = 0
                    else:
                        if v.find("网络首发") > -1:
                            v = 1
                        else:
                            v = 0
                if k == "cnki_exclusive":
                    if v == "":
                        v = 0
                    else:
                        v = 1

                journal_json_dicts[k] = v
            elif k == "qk_info":
                # qk_info is a list of {key_name, value} children; known
                # labels are mapped via `dicts`, unknown ones are parked in
                # other_dicts for later inspection.
                for item in v["children"]:
                    k = item["key_name"]
                    v = item["value"]
                    if k in dicts.keys():
                        k = dicts[k]
                        journal_json_dicts[k] = v
                    else:
                        result.other_dicts.update({k: v})
            elif k == "journal_type":
                # journal_type arrives as raw HTML; pull the span texts out.
                html = v
                selector = Selector(text=html)
                journalType = selector.css('p[class="journalType journalType2"] span ::text').getall()
                journalType_str = ";".join([i.strip() for i in journalType])
            else:
                if k in dicts.values():
                    journal_json_dicts[k] = v
                else:
                    result.other_dicts.update({k: v})

        journal_json_dicts["journalType"] = journalType_str
        # Record which expected fields were absent on this page.
        for k in dicts.keys():
            if k not in journal_json_dicts.keys():
                result.null_dicts.update({k: ""})

        # Append the impact-factor edition year ("value@year") when the
        # evaluate_info blurb states one.
        evaluate_info = journal_json_dicts["evaluate_info"]
        list1 = re.findall(r"\((\d{4})版?\)复合影响因子", evaluate_info)
        list2 = re.findall(r"\((\d{4})版?\)综合影响因子", evaluate_info)

        if list1:
            year = list1[0]
            if journal_json_dicts.get("cnki_impact_fh") or journal_json_dicts.get("cnki_impact_fh_1"):
                impact_fh_temp = journal_json_dicts.get("cnki_impact_fh_1") or journal_json_dicts.get("cnki_impact_fh")
                journal_json_dicts["cnki_impact_fh_1"] = str(impact_fh_temp) + "@" + str(year)
        if list2:
            year = list2[0]
            if journal_json_dicts.get("cnki_impact_zh") or journal_json_dicts.get("cnki_impact_zh_1"):
                impact_zh_temp = journal_json_dicts.get("cnki_impact_zh_1") or journal_json_dicts.get("cnki_impact_zh")
                journal_json_dicts["cnki_impact_zh_1"] = str(impact_zh_temp) + "@" + str(year)



    # To get the latest year, read it straight from the 1_2 section.
    if "1_2" in para_dicts:
        # NOTE(review): pCode is only bound in the "1_1" branch above; a
        # payload with "1_2" but no "1_1" would raise NameError here —
        # confirm upstream always sends both sections.
        collect_newyear_dict = para_dicts["1_2"]["year_issue"]["children"][0]
        collect_newyear = collect_newyear_dict["year"]
        journal_json_dicts["collect_newyear"] = collect_newyear
        journal_json_dicts["after_batch"] = batch
        issue_json = {}
        insert = DealInsertModel()
        insert.insert_pre = CoreSqlValue.insert_ig_it
        for item in para_dicts["1_2"]["year_issue"]["children"]:
            year = item["year"]
            issue_list = item["issue"].split(";")
            issue_value_list = item["issue_value"].split(";")
            for i, j in zip(issue_list, issue_value_list):
                # "No.12" -> "12"
                num = i[len('No.'):]
                issue_value = j
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"].split(";")[0]
                del temp["task_tag_next"]
                issue_json["issue_value"] = issue_value
                issue_json["pCode"] = pCode
                issue_json["before_batch"] = batch

                temp.update(
                    {'pub_year': year, 'num': num, 'issue_json': json.dumps(issue_json, ensure_ascii=False)})
                insert.lists.append(temp)
                dum = DealUpdateModel()
                dum.update = {
                    'issue_json': temp['issue_json']
                }
                dum.where = {
                    "task_name": temp['task_name'],
                    "task_tag": temp['task_tag'],
                    "journal_rawid": temp['journal_rawid'],
                    "pub_year": temp['pub_year'],
                    "num": temp['num']
                }
                result.next_dicts.update_list.append(dum)
        result.next_dicts.insert.append(insert)
        # One extra row (empty year/num, sub_db_id 00393) for the net-first
        # publication task, routed to the last next-tag.
        temp = info_dicts.copy()
        temp["task_tag"] = temp["task_tag_next"].split(";")[-1]
        temp["sub_db_id"] = "00393"
        del temp["task_tag_next"]
        temp.update({'pub_year': "", 'num': "", 'issue_json': json.dumps(issue_json, ensure_ascii=False)})
        insert = DealInsertModel()
        insert.lists.append(temp)
        # This is the net-first row: insert-ignore so its state is not overwritten.
        insert.insert_pre = CoreSqlValue.insert_ig_it
        result.next_dicts.insert.append(insert)
        dum = DealUpdateModel()
        dum.update = {
            'issue_json': temp['issue_json']
        }
        dum.where = {
            "task_name": temp['task_name'],
            "task_tag": temp['task_tag'],
            "journal_rawid": temp['journal_rawid'],
            "pub_year": temp['pub_year'],
            "num": temp['num']
        }

        result.next_dicts.update_list.append(dum)
    # Merge the newly parsed fields over the stored journal_json.
    qk_dicts = json.loads(callmodel.sql_model.journal_json)
    qk_dicts.update(journal_json_dicts)
    result.befor_dicts.update['journal_json'] = json.dumps(qk_dicts, ensure_ascii=False)
    # result.befor_dicts.update({"journal_json": json.dumps(qk_dicts, ensure_ascii=False)})
    return result





def cnkijournal_cnkiissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Issue callback: parse one journal issue's table of contents into
    per-article rows for the article-level task.

    Two passes: the first counts titles so duplicates inside the issue can
    be detected; the second builds one insert row and one JSON_SET update
    per article (duplicate titles get the filename appended to the rawid).
    """
    # Pulls the filename id out of a "...&filename=XXX" share link.
    rule = re.compile(r"&filename=(.*)")
    result = DealModel()
    para_dicts = callmodel.para_dicts

    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "journal_rawid": sql_model.journal_rawid,
                  "pub_year": sql_model.pub_year,
                  "num": sql_model.num,
                  "sub_db_id": sql_model.sub_db_id
                  }

    base_time = BaseTime().get_beijin_date_strins()
    # Batch id: YYYYMMDD_HHMMSS slice of the Beijing timestamp string.
    batch = base_time[0:8] + "_" + base_time[-6:]
    issue_json = sql_model.issue_json
    issue_json = json.loads(issue_json)
    issue_json["after_batch"] = batch
    articleCount = para_dicts["data"]["1_1"]["articleCount"]
    result.befor_dicts.update["articlecount"] = articleCount
    result.befor_dicts.update["page"] = "1"
    result.befor_dicts.update["issue_json"] = json.dumps(issue_json, ensure_ascii=False)
    list_col = para_dicts["data"]["1_1"]["issue_info"]["children"]
    insert = DealInsertModel()
    # Changed from REPLACE to insert-ignore first, then update.
    insert.insert_pre = CoreSqlValue.insert_ig_it
    title_list = list()
    title_rawid_sets = set()
    # First pass: collect every title so duplicates can be detected.
    for item_temp in list_col:
        for item_temp1 in item_temp["children"]:
            title = item_temp1["title"]
            title_list.append(title)
    result_t = {}
    for key in title_list:
        result_t[key] = result_t.get(key, 0) + 1
    for k, v in result_t.items():
        if v > 1:
            title_rawid_sets.add(k)

    # Second pass: one insert/update pair per article.
    for item in list_col:
        column_info = item["col"]
        for item1 in item["children"]:
            url = item1["url"]
            dictss = BaseUrl.urlQuery2Dict(url)
            value = dictss["v"]  # a new id

            title = item1["title"]
            page_info = item1["pageline"]
            author = item1["author"]
            journalshares = item1["journalshares"]
            filenames = rule.findall(journalshares)
            try:
                filename = filenames[0].split("&")[0]
            except:
                # send_user_err("cnkijournal_cnkiissue_callback", {"item": item1, "info_dicts": info_dicts})
                # A retracted article can come through without a filename.
                filename = ""
            # Site redesign 20230208: filename is now available directly; the
            # journalshares fallback above can be removed later.
            if filename == "":
                filename = item1["filename"]
            if_html_fulltext = item1["if_html_fulltext"]
            if if_html_fulltext == 'HTML阅读':
                if_html_fulltext = 1
            else:
                if_html_fulltext = 0
            temp = info_dicts.copy()
            temp["value"] = value
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            temp["column_info"] = column_info
            temp["url"] = url
            temp["author"] = author
            temp["title"] = title
            temp["page_info"] = page_info
            temp["if_html_fulltext"] = str(if_html_fulltext)
            temp["filename"] = filename
            temp["before_batch"] = batch

            new_temp = {}
            new_temp["rawid"] = ",".join([temp["journal_rawid"], temp["pub_year"], temp["num"], temp["title"]])
            # Duplicate titles inside the issue: disambiguate with filename.
            if temp["title"] in title_rawid_sets:
                new_temp["rawid"] = ",".join(
                    [temp["journal_rawid"], temp["pub_year"], temp["num"], temp["title"], temp["filename"]])
            new_temp["task_tag"] = temp["task_tag"]
            new_temp["task_name"] = temp["task_name"]
            new_temp["sub_db_id"] = temp["sub_db_id"]
            del temp["task_tag"]
            del temp["task_name"]
            del temp["sub_db_id"]

            new_temp["article_info_json"] = json.dumps(temp, ensure_ascii=False)
            insert.lists.append(new_temp)
            du_model = DealUpdateModel()
            # du_model.update = {
            #     'article_info_json': json.dumps(temp, ensure_ascii=False)
            # }
            duplicte = json_update(temp)
            du_model.update_no_placeholder.update(
                {"article_info_json": f"JSON_SET(article_info_json, {duplicte})"})

            du_model.where = {
                "task_name": new_temp['task_name'],
                "task_tag": new_temp['task_tag'],
                "rawid": new_temp['rawid']
            }
            result.next_dicts.update_list.append(du_model)

    result.next_dicts.insert.append(insert)

    # result.next_dicts["insert_ig"]["lists"].append(new_temp)

    return result


def cnkijournal_cnkinet1stall_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Net-first "all articles" page callback: record paging info, fan out
    rows for the remaining pages on first sight, and queue one insert/update
    pair per article found on the current page.
    """
    dealmodel = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag_next,
                  "journal_rawid": sql_model.journal_rawid,
                  "sub_db_id": sql_model.sub_db_id
                  }

    page_dicts = callmodel.para_dicts["data"]["1_1"]
    totalRow = callmodel.para_dicts["data"]["1_1"]["input"]
    if totalRow == "":
        totalRow = 0
    # 20 articles per page.
    pageTotal = math.ceil(int(totalRow) / 20)
    # Pages are 0-based; the DB "page" column stores the max page index.
    page_all = pageTotal - 1
    base_time = BaseTime().get_beijin_date_strins()
    batch = base_time[0:8] + "_" + base_time[-6:]
    issue_json = sql_model.issue_json
    issue_json = json.loads(issue_json)
    issue_json["after_batch"] = batch
    dealmodel.befor_dicts.update["issue_json"] = json.dumps(issue_json, ensure_ascii=False)
    page_index = int(sql_model.page_index)
    turn_page = task_info.turn_page
    journal_rawid = info_dicts["journal_rawid"]
    # On the first page of a fresh crawl, clone this row once per remaining
    # page so the other pages get fetched too.
    if (turn_page == 7 and page_index == 0) or (turn_page == 8 and page_index == 1):
        sql_dict = callmodel.sql_model.dict()
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        # Strip DB-managed / state columns before re-inserting the row.
        sql_dict.pop("id")
        sql_dict.pop("update_time")
        sql_dict.pop("create_time")
        sql_dict.pop("null_dicts")
        sql_dict.pop("err_msg")
        sql_dict.pop("other_dicts")
        sql_dict.pop("state")
        sql_dict.pop("failcount")
        for page in range(page_index, pageTotal):
            sql_dict["page"] = page_all
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
        dealmodel.befor_dicts.insert.append(di_model_bef)
    # for page1, item_dict in page_dicts.items():
    # articlecount = item_dict["input"]
    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it

    for items in page_dicts["article"]["children"]:
        url = items["href"]
        author = items["author"]
        title = items["title"]
        dictss = BaseUrl.urlQuery2Dict(url)
        value = dictss["v"]  # a new id
        items["value"] = value
        items["journal_rawid"] = journal_rawid
        items["before_batch"] = batch
        items["author"] = author
        temp_info = info_dicts.copy()
        del temp_info["journal_rawid"]
        temp_info["rawid"] = ",".join([journal_rawid, title])
        temp_info["article_info_json"] = json.dumps(items, ensure_ascii=False)
        # insert = DealInsertModel()
        d_i_model.lists.append(temp_info)
        # insert.insert_pre = CoreSqlValue.insert_ig_it
        # dealmodel.next_dicts.insert.append(insert)
        du_model = DealUpdateModel()
        duplicte = json_update(items)
        du_model.update_no_placeholder.update(
            {"article_info_json": f"JSON_SET(article_info_json, {duplicte})"})
        # du_model.update = {
        #     'article_info_json': json.dumps(items, ensure_ascii=False)
        # }

        du_model.where = {
            "task_name": temp_info['task_name'],
            "task_tag": temp_info['task_tag'],
            "rawid": temp_info['rawid']
        }
        dealmodel.next_dicts.update_list.append(du_model)

    dealmodel.next_dicts.insert.append(d_i_model)
    dealmodel.befor_dicts.update["page"] = str(page_all)
    dealmodel.befor_dicts.update["articlecount"] = str(totalRow)

    return dealmodel


def cnkijournal_cnkinet1styear_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Net-first per-year page callback: same flow as
    cnkijournal_cnkinet1stall_callback but scoped to a single publication
    year of the journal.
    """
    dealmodel = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag_next,
                  "journal_rawid": sql_model.journal_rawid,
                  "sub_db_id": sql_model.sub_db_id
                  }

    page_dicts = callmodel.para_dicts["data"]["1_1"]
    totalRow = callmodel.para_dicts["data"]["1_1"]["input"]
    if totalRow == "":
        totalRow = 0
    pageTotal = math.ceil(int(totalRow) / 20)
    # Pages start at 0; the DB "page" column stores the maximum page index
    # reached, so with 5 pages (0 1 2 3 4) the max page is 4.
    page_all = pageTotal - 1
    base_time = BaseTime().get_beijin_date_strins()
    batch = base_time[0:8] + "_" + base_time[-6:]
    issue_json = sql_model.issue_json
    issue_json = json.loads(issue_json)
    issue_json["after_batch"] = batch
    dealmodel.befor_dicts.update["issue_json"] = json.dumps(issue_json, ensure_ascii=False)
    page_index = int(sql_model.page_index)
    turn_page = task_info.turn_page
    journal_rawid = info_dicts["journal_rawid"]
    # On the first page of a fresh crawl, clone this row for every
    # remaining page so they all get fetched.
    if (turn_page == 7 and page_index == 0) or (turn_page == 8 and page_index == 1):
        sql_dict = callmodel.sql_model.dict()
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        # Strip DB-managed / state columns before re-inserting the row.
        sql_dict.pop("id")
        sql_dict.pop("update_time")
        sql_dict.pop("create_time")
        sql_dict.pop("null_dicts")
        sql_dict.pop("err_msg")
        sql_dict.pop("other_dicts")
        sql_dict.pop("state")
        sql_dict.pop("failcount")
        for page in range(page_index, pageTotal):
            sql_dict["page"] = page_all
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
        dealmodel.befor_dicts.insert.append(di_model_bef)

    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it

    for items in page_dicts["article"]["children"]:
        url = items["href"]
        author = items["author"]
        title = items["title"]
        dictss = BaseUrl.urlQuery2Dict(url)
        value = dictss["v"]  # a new id
        items["journal_rawid"] = journal_rawid
        items["value"] = value
        items["author"] = author
        items["before_batch"] = batch
        temp_info = info_dicts.copy()
        del temp_info["journal_rawid"]
        temp_info["rawid"] = ",".join([journal_rawid, title])
        temp_info["article_info_json"] = json.dumps(items, ensure_ascii=False)

        d_i_model.lists.append(temp_info)
        du_model = DealUpdateModel()
        duplicte = json_update(items)
        du_model.update_no_placeholder.update(
            {"article_info_json": f"JSON_SET(article_info_json, {duplicte})"})
        # du_model.update = {
        #     'article_info_json': json.dumps(items, ensure_ascii=False)
        # }
        du_model.where = {
            "task_name": temp_info['task_name'],
            "task_tag": temp_info['task_tag'],
            "rawid": temp_info['rawid']
        }
        dealmodel.next_dicts.update_list.append(du_model)
    dealmodel.next_dicts.insert.append(d_i_model)
    dealmodel.befor_dicts.update["page"] = str(page_all)
    dealmodel.befor_dicts.update["articlecount"] = str(totalRow)

    return dealmodel


def cnkijournal_cnkinet1st_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Net-first landing callback: split the semicolon-joined year_1st list
    into one row per entry — the "全部" (all) entry goes to the first
    next-tag with an empty pub_year, concrete years go to the last next-tag.
    """
    result = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    issue_json = json.loads(sql_model.issue_json)
    try:
        pCode = issue_json["pCode"]
    except:
        # pCode can be absent in some cases; fall back to empty.
        pCode = ""
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag,
                  "journal_rawid": sql_model.journal_rawid,
                  "sub_db_id": sql_model.sub_db_id
                  }

    page_dicts = callmodel.para_dicts["data"]["1_1"]
    year_1st = page_dicts["year_1st"]
    # dim = DealItemModel()
    dinm = DealInsertModel()
    # Changed to insert first, then update.
    dinm.insert_pre = CoreSqlValue.insert_ig_it
    for item in year_1st.split(";"):
        temp = info_dicts.copy()
        temp['pub_year'] = item
        temp['num'] = ''
        temp["page_index"] = 0
        temp['issue_json'] = json.dumps({'pCode': pCode}, ensure_ascii=False)
        if item == '全部':
            temp['task_tag'] = task_info.task_tag_next.split(";")[0]
            temp['pub_year'] = ''
        else:
            temp['task_tag'] = task_info.task_tag_next.split(";")[-1]
        dinm.lists.append(temp)
        du_model = DealUpdateModel()
        duplicte = json_update({'pCode': pCode})
        du_model.update_no_placeholder.update(
            {"issue_json": f"JSON_SET(issue_json, {duplicte})"})

        du_model.where = {
            "task_name": temp['task_name'],
            "task_tag": temp['task_tag'],
            "journal_rawid": temp['journal_rawid'],
            "pub_year": temp["pub_year"]
        }
        result.befor_dicts.update_list.append(du_model)

    # dim.insert.append(dinm)
    result.befor_dicts.insert.append(dinm)
    return result


def cnkijournal_cnkinet1starticle_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Net-first article callback: trim the two link fields to their id part
    and JSON_SET the parsed "1_1" section into article_info_json.

    Note: the para_dicts passed in here is the down_model payload.

    On any parse failure the traceback is recorded on the row and its
    failcount is bumped instead of raising.
    """
    result = DealModel()
    html_dicts = callmodel.para_dicts
    try:
        dicts = html_dicts["data"]["1_1"]
        # Keep only the id part of each share link.
        dicts["kcmslink"] = dicts["kcmslink"].split("&")[0].replace("v=", "")
        dicts["customlink"] = dicts["customlink"].split("&")[0].replace("doc-list-recVideo:v=", "")
        duplicte = json_update(dicts)
        result.befor_dicts.update_no_placeholder.update(
            {"article_info_json": f"JSON_SET(article_info_json, {duplicte})"})
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; parse errors are stored for later inspection.
        result.befor_dicts.update["err_msg"] = traceback.format_exc()
        result.befor_dicts.update_no_placeholder.update({"failcount": "failcount+1"})
    return result


def cnkijournal_cnkiarticle_callback(callmodel: CallBackModel[JournalArticleModel]) -> DealModel:
    """Article detail callback: merge the parsed sections into
    article_info_json.

    Sections: "1_1" link/citation-api fields, "1_2" reference/citation
    counts (two known payload shapes), "1_3"/"1_4" currently unused.
    """
    result = DealModel()
    article_info_json = callmodel.sql_model.article_info_json
    article_info_dict = json.loads(article_info_json)
    # Parse the returned data.
    p_data = callmodel.para_dicts["data"]
    result.code_dicts = {}
    if "1_1" in p_data:
        dicts = p_data["1_1"]
        # Keep only the id part of each link.
        dicts["kcmslink"] = dicts["kcmslink"].split("&")[0].replace("v=", "")
        dicts["customlink"] = dicts["customlink"].split("&")[0].replace("doc-list-recVideo:v=", "")
        # NOTE(review): the next three self-assignments are no-ops except
        # that they raise KeyError when a key is missing — presumably a
        # presence check; confirm before removing.
        dicts["citationapivv"] = dicts["citationapivv"]
        dicts["citationapiclientId"] = dicts["citationapiclientId"]
        dicts["filename"] = dicts["filename"]
        dicts["size"] = 10  # needed for the citation download
        dicts["start"] = 1  # needed for the citation download
        dicts["type"] = ""  # needed for the citation download
        article_info_dict.update(dicts)
    if "1_2" in p_data:
        info_str = p_data["1_2"]["html"]
        # print(info_str)
        info = json.loads(info_str)
        REFERENCE = 0
        CITING = 0
        # Old payload shape: flat REFERENCE/CITING keys.
        if "REFERENCE" in info.keys():
            REFERENCE = info["REFERENCE"]
            CITING = info["CITING"]
        else:
            # New payload shape: list of {name, value} entries.
            for item in info["data"]:
                if item["name"] == "references":
                    REFERENCE = item["value"]
                if item["name"] == "citations":
                    CITING = item["value"]
        article_info_dict.update({"refcount": REFERENCE, "citecount": CITING})
    if "1_3" in p_data:
        pass
    if "1_4" in p_data:
        pass

    result.befor_dicts.update["article_info_json"] = json.dumps(article_info_dict, ensure_ascii=False)
    return result


def cnkijournal_cnkiqkhomeinit_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Journal-home paging init: read lblPageCount from section "1_1", queue
    one row per page (1..N) under the first next task tag, and mark the
    current row active with the total page count.
    """
    deal = DealModel()
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "sub_db_id": callmodel.sql_model.sub_db_id,
                "task_tag_next": task_info.task_tag_next}
    data_section = callmodel.para_dicts["data"]
    if "1_1" not in data_section:
        # Nothing parsed for this section: return an untouched DealModel.
        return deal
    total_page = data_section["1_1"]["lblPageCount"]
    deal.code_dicts = {
        "1_1": {"max_page": total_page}
    }
    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    next_tag = base_row["task_tag_next"].split(";")[0]
    for page_no in range(1, int(total_page) + 1):
        row = dict(base_row)
        row.update({"task_tag": next_tag,
                    "page": total_page,
                    "home_rawid": "None",
                    "page_index": page_no,
                    "home_json": "{}"})
        del row["task_tag_next"]
        insert_model.lists.append(row)
    deal.befor_dicts.update.update({'is_active': 1, "page": total_page})
    deal.befor_dicts.insert.append(insert_model)
    return deal


def cnkijournal_cnkiclasshome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse the CNKI subject-class navigation tree (rule "1_1").

    Single-character class codes start a new top-level group; longer codes
    must extend the current group's code.  One insert row for the next crawl
    stage and one ``home_json`` update are queued per matched tree node.

    :param callmodel: callback context with parsed data, redis task info and
        the current sql row.
    :return: DealModel carrying before-stage inserts/updates.
    :raises Exception: when a nested node's code does not extend the current
        top-level class code.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    start_code = ""
    start_name = ""
    for k, v in para_dicts["data"].items():
        if k != "1_1":
            continue
        d_i_model = DealInsertModel()
        d_i_model.insert_pre = CoreSqlValue.insert_ig_it
        for item in v["data"]["children"]:
            temp = info_dicts.copy()
            # node text looks like "name(count)" -- pull out the count
            sub_count = re.findall(r".*\((.*?)\).*", item["text"])
            sub = re.findall(r'Submit.naviSearch\("JSTMWT6S","CCL","(.*?)","(.*?)",this\)',
                             item["onclick"])
            # BUGFIX: nodes whose onclick did not match previously fell
            # through and raised KeyError on temp["home_json"] below (and
            # appended an incomplete row to the insert list); skip them.
            if not sub:
                continue
            new_dict2 = {}
            code = sub[0][0]
            # the name arrives as \uXXXX escapes -- decode it
            name = sub[0][1].encode().decode('unicode_escape')
            if len(code) == 1:
                # top-level class: route to the first task tag in the chain
                temp["task_tag"] = temp["task_tag_next"].split(";")[0]
                start_code = code
                start_name = name
                new_dict2["field"] = name
                new_dict2["number"] = sub_count[0]
            elif code.startswith(start_code):
                # child class: route to the last task tag in the chain
                temp["task_tag"] = temp["task_tag_next"].split(";")[-1]
                new_dict2["his"] = "->".join([start_name, name])
                new_dict2["field"] = name
                new_dict2["number"] = sub_count[0]
            else:
                raise Exception("理论上不应该出现这种情况")
            temp["page"] = math.ceil(int(sub_count[0]) / 20)  # 20 items/page
            temp["page_index"] = 1
            temp["home_rawid"] = code
            temp["home_json"] = json.dumps(new_dict2, ensure_ascii=False)
            del temp["task_tag_next"]
            d_i_model.lists.append(temp)
            # re-propagate home_json onto an already-existing page-1 row
            dum = DealUpdateModel()
            dum.update = {"home_json": temp["home_json"]}
            dum.where = {'task_name': temp["task_name"],
                         'task_tag': temp["task_tag"],
                         'page_index': 1,
                         'home_rawid': temp["home_rawid"]}
            result.befor_dicts.update_list.append(dum)
        result.befor_dicts.update.update({'is_active': 1, "page": -1})
        result.befor_dicts.insert.append(d_i_model)
    return result


# def cnkiqkhome_callback(value):
#     result = ""
#
#     result_raw = result
#     result = BaseUrl.urlQuery2Dict(value)
#     if isinstance(result, dict) and "baseid" in result.keys():
#         result = result["baseid"]
#     elif result_raw.find("index"):
#         result = result_raw.split("/")[-1]
#     else:
#         result = ""
#     return result

def cnkiqkhome_callback(value):
    """Extract the journal raw id from a CNKI journal URL.

    Looks for the ``/journals/<id>/`` path segment of *value* and returns
    the id; returns "" when the pattern is absent or *value* is not a
    string.

    :param value: URL (or URL-like string) to inspect.
    :return: the captured journal id, or "" on no match / bad input.
    """
    try:
        match = re.search(r'/journals/(\w+)/', value)
    except TypeError:
        # non-string input (the old bare `except:` also swallowed the
        # UnboundLocalError raised on no-match; now handled explicitly)
        traceback.print_exc()
        return ""
    return match.group(1) if match else ""

def cnkijournal_cnkiqkhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    # Initialise the full journal listing (全部期刊初始化).
    """Handle one parsed page of the CNKI "all journals" home listing.

    From rule "1_1" it derives the total page count, optionally re-pages the
    home table (turn_page 7/8 block), and for every journal entry queues an
    insert row for the next stage plus an update that merges the scraped
    journal metadata into ``journal_json`` via MySQL ``JSON_SET``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Column values shared by every generated row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total = data["lblCount"]
        # 20 journals per listing page.
        # NOTE(review): this variant subtracts 1 from the page count, unlike
        # the class-home callbacks below -- confirm that is intended.
        total_page = math.ceil(int(total) / 20) - 1
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Fan out the paging rows only from an early page; turn_page 7/8
        # appear to be paging modes -- TODO confirm their exact semantics.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # Where-clause template shared by the two updates below:
            # same task/tag/home, page_index compared against total_page.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate rows whose page_index exceeds the real page count.
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # Flip the comparison and activate the valid page range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)

            # Insert one row per remaining page (INSERT IGNORE, so existing
            # pages are left alone).  Strip bookkeeping columns first.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")
            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        # One next-stage journal row per listing entry on this page.
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["value"]["children"]:
            # NOTE(review): loop-invariant; also repeated after the loop.
            result.befor_dicts.update.update({'page': total_page})
            qk_name = item['qkname'].strip()
            if len(qk_name) != 0:
                organSpan = item['organSpan'].strip()
                fhfactorSpan = item['fhfactorSpan']
                zhfactorSpan = item['zhfactorSpan']
                bztimes = item['bztimes']
                flag = item['flag']
                pykm = item["pykm"]
                # journal raw id is extracted from the pykm URL
                journal_rawid = cnkiqkhome_callback(pykm)
                new_dict = {}
                # Subject info propagated from this row's home_json:
                # "his" == "special->subject" path when present.
                subject_forward = json.loads(callmodel.sql_model.home_json)
                subject = BaseDicts().is_dict_exit_key(subject_forward, "his")
                if subject != "":
                    special_name = subject.split("->")[0]
                    subject_name = BaseDicts().is_dict_exit_key(subject_forward, "field")
                else:
                    special_name = BaseDicts().is_dict_exit_key(subject_forward, "field")
                    subject_name = ""
                new_dict["publisher"] = organSpan
                new_dict["cnki_impact_fh"] = fhfactorSpan
                new_dict["cnki_impact_zh"] = zhfactorSpan
                new_dict["bztimes"] = bztimes
                new_dict["flag"] = flag
                new_dict["special_name"] = special_name
                new_dict["subject_name"] = subject_name
                new_dict["journal_name"] = qk_name

                temp_info = info_dicts.copy()
                task_tag_next = temp_info["task_tag_next"]
                temp_info["task_tag"] = task_tag_next
                del temp_info["task_tag_next"]
                temp_info["journal_rawid"] = journal_rawid
                temp_info["is_active"] = "0"
                # journal_json starts empty; real content is merged via the
                # JSON_SET update below.
                # temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
                temp_info["journal_json"] = "{}"
                di_model_next.lists.append(temp_info)
                du_model = DealUpdateModel()
                # du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{field}")'})
                du_model.update.update({
                    "sub_db_id": "00002",
                    "is_active": "1"})
                du_model.where.update({"journal_rawid": journal_rawid,
                                       "task_tag": temp_info["task_tag"],
                                       "task_name": callmodel.sql_model.task_name,
                                       })
                # Merge scraped metadata into the existing journal_json.
                duplicte = json_update(new_dict)
                du_model.update_no_placeholder.update({"journal_json": f"JSON_SET(journal_json, {duplicte})"})
                result.next_dicts.update_list.append(du_model)
        result.befor_dicts.update.update({'page': total_page, "is_active": 1})
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkijournal_cnkiqkclasshome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Handle one parsed page of a per-class CNKI journal listing.

    Like ``cnkijournal_cnkiqkhome_callback`` but scoped to one subject class:
    the class path ("his") from this row's ``home_json`` is appended to the
    journal's ``subject`` column, and ``home_json`` is re-propagated onto the
    re-paged rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    home_json = json.loads(callmodel.sql_model.home_json)
    # "his" is the "special->subject" class path recorded by the class-home
    # stage; empty for top-level classes.
    his = home_json.get("his", "")
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Column values shared by every generated row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total = data["lblCount"]
        # 20 journals per listing page (no -1 here, unlike the qkhome variant).
        total_page = math.ceil(int(total) / 20)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Fan out the paging rows only from an early page; turn_page 7/8
        # appear to be paging modes -- TODO confirm their exact semantics.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # Where-clause template shared by the two updates below.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate rows beyond the real page count ...
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # ... then flip the comparison and activate the valid range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)
            # Insert one row per remaining page after stripping bookkeeping
            # columns (INSERT IGNORE leaves existing pages alone).
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")
            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page

                # Re-propagate home_json onto each already-existing page row.
                dum = DealUpdateModel()
                dum.update = {"home_json": sql_dict["home_json"]}
                dum.where = {'task_name': sql_dict["task_name"],
                             'task_tag': sql_dict["task_tag"],
                             'page_index': page,
                             'home_rawid': sql_dict["home_rawid"]}
                result.befor_dicts.update_list.append(dum)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.update.update({'page': total_page, "is_active": 1})
            result.befor_dicts.insert.append(di_model_bef)

        # One next-stage journal row per listing entry on this page.
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["value"]["children"]:
            qk_name = item['qkname'].strip()
            if len(qk_name) != 0:
                organSpan = item['organSpan'].strip()
                fhfactorSpan = item['fhfactorSpan']
                zhfactorSpan = item['zhfactorSpan']
                bztimes = item['bztimes']
                flag = item['flag']
                pykm = item["pykm"]
                # journal raw id is extracted from the pykm URL
                journal_rawid = cnkiqkhome_callback(pykm)
                new_dict = {}
                # Subject info propagated from this row's home_json.
                subject_forward = json.loads(callmodel.sql_model.home_json)
                subject = BaseDicts().is_dict_exit_key(subject_forward, "his")
                if subject != "":
                    special_name = subject.split("->")[0]
                    subject_name = BaseDicts().is_dict_exit_key(subject_forward, "field")
                else:
                    special_name = BaseDicts().is_dict_exit_key(subject_forward, "field")
                    subject_name = ""
                new_dict["publisher"] = organSpan
                new_dict["cnki_impact_fh"] = fhfactorSpan
                new_dict["cnki_impact_zh"] = zhfactorSpan
                new_dict["bztimes"] = bztimes
                new_dict["flag"] = flag
                new_dict["special_name"] = special_name
                new_dict["subject_name"] = subject_name
                new_dict["journal_name"] = qk_name

                temp_info = info_dicts.copy()
                task_tag_next = temp_info["task_tag_next"]
                temp_info["task_tag"] = task_tag_next
                del temp_info["task_tag_next"]
                temp_info["journal_rawid"] = journal_rawid
                temp_info["is_active"] = "0"
                # journal_json starts empty; content is merged via JSON_SET.
                temp_info["journal_json"] = "{}"
                # temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
                di_model_next.lists.append(temp_info)
                du_model = DealUpdateModel()
                # Append this class path to the journal's subject column.
                du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
                du_model.update.update({
                    # "journal_json": json.dumps(new_dict, ensure_ascii=False),
                    "sub_db_id": "00002",
                    "is_active": "1"})
                du_model.where.update({"journal_rawid": journal_rawid,
                                       "task_tag": temp_info["task_tag"],
                                       "task_name": callmodel.sql_model.task_name,
                                       })
                # Merge scraped metadata into the existing journal_json.
                duplicte = json_update(new_dict)
                du_model.update_no_placeholder.update({"journal_json": f"JSON_SET(journal_json, {duplicte})"})
                result.next_dicts.update_list.append(du_model)
        result.befor_dicts.update.update({'page': total_page, "is_active": 1})
        result.next_dicts.insert.append(di_model_next)
    return result


def cnkijournal_cnkijclasshome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse the CNKI journal (J) subject-class navigation tree (rule "1_1").

    Same shape as ``cnkijournal_cnkiclasshome_callback`` but with the
    task-tag routing reversed: top-level classes route to the LAST tag in
    the chain and child classes to the FIRST.

    :param callmodel: callback context with parsed data, redis task info and
        the current sql row.
    :return: DealModel carrying before-stage inserts/updates.
    :raises Exception: when a nested node's code does not extend the current
        top-level class code.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    start_code = ""
    start_name = ""
    for k, v in para_dicts["data"].items():
        if k != "1_1":
            continue
        d_i_model = DealInsertModel()
        d_i_model.insert_pre = CoreSqlValue.insert_ig_it
        for item in v["data"]["children"]:
            temp = info_dicts.copy()
            # node text looks like "name(count)" -- pull out the count
            sub_count = re.findall(r".*\((.*?)\).*", item["text"])
            sub = re.findall(r'Submit.naviSearch\("JSTMWT6S","CCL","(.*?)","(.*?)",this\)',
                             item["onclick"])
            # BUGFIX: nodes whose onclick did not match previously fell
            # through and raised KeyError on temp["home_json"] below (and
            # appended an incomplete row to the insert list); skip them.
            if not sub:
                continue
            new_dict2 = {}
            code = sub[0][0]
            # the name arrives as \uXXXX escapes -- decode it
            name = sub[0][1].encode().decode('unicode_escape')
            if len(code) == 1:
                # top-level class: route to the last task tag in the chain
                temp["task_tag"] = temp["task_tag_next"].split(";")[-1]
                start_code = code
                start_name = name
                new_dict2["field"] = name
                new_dict2["number"] = sub_count[0]
            elif code.startswith(start_code):
                # child class: route to the first task tag in the chain
                temp["task_tag"] = temp["task_tag_next"].split(";")[0]
                new_dict2["his"] = "->".join([start_name, name])
                new_dict2["field"] = name
                new_dict2["number"] = sub_count[0]
            else:
                raise Exception("理论上不应该出现这种情况")
            temp["page"] = math.ceil(int(sub_count[0]) / 20)  # 20 items/page
            temp["page_index"] = 1
            temp["home_rawid"] = code
            temp["home_json"] = json.dumps(new_dict2, ensure_ascii=False)
            del temp["task_tag_next"]
            d_i_model.lists.append(temp)
            # re-propagate home_json onto an already-existing page-1 row
            dum = DealUpdateModel()
            dum.update = {"home_json": temp["home_json"]}
            dum.where = {'task_name': temp["task_name"],
                         'task_tag': temp["task_tag"],
                         'page_index': 1,
                         'home_rawid': temp["home_rawid"]}
            result.befor_dicts.update_list.append(dum)
        result.befor_dicts.update.update({'is_active': 1, "page": -1})
        result.befor_dicts.insert.append(d_i_model)
    return result


def cnkijournal_cnkijqkclasshome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Handle one parsed page of a per-class CNKI journal (J) listing.

    Variant of ``cnkijournal_cnkiqkclasshome_callback``: rows are inserted
    active with the ``is_cnki_j`` flag set, ``journal_json`` is stored
    directly (no JSON_SET merge), and ``journal_name`` is not recorded in
    the metadata dict.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    home_json = json.loads(callmodel.sql_model.home_json)
    # "his" is the "special->subject" class path recorded by the class-home
    # stage; empty for top-level classes.
    his = home_json.get("his", "")
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Column values shared by every generated row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total = data["lblCount"]
        # 20 journals per listing page.
        total_page = math.ceil(int(total) / 20)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Fan out the paging rows only from an early page; turn_page 7/8
        # appear to be paging modes -- TODO confirm their exact semantics.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # Where-clause template shared by the two updates below.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate rows beyond the real page count ...
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # ... then flip the comparison and activate the valid range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)

            # Insert one row per remaining page after stripping bookkeeping
            # columns (INSERT IGNORE leaves existing pages alone).
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")
            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        # One next-stage journal row per listing entry on this page.
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["value"]["children"]:
            qk_name = item['qkname'].strip()
            if len(qk_name) != 0:
                organSpan = item['organSpan'].strip()
                fhfactorSpan = item['fhfactorSpan']
                zhfactorSpan = item['zhfactorSpan']
                bztimes = item['bztimes']
                flag = item['flag']
                pykm = item["pykm"]
                # journal raw id is extracted from the pykm URL
                journal_rawid = cnkiqkhome_callback(pykm)
                new_dict = {}
                # Subject info propagated from this row's home_json.
                subject_forward = json.loads(callmodel.sql_model.home_json)
                subject = BaseDicts().is_dict_exit_key(subject_forward, "his")
                if subject != "":
                    special_name = subject.split("->")[0]
                    subject_name = BaseDicts().is_dict_exit_key(subject_forward, "field")
                else:
                    special_name = BaseDicts().is_dict_exit_key(subject_forward, "field")
                    subject_name = ""
                new_dict["publisher"] = organSpan
                new_dict["cnki_impact_fh"] = fhfactorSpan
                new_dict["cnki_impact_zh"] = zhfactorSpan
                new_dict["bztimes"] = bztimes
                new_dict["flag"] = flag
                new_dict["special_name"] = special_name
                # NOTE(review): unlike the other variants, journal_name is
                # not stored here -- confirm that is intended.
                new_dict["subject_name"] = subject_name

                temp_info = info_dicts.copy()
                task_tag_next = temp_info["task_tag_next"]
                temp_info["task_tag"] = task_tag_next
                del temp_info["task_tag_next"]
                temp_info["journal_rawid"] = journal_rawid
                temp_info["is_active"] = "1"
                temp_info["is_cnki_j"] = "1"
                # journal_json is stored directly (no JSON_SET merge here).
                temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
                di_model_next.lists.append(temp_info)
                du_model = DealUpdateModel()
                # Append this class path to the journal's subject column.
                du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
                du_model.update.update({"is_active": "1",
                                        "is_cnki_j": "1"})
                du_model.where.update({"journal_rawid": journal_rawid,
                                       "task_tag": temp_info["task_tag"],
                                       "task_name": callmodel.sql_model.task_name,
                                       })
                result.next_dicts.update_list.append(du_model)
        result.befor_dicts.update.update({'page': total_page, "is_active": 1})
        result.next_dicts.insert.append(di_model_next)
    return result


def cleanSemicolon(text):
    """Normalise semicolon separators in *text*.

    Full-width '；' becomes ';', whitespace around semicolons is removed,
    runs of semicolons collapse to one, and leading/trailing semicolons
    and whitespace are stripped.
    """
    cleaned = text.replace('；', ';')             # full-width -> half-width
    cleaned = re.sub(r"\s*;\s*", ";", cleaned)   # trim space around ';'
    cleaned = re.sub(r";+", ";", cleaned)        # collapse ';;' runs
    return cleaned.strip(";").strip()            # drop edge ';' and spaces


def checkExist(obj):
    """Return True when *obj* is not None and has a non-zero length."""
    return obj is not None and len(obj) > 0


def is_chinese(check_str):
    """Return True when *check_str* contains at least one CJK character
    in the range U+4E00..U+9FA5."""
    return re.search(u'[\u4e00-\u9fa5]', check_str) is not None


def cnkijournal_cnkinet1starticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for CNKI "net first" (online-first) journal articles.

    Normalizes the rule-parsed article fields in ``callmodel.para_dicts``
    into one ``journal_latest`` row: identity fields (lngid/keyid), journal
    metadata, authors/organs, funds, keywords, CLC numbers, paging, dates
    and fulltext availability.

    Args:
        callmodel: project call model carrying ``para_dicts`` (rule-parse
            output), ``down_model`` (downloaded pages keyed like "1_1") and
            ``sql_model`` (the task's DB row) — structure assumed from
            usage here; confirm against the caller.

    Returns:
        EtlDealModel: ``save_data`` holds the ``journal_latest`` record;
        ``status`` is "FAILED" (code 7) when the upstream parse failed, or
        when rawid/title come out empty, otherwise "SUCCESS".
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    # Upstream rule parse already failed: propagate as-is, nothing to ETL.
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        # Seems useless; flag value 10 is intentionally not enabled for now.
        #         if self.pm.task_tag == "cnkinet1starticle" and "except" in err_info.keys() and "dbcode" in err_info[
        #             "except"]:
        #             code = 10
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    journal_info = sql_model["journal_info"]
    article_info_json = json.loads(sql_model["article_info_json"])

    # "1_1" is the article detail page — presumably the abstract page; the
    # parsed field dict and the raw download share the same key.
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    # "YYYY-MM-DD HH:MM:SS" -> "YYYYMMDD"
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["batch"] = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    # Fixed sub-database id for the CNKI net-first product.
    sub_db_id = "00393"
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    data["rawid_alt"] = rawid
    # Stable record id derived from sub-db + rawid.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["is_deprecated"] = "0"
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = "CNKI"
    data["sub_db"] = "CJFD_NET1ST"
    data["provider"] = "CNKI"
    data["zt_provider"] = "cnkijournal_net1st"
    data["sub_db_id"] = sub_db_id
    data["source_type"] = "3"
    # data["provider_url"] = "http://kns.cnki.net/KCMS/detail/detail.aspx?dbcode=CJFD&dbname=CCJDLAST2&filename=" + rawid
    data["journal_raw_id"] = sql_model["journal_rawid"]
    data["journal_name"] = journal_info.get("journal_name", "")
    data["journal_name_alt"] = journal_info.get("journal_name_alt", "")
    data["issn"] = journal_info.get("issn", "")
    data["cnno"] = journal_info.get("cnno", "")
    # Country defaults to CN; only "美国" (USA) in pub_place flips it to US.
    country = "CN"
    if "美国" in journal_info.get("pub_place", ""):
        country = "US"
    data["country"] = country
    # Language defaults to ZH; "英文" (English) in the journal info -> EN.
    language = "ZH"
    if "英文" in journal_info.get("language", ""):
        language = "EN"
    data["language"] = language
    sel = Selector(src_data.html)
    # The hidden input id="copy" holds the canonical article query string.
    v = sel.xpath('//input[@id="copy"]/@value').extract_first()
    if not v:
        raise Exception('provider_url 解析错误')
    data["provider_url"] = f'https://kns.cnki.net/kcms2/article/abstract?{v}'

    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    # print(sel.xpath("//div[@class='wx-tit']/h1").extract())
    if checkExist(h1_title):
        # HTML of child tags (those carrying any attribute) to strip from
        # the title, e.g. decorative sup/span markers.
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        h1_use = tt_temp
        # Re-parse the stripped markup and take the flattened text.
        sel_h1 = Selector(h1_use)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())

    # Fallback: derive the title from <title>, dropping the site suffix.
    if len(data["title"]) < 1:
        title = sel.xpath("//title/text()").get()
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title

        # author_num = 0
        # author = ""
        # author_id = ""
        # author_1st = ""
        # list_au_span = sel.xpath("//div[@class='wx-tit']/h3[1]/span")
        # for au_span in list_au_span:
        #     author_num = author_num + 1
        #     au_a = au_span.xpath("./a")
        #     au_code = au_span.xpath("./input[@class='authorcode']/@value").get("")
        #     if checkExist(au_a):
        #         au_name = au_a.xpath("./text()").get().strip()
        #         author_nums = au_a.xpath("./sup/text()").get()
        #         if checkExist(author_nums):
        #             author += au_name + "[" + author_nums + "]" + ";"
        #         else:
        #             author += au_name + ";"
        #         if not checkExist(au_code):
        #             click_au = au_a.xpath("./@onclick").get()
        #             if checkExist(click_au):
        #                 tmp_click_au = re.findall("^.*','(\d+)'.*\);$", click_au)
        #                 if checkExist(tmp_click_au):
        #                     au_code = tmp_click_au[0].strip()
        #         if len(au_code) > 0:
        #             author_id += au_code + "@" + au_name + ";"
        #     else:
        #         au_name = au_span.xpath("./text()").get().strip()
        #         author += au_name + ";"
        # author = cleanSemicolon(author)
        # author_id = cleanSemicolon(author_id)
        # if len(author) > 0:
        #     vec = author.split(";")
        #     if len(vec):
        #         author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])
        # 20251029
    # Author parsing (rewritten 2025-10-29): each <span> under the first h3
    # is one author; a <sup> child carries the affiliation index, rendered
    # as "name[sup]".
    author_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
            if '<sup>' in span.extract():
                author_info = ''.join(span.xpath(".//text()[not(ancestor::sup)]").extract()).strip()
                sup = ''.join(span.xpath(".//sup/text()").extract()).strip()
                author = f'{author_info}[{sup}]'
            else:
                author = ''.join(span.xpath(".//text()").extract()).strip()
            author_list.append(author)
    author = ';'.join(author_list)
    author_id = ''
    # Added 2025-01-07: recover author codes from the side-channel
    # author_organ JSON by matching on exact author name.
    if (not author_id) and src_data.author_organ:
        author_info = json.loads(src_data.author_organ).get('author_info', [])
        _ids = list()
        for aut in author.split(';'):
            for item in author_info:
                if aut != item['author_name']:
                    continue
                _ids.append(f"{item['author_code']}@{aut}")
        author_id = ';'.join(_ids)
    # NOTE(review): author_1st is only bound when author is non-empty; the
    # later "',' in author" guard short-circuits before reading it unbound.
    if len(author) > 0:
        vec = author.split(";")
        if len(vec):
            author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])
    # Discard the author string if it contains no CJK or Latin letters.
    author = author if bool(re.search(r'[\u4e00-\u9fa5a-zA-Z]', author)) else ''
    data["author"] = author
    # Corresponding authors come from an inline JS variable: var cau = "...";
    corr_author_list = list()
    corr_s = re.findall('var cau = "(.*?)";', src_data.html)
    if corr_s:
        for corr_author in corr_s[0].split(';'):
            if corr_author:
                corr_author_list.append(corr_author)
    data["corr_author"] = ';'.join(corr_author_list)
    data["author_id"] = author_id
    # First author = first ';' segment with its "[n]" affiliation marker cut.
    data["author_1st"] = re.sub('\[.*?\]', '', author.split(';')[0], 1)
    # data["author_num"] = str(author_num)
    # data["author"] = author if bool(re.search(r'[\u4e00-\u9fa5a-zA-Z]', author)) else ''
    # data["author_id"] = author_id if len(author_id) >= 8 else ''
    # data["author_1st"] = author_1st if bool(re.search(r'[\u4e00-\u9fa5a-zA-Z]', author_1st)) else ''
    # Emails come from a JS call setAUCommFlag('a1;a2','e1;e2'); pair them
    # positionally only when both lists have equal length.
    email = ""
    list_email = re.findall(r"setAUCommFlag\('([^']+)','([^']+)'\);", src_data.html)
    if checkExist(list_email):
        auLine = list_email[0][0]
        emailLine = list_email[0][1]
        auVec = auLine.split(";")
        emailVec = emailLine.split(";")
        if len(auVec) == len(emailVec):
            for i in range(0, len(emailVec)):
                email += emailVec[i].strip() + ":" + auVec[i].strip() + ";"
        email = cleanSemicolon(email)
    data["email"] = email
    organ = ""
    organ_id = ""
    organ_1st = ""
    # organ_h3 = sel.xpath("//div[@class='wx-tit']/h3[2]")
    # if checkExist(organ_h3):
    #     list_organs = set()
    #     list_og_a = organ_h3.xpath("./a")
    #     if not checkExist(list_og_a):
    #         list_og_a = organ_h3.xpath("./span/a")
    #     if checkExist(list_og_a):
    #         for a_og in list_og_a:
    #             og_name = ''.join(a_og.xpath(".//text()").extract()).strip()
    #             click_og = a_og.xpath("./@onclick").get()
    #             if checkExist(click_og) and "'in'" in click_og:
    #                 tmp_click_og = re.findall("^.*','(\d+)'.*\);$", click_og.strip())
    #                 if checkExist(tmp_click_og):
    #                     og_code = tmp_click_og[0].strip()
    #                     organ_id += og_code + "@" + re.sub("(\d+)\.", "", og_name) + ";"
    #             og_name = re.sub("(\d+)\.", r"[\1]", og_name + ";", count=1)
    #             list_organs.add(cleanSemicolon(og_name))
    #             # organ += og_name
    #     list_span_og = organ_h3.xpath("./span/text()")
    #     if not checkExist(list_span_og):
    #         list_span_og = organ_h3.xpath("./text()")
    #
    #     if checkExist(list_span_og):
    #         if len(list_span_og) == 1 and "2." in list_span_og.get():
    #             list_span_og = list_span_og.get().split("\n")
    #         else:
    #             tmp_list = []
    #             for span_og in list_span_og:
    #                 tmp_list.append(span_og.get())
    #             list_span_og = tmp_list
    #         for span_og in list_span_og:
    #             span_og = cleanSemicolon(span_og).encode('unicode_escape').decode('unicode_escape')
    #             og_name = cleanSemicolon(re.sub("(\d+)\.", r"[\1]", span_og + ";"))
    #             if len(og_name) > 0:
    #                 list_organs.add(cleanSemicolon(og_name))
    #     if len(list_organs) > 0:
    #         list_organs = list(list_organs)
    #         print(list_organs)
    #         if list_organs[0][0] == "[":
    #             list_organs = sorted(list_organs, key=lambda x: int(re.search(r'\[(\d+)\]', x).group(1)))  # organ sort implementation
    #         organ = ";".join(list_organs)
    # organ = cleanSemicolon(organ)
    # organ_id = cleanSemicolon(organ_id)
    # if len(organ) > 0:
    #     vec = organ.split(";")
    #     if len(vec) > 0:
    #         organ_1st = re.sub(r"^\[[,\d+]*?\\]", "", vec[0])
    # Organ parsing: each <span> under the second h3 is one affiliation;
    # a leading "N." index is rewritten as "[N]" and used for ordering.
    organ_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
            organ_name = ''.join(span.xpath(".//text()").extract()).strip()
            og_name = re.sub("(\d+)\.", r"[\1]", organ_name, count=1)
            organ_list.append(og_name)
    if organ_list:
        if re.findall(r'\[(\d+)\]', organ_list[0]):
            organ_list = sorted(organ_list, key=lambda x: int(re.search(r'\[(\d+)\]', x).group(1)))
        organ = ';'.join(organ_list)
        organ_1st = organ_list[0]
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = re.sub('\[.*?\]', '', organ.split(';')[0], 1)
    # Added 2025-07-22: when the whole author string is one comma-separated
    # segment (no '[n]' markers survived), re-split authors/organs on commas
    # — but only when no parentheses (which would indicate inline notes).
    if ',' in author and author_1st == author:
        if not any(i in author for i in ['(', ')', '（', '）']):
            author_list = [i.strip() for i in author.split(',')]
            data['author_1st'] = author_list[0]
            data['author'] = ';'.join(author_list)
        if not any(i in organ for i in ['(', ')', '（', '）', ';']):
            organ_list = [i.strip() for i in organ.split(',')]
            data['organ_1st'] = organ_list[0]
            data['organ'] = ';'.join(organ_list)
    # "<正>" marks an excerpt-of-first-paragraph abstract on CNKI pages.
    abstract_ = data["abstract"]
    abstract_type = ""
    if "<正>" in abstract_:
        data["abstract"] = abstract_.replace("<正>", "")
        abstract_type = "第一段"
    data["abstract_type"] = abstract_type
    fund_id = ""
    fund = ""
    fund_p = sel.xpath("//p[@class='funds']")
    if checkExist(fund_p):
        list_fund_a = fund_p.xpath("./a")
        if checkExist(list_fund_a):
            for a_fund in list_fund_a:
                fund_item = a_fund.xpath("./text()").get().strip()
                fund_item = re.sub(r"[；\s]+$", "", fund_item)
                fund += fund_item + ";"
                # Fund code is embedded in the link's onclick argument list.
                click_fund = a_fund.xpath("./@onclick").get()
                if checkExist(click_fund):
                    tmp_click_fund = re.findall("^.*','(\d+)'.*\);$", click_fund)
                    if checkExist(tmp_click_fund):
                        tmp_id = tmp_click_fund[0].strip()
                        fund_id += tmp_id + "@" + fund_item + ";"
    # NOTE(review): this overwrites the fund string built in the loop above
    # with the paragraph's full text; only fund_id from the loop survives.
    fund = sel.xpath("string(//p[@class='funds'])").get("")
    data["fund_id"] = cleanSemicolon(fund_id)
    data["fund"] = cleanSemicolon(fund)
    keyword = ""
    list_a_kw = sel.xpath("//p[@class='keywords']/a/text()")
    if checkExist(list_a_kw):
        for item in list_a_kw:
            keyword += re.sub(r"[;；\s]+$", "", item.get()) + ";"
    data["keyword"] = cleanSemicolon(keyword)
    # CLC (Chinese Library Classification) numbers, whitespace removed.
    clc_no = sel.xpath("//li[@class='top-space' and  contains(string(), '分类号')]/p/text()", ).get("").strip().replace(' ', '')
    clc_no_1st = ""
    if clc_no == "+":
        clc_no = ""
    if len(clc_no) > 0:
        vec = clc_no.split(";")
        if len(vec) > 0:
            clc_no_1st = vec[0]
    # Changed 2024-10-10: in a valid CLC number letters may only appear in
    # the first two positions; one bad segment invalidates the whole field.
    for clc in clc_no.split(';'):
        if bool(re.search(r'[a-zA-Z]', clc[2:])):
            clc_no = ''
            clc_no_1st = ''
            break
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    data["subject"] = cleanSemicolon(data["subject"])
    # Download count is kept as "count@YYYYMMDD"; non-numeric -> empty.
    down_cnt = data["down_cnt"].replace("下载：", "")
    if down_cnt.isdigit():
        down_cnt = "{}@{}".format(down_cnt, down_date)
    else:
        down_cnt = ""
    data["down_cnt"] = down_cnt
    # Page info format: "begin-end+jump", any part optional.
    jump_page = ""
    begin_page = ""
    end_page = ""
    line = data["page_info"].replace("页码：", "")
    data["page_info"] = line
    idx = line.find('+')
    if idx > 0:
        jump_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the plus sign and what follows
    idx = line.find('-')
    if idx > 0:
        end_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the hyphen and what follows
    begin_page = line.strip()
    if len(end_page) < 1:
        end_page = begin_page
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["page_cnt"] = data["page_cnt"].replace("页数：", "")
    # Fulltext availability inferred from which download buttons exist.
    fulltext_type = ""
    pdf = sel.xpath("//li[@class='btn-dlpdf' and  contains(string(), 'PDF下载')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//li[@class='btn-dlcaj' and  contains(string(), 'CAJ下载')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//li[@class='btn-html' and  contains(string(), 'HTML阅读')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    # Publication date: take "...：YYYY-MM-DD HH:MM:SS" after the fullwidth
    # colon, fall back to article_info_json["times"], then to current year.
    pub_date = data["pub_date"].strip()
    pub_year = ""
    if len(pub_date) > 0:
        pub_date = pub_date.split("：")[1].split(" ")[0]
        # e.g. "（录用定稿）网络首发时间：2021-03-19 17:27:32"
        #      (accepted-manuscript online-first timestamp)
        pub_year = pub_date.split("-")[0]
        pub_date = pub_date.replace("-", "")
    if pub_year == "":
        pub_date = article_info_json.get("times","")
        if pub_date:
            pub_year = pub_date.split("-")[0]
            pub_date = pub_date.split(" ")[0].replace("-", "")
    if pub_year == "":
        pub_year = datetime.datetime.now().strftime('%Y')
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # data_over = para_dicts["data"]["2_1"]
    # title_alt = data_over["title_alt"]
    # if is_chinese(title_alt):
    #     title_alt = ""
    # data["title_alt"] = title_alt
    # author_raw = cleanSemicolon(data_over["author_raw"]).replace("<author>", "")
    # if is_chinese(author_raw):
    #     author_raw = ""
    # data["author_raw"] = author_raw
    # abstract_alt = data_over["abstract_alt"].replace("<abstract>", "")
    # if is_chinese(abstract_alt):
    #     abstract_alt = ""
    # data["abstract_alt"] = abstract_alt.replace("<正>", "")
    # keyword_alt = cleanSemicolon(data_over["keyword_alt"])
    # if is_chinese(keyword_alt):
    #     keyword_alt = ""
    # data["keyword_alt"] = keyword_alt
    result.save_data = [{"table": "journal_latest", "data": data}]
    # Final validation: rawid and title are mandatory.
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkijournal_cnkinet1starticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkijournal_cnkinet1starticle_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    return result


def cnkijournal_cnkiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for regular CNKI (CJFD) journal articles.

    Normalizes the rule-parsed fields into a ``journal_latest`` row,
    attaches extra metadata rows via ``get_extra_meta``, and assembles the
    article's reference list (``journal_ref_latest``) from the downloaded
    reference pages via ``cnkiarticle_ref_info_parse``.

    Args:
        callmodel: project call model carrying ``para_dicts`` (rule-parse
            output incl. ``journal_rawids``), ``down_model`` (pages keyed
            "1_1" detail, "1_2" ref/cite counts, "1_3" references, "1_4"
            citations) and ``sql_model`` (task row) — structure assumed
            from usage here; confirm against the caller.

    Returns:
        EtlDealModel: ``save_data`` holds the article row plus optional
        extra-meta and reference rows; ``status`` reflects rawid/title
        validation; ``ref_state`` is set when the article has zero refs.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    # Upstream rule parse already failed: propagate as-is, nothing to ETL.
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    jrawids_dic = para_dicts["journal_rawids"]
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    journal_info = sql_model["journal_info"]
    article_info_json = json.loads(sql_model["article_info_json"])
    data = para_dicts["data"]["1_1"]
    src_data = down_model["1_1"]
    data_refcnt = para_dicts["data"]["1_2"]
    data_ref = down_model["1_3"].dict()
    # NOTE(review): data_cited is never read below — kept for parity only.
    data_cited = down_model["1_4"].dict()
    # "YYYY-MM-DD HH:MM:SS" -> "YYYYMMDD"
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    # Reference/citation counts come in two shapes: a flat dict with
    # REFERENCE/CITING keys, or a list of {"name", "value"} items.
    REFERENCE = 0
    CITING = 0
    if "REFERENCE" in data_refcnt.keys():
        REFERENCE = data_refcnt.get("REFERENCE", "0")
        CITING = data_refcnt.get("CITING", "0")
    else:
        for item in data_refcnt["data"]:
            if item["name"] == "references":
                REFERENCE = item["value"]
            if item["name"] == "citations":
                CITING = item["value"]
    data["ref_cnt"] = str(REFERENCE)
    # Cited count is stored as "count@YYYYMMDD" when numeric.
    cited_cnt = str(CITING)
    if cited_cnt.isdigit():
        cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed identifiers for the CNKI CJFD sub-database.
    sub_db_id = "00002"
    product = "CNKI"
    sub_db = "CJFD"
    provider = "CNKI"
    source_type = "3"
    rawid = data["rawid"]
    data["rawid_alt"] = rawid
    data["rawid_mysql"] = sql_model["rawid"]
    # Stable record id derived from sub-db + rawid.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["zt_provider"] = "cnkijournal"
    data["source_type"] = source_type
    # data["provider_url"] = "http://kns.cnki.net/KCMS/detail/detail.aspx?dbcode=" + data["dbcode"] + "&dbname=" + data[
    #     "dbname"] + "&filename=" + rawid
    data["journal_raw_id"] = sql_model["journal_rawid"]
    data["journal_name"] = journal_info["journal_name"]
    data["journal_name_alt"] = journal_info.get("journal_name_alt", "")
    data["issn"] = journal_info.get("issn", "")
    data["cnno"] = journal_info.get("cnno", "")
    data["column_info"] = article_info_json.get("column_info", "")

    # Country defaults to CN; only "美国" (USA) in pub_place flips it to US.
    country = "CN"
    if "美国" in journal_info.get("pub_place", ""):
        country = "US"
    data["country"] = country
    # Language: EN only when English-only; bilingual journals get "ZH;EN".
    language = "ZH"
    language_s = journal_info.get("language", "")
    if "英文" in language_s and "中文" not in language_s:
        language = "EN"
    if "英文" in language_s and "中文" in language_s:
        language = "ZH;EN"
    data["language"] = language

    sel = Selector(src_data.html)
    # The hidden input id="copy" holds the canonical article query string.
    v = sel.xpath('//input[@id="copy"]/@value').extract_first()
    if not v:
        raise Exception('provider_url 解析错误')
    data["provider_url"] = f'https://kns.cnki.net/kcms2/article/abstract?{v}'

    h1_title = sel.xpath("//div[@class='wx-tit']/h1").get()
    if checkExist(h1_title):
        # HTML of child tags (those carrying any attribute) to strip from
        # the title, e.g. decorative sup/span markers.
        h1_remove = sel.xpath("//div[@class='wx-tit']/h1/*[@*]").getall()
        tt_temp = h1_title
        for xx in h1_remove:
            tt_temp = tt_temp.replace(xx, "")
        h1_use = tt_temp
        # Re-parse the stripped markup and take the flattened text.
        sel_h1 = Selector(h1_use)
        h1_title = sel_h1.xpath("//h1")
        if checkExist(h1_title):
            data["title"] = cleanSemicolon(h1_title[0].xpath("string(.)").get())

    # Fallback: derive the title from <title>, dropping the site suffix.
    if len(data["title"]) < 1:
        title = sel.xpath("//title/text()").get()
        if "- 中国知网" in title:
            title = title.replace("- 中国知网", "")
        data["title"] = title
    # author_num = 0
    # author = ""
    # author_id = ""
    # author_1st = ""
    # list_au_span = sel.xpath("//div[@class='wx-tit']/h3[1]/span")
    # for au_span in list_au_span:
    #     author_num = author_num + 1
    #     au_a = au_span.xpath("./a")
    #     au_code = au_span.xpath("./input[@class='authorcode']/@value").get("")
    #     if checkExist(au_a):
    #         au_name = au_a.xpath("./text()").get().strip()
    #         author_nums = au_a.xpath("./sup/text()").get()
    #         if checkExist(author_nums):
    #             author += au_name + "[" + author_nums + "]" + ";"
    #         else:
    #             author += au_name + ";"
    #         if not checkExist(au_code):
    #             click_au = au_a.xpath("./@onclick").get()
    #             if checkExist(click_au):
    #                 tmp_click_au = re.findall("^.*','(\d+)'.*\);$", click_au)
    #                 if checkExist(tmp_click_au):
    #                     au_code = tmp_click_au[0].strip()
    #         if len(au_code) > 0:
    #             author_id += au_code + "@" + au_name + ";"
    #     else:
    #         au_name = au_span.xpath("./text()").get().strip()
    #         author += au_name + ";"
    # author = cleanSemicolon(author)
    # author_id = cleanSemicolon(author_id)
    # if len(author) > 0:
    #     vec = author.split(";")
    #     if len(vec):
    #         author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])
    # Author parsing (rewritten 2025-10-29): each <span> under the first h3
    # is one author; a <sup> child carries the affiliation index, rendered
    # as "name[sup]".
    author_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[1]/span'):
            if '<sup>' in span.extract():
                author_info = ''.join(span.xpath(".//text()[not(ancestor::sup)]").extract()).strip()
                sup = ''.join(span.xpath(".//sup/text()").extract()).strip()
                author = f'{author_info}[{sup}]'
            else:
                author = ''.join(span.xpath(".//text()").extract()).strip()
            author_list.append(author)
    author = ';'.join(author_list)
    author_id = ''
    # Added 2025-01-07: recover author codes from the side-channel
    # author_organ JSON by matching on exact author name.
    if (not author_id) and src_data.author_organ:
        author_info = json.loads(src_data.author_organ).get('author_info', [])
        _ids = list()
        for aut in author.split(';'):
            for item in author_info:
                if aut != item['author_name']:
                    continue
                _ids.append(f"{item['author_code']}@{aut}")
        author_id = ';'.join(_ids)
    # NOTE(review): author_1st is only bound when author is non-empty; the
    # later "',' in author" guard short-circuits before reading it unbound.
    if len(author) > 0:
        vec = author.split(";")
        if len(vec):
            author_1st = re.sub(r"\[[,\d+]*?\]$", "", vec[0])
    # Discard the author string if it contains no CJK or Latin letters.
    author = author if bool(re.search(r'[\u4e00-\u9fa5a-zA-Z]', author)) else ''
    data["author"] = author
    # Corresponding authors come from an inline JS variable: var cau = "...";
    corr_author_list = list()
    corr_s = re.findall('var cau = "(.*?)";', src_data.html)
    if corr_s:
        for corr_author in corr_s[0].split(';'):
            if corr_author:
                corr_author_list.append(corr_author)
    data["corr_author"] = ';'.join(corr_author_list)
    data["author_id"] = author_id
    # First author = first ';' segment with its "[n]" affiliation marker cut.
    data["author_1st"] = re.sub('\[.*?\]','', author.split(';')[0], 1)
    # data["author_num"] = str(author_num)
    # data["author"] = author if bool(re.search(r'[\u4e00-\u9fa5a-zA-Z]', author)) else ''
    # data["author_id"] = author_id if len(author_id) >= 8 else ''
    # data["author_1st"] = author_1st if bool(re.search(r'[\u4e00-\u9fa5a-zA-Z]', author_1st)) else ''
    # Emails come from a JS call setAUCommFlag('a1;a2','e1;e2'); pair them
    # positionally only when both lists have equal length.
    email = ""
    list_email = re.findall(r"setAUCommFlag\('([^']+)','([^']+)'\);", src_data.html)
    if checkExist(list_email):
        auLine = list_email[0][0]
        emailLine = list_email[0][1]
        auVec = auLine.split(";")
        emailVec = emailLine.split(";")
        if len(auVec) == len(emailVec):
            for i in range(0, len(emailVec)):
                email += emailVec[i].strip() + ":" + auVec[i].strip() + ";"
        email = cleanSemicolon(email)
    data["email"] = email
    organ = ""
    organ_id = ""
    organ_1st = ""
    # organ_h3 = sel.xpath("//div[@class='wx-tit']/h3[2]")
    # if checkExist(organ_h3):
    #     list_organs = set()
    #     list_og_a = organ_h3.xpath("./a")
    #     if not checkExist(list_og_a):
    #         list_og_a = organ_h3.xpath("./span/a")
    #     if checkExist(list_og_a):
    #         for a_og in list_og_a:
    #             og_name = ''.join(a_og.xpath(".//text()").extract()).strip()
    #             click_og = a_og.xpath("./@onclick").get()
    #             if checkExist(click_og) and "'in'" in click_og:
    #                 tmp_click_og = re.findall("^.*','(\d+)'.*\);$", click_og.strip())
    #                 if checkExist(tmp_click_og):
    #                     og_code = tmp_click_og[0].strip()
    #                     organ_id += og_code + "@" + re.sub("(\d+)\.", "", og_name) + ";"
    #             og_name = re.sub("(\d+)\.", r"[\1]", og_name + ";", count=1)
    #             list_organs.add(cleanSemicolon(og_name))
    #             # organ += og_name
    #     list_span_og = organ_h3.xpath("./span/text()")
    #     if not checkExist(list_span_og):
    #         list_span_og = organ_h3.xpath("./text()")
    #
    #     if checkExist(list_span_og):
    #         if len(list_span_og) == 1 and "2." in list_span_og.get():
    #             list_span_og = list_span_og.get().split("\n")
    #         else:
    #             tmp_list = []
    #             for span_og in list_span_og:
    #                 tmp_list.append(span_og.get())
    #             list_span_og = tmp_list
    #         for span_og in list_span_og:
    #             span_og = cleanSemicolon(span_og).encode('unicode_escape').decode('unicode_escape')
    #             og_name = cleanSemicolon(re.sub("(\d+)\.", r"[\1]", span_og + ";"))
    #             if len(og_name) > 0:
    #                 list_organs.add(cleanSemicolon(og_name))
    #     if len(list_organs) > 0:
    #         list_organs = list(list_organs)
    #         print(list_organs)
    #         if list_organs[0][0] == "[":
    #             list_organs = sorted(list_organs, key=lambda x: int(re.search(r'\[(\d+)\]', x).group(1)))  # organ sort implementation
    #         organ = ";".join(list_organs)
    # organ = cleanSemicolon(organ)
    # organ_id = cleanSemicolon(organ_id)
    # if len(organ) > 0:
    #     vec = organ.split(";")
    #     if len(vec) > 0:
    #         organ_1st = re.sub(r"^\[[,\d+]*?\\]", "", vec[0])
    # Organ parsing: each <span> under the second h3 is one affiliation;
    # a leading "N." index is rewritten as "[N]" and used for ordering.
    organ_list = list()
    if sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
        for span in sel.xpath('//div[@class="wx-tit"]/h3[2]/span'):
            organ_name = ''.join(span.xpath(".//text()").extract()).strip()
            og_name = re.sub("(\d+)\.", r"[\1]", organ_name, count=1)
            organ_list.append(og_name)
    if organ_list:
        if re.findall(r'\[(\d+)\]', organ_list[0]):
            organ_list = sorted(organ_list, key=lambda x: int(re.search(r'\[(\d+)\]', x).group(1)))
        organ = ';'.join(organ_list)
        organ_1st = organ_list[0]
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = re.sub('\[.*?\]','', organ.split(';')[0], 1)
    # Added 2025-07-22: when the whole author string is one comma-separated
    # segment (no '[n]' markers survived), re-split authors/organs on commas
    # — but only when no parentheses (which would indicate inline notes).
    if ',' in author and author_1st == author:
        if not any(i in author for i in ['(', ')', '（', '）']):
            author_list = [i.strip() for i in author.split(',')]
            data['author_1st'] = author_list[0]
            data['author'] = ';'.join(author_list)
        if not any(i in organ for i in ['(', ')', '（', '）', ';']):
            organ_list = [i.strip() for i in organ.split(',')]
            data['organ_1st'] = organ_list[0]
            data['organ'] = ';'.join(organ_list)
    # "<正>" marks an excerpt-of-first-paragraph abstract on CNKI pages.
    abstract_ = data["abstract"]
    abstract_type = ""
    if "<正>" in abstract_:
        data["abstract"] = abstract_.replace("<正>", "")
        abstract_type = "第一段"
    data["abstract_type"] = abstract_type
    fund_id = ""
    fund = ""
    fund_p = sel.xpath("//p[@class='funds']")
    if checkExist(fund_p):
        list_fund_a = fund_p.xpath("./a")
        if checkExist(list_fund_a):
            for a_fund in list_fund_a:
                fund_item = a_fund.xpath("./text()").get().strip()
                fund_item = re.sub(r"[；\s]+$", "", fund_item)
                fund += fund_item + ";"
                # Fund code is embedded in the link's onclick argument list.
                click_fund = a_fund.xpath("./@onclick").get()
                if checkExist(click_fund):
                    tmp_click_fund = re.findall("^.*','(\d+)'.*\);$", click_fund)
                    if checkExist(tmp_click_fund):
                        tmp_id = tmp_click_fund[0].strip()
                        fund_id += tmp_id + "@" + fund_item + ";"
    # NOTE(review): this overwrites the fund string built in the loop above
    # with the paragraph's full text; only fund_id from the loop survives.
    fund = sel.xpath("string(//p[@class='funds'])").get("")
    data["fund_id"] = cleanSemicolon(fund_id)
    data["fund"] = cleanSemicolon(fund)
    keyword = ""
    list_a_kw = sel.xpath("//p[@class='keywords']/a")
    if checkExist(list_a_kw):
        for item in list_a_kw:
            keyword += re.sub(r"[;；\s]+$", "", item.xpath("string(.)").get("")) + ";"
    data["keyword"] = cleanSemicolon(keyword)
    # CLC (Chinese Library Classification) numbers, whitespace removed.
    clc_no = sel.xpath("//li[@class='top-space' and  contains(string(), '分类号')]/p/text()", ).get("").strip().replace(' ', '')
    clc_no_1st = ""
    if clc_no == "+":
        clc_no = ""
    if len(clc_no) > 0:
        vec = clc_no.split(";")
        if len(vec) > 0:
            clc_no_1st = vec[0]
    # Changed 2024-10-10: in a valid CLC number letters may only appear in
    # the first two positions; one bad segment invalidates the whole field.
    for clc in clc_no.split(';'):
        if bool(re.search(r'[a-zA-Z]', clc[2:])):
            clc_no = ''
            clc_no_1st = ''
            break
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    data["subject"] = cleanSemicolon(data["subject"])
    # Download count is kept as "count@YYYYMMDD"; non-numeric -> empty.
    down_cnt = data["down_cnt"].replace("下载：", "")
    if down_cnt.isdigit():
        down_cnt = "{}@{}".format(down_cnt, down_date)
    else:
        down_cnt = ""
    data["down_cnt"] = down_cnt
    # Page info format: "begin-end+jump", any part optional.
    jump_page = ""
    begin_page = ""
    end_page = ""
    line = data["page_info"]
    # Fallback: pull page info/count from the cached search-result HTML kept
    # on the download object's state_dict — TODO confirm producer side.
    if len(line.strip()) == 0:
        state_dict = src_data.__dict__.get("state_dict", "")
        if len(state_dict) > 0:
            search_html = state_dict.get("search_html", "")
            page_sel = Selector(search_html)
            line = page_sel.xpath("//p[@class='total-inform']/span[contains(string(), '页码')]/text()").get("").strip()
            data["page_cnt"] = page_sel.xpath("//p[@class='total-inform']/span[contains(string(), '页数')]/text()").get(
                "").strip()
    line = line.replace("页码：", "")
    data["page_info"] = line
    idx = line.find('+')
    if idx > 0:
        jump_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the plus sign and what follows
    idx = line.find('-')
    if idx > 0:
        end_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the hyphen and what follows
    begin_page = line.strip()
    if len(end_page) < 1:
        end_page = begin_page
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["page_cnt"] = data["page_cnt"].replace("页数：", "")
    # Fulltext availability inferred from which download buttons exist.
    fulltext_type = ""
    pdf = sel.xpath("//li[@class='btn-dlpdf' and  contains(string(), 'PDF下载')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    caj = sel.xpath("//li[@class='btn-dlcaj' and  contains(string(), 'CAJ下载')]")
    if checkExist(caj):
        fulltext_type += ";caj"
    html = sel.xpath("//li[@class='btn-html' and  contains(string(), 'HTML阅读')]")
    if checkExist(html):
        fulltext_type += ";html"
    data["fulltext_type"] = cleanSemicolon(fulltext_type)
    # Volume is embedded in the year-nav link text, e.g. "2018,38(08)".
    text_vol = sel.xpath("//div[@class='top-tip']/span/a[contains(@onclick,'YearNav')]/text()").get("")
    if not checkExist(text_vol):
        text_vol = sel.xpath("//div[@class='top-tip']/span/a/text()").get("")
    vol = ""
    if "," in text_vol and "(" in text_vol:
        # 2018,38(08)
        vol = text_vol.split(",")[1].split("(")[0]
    data["vol"] = vol
    # The task rawid is "journal,year,issue" — presumably; confirm upstream.
    tmp_rawds = sql_model["rawid"].split(",")
    data["num"] = tmp_rawds[2]
    pub_date = ""
    pub_year = tmp_rawds[1]
    if pub_year != "":
        pub_date = pub_year + "0000"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # 2023-04-10: the CNKI overseas edition no longer exists.
    # data_over = para_dicts["data"]["2_1"]
    # src_data = down_model["2_1"]
    # sel = Selector(src_data.html)
    # title_alt = ""
    # h1_title = sel.xpath("//h1").get()
    # if checkExist(h1_title):
    #     # html of the child tags that must be stripped
    #     h1_remove = sel.xpath("//h1/*[@*]").getall()
    #     tt_temp = h1_title
    #     for xx in h1_remove:
    #         tt_temp = tt_temp.replace(xx, "")
    #     h1_use = tt_temp
    #     sel_h1 = Selector(h1_use)
    #     h1_title_alt = sel_h1.xpath("//h1")
    #     if checkExist(h1_title_alt):
    #         title_alt = cleanSemicolon(h1_title_alt[0].xpath("string(.)").get())
    # if is_chinese(title_alt):
    #     title_alt = ""
    # data["title_alt"] = title_alt
    # author_raw = cleanSemicolon(data_over["author_raw"]).replace("<author>", "")
    # if is_chinese(author_raw):
    #     author_raw = ""
    # data["author_raw"] = author_raw
    # abstract_alt = data_over["abstract_alt"].replace("<abstract>", "")
    # if is_chinese(abstract_alt):
    #     abstract_alt = ""
    # data["abstract_alt"] = abstract_alt.replace("<正>", "")
    # keyword_alt = cleanSemicolon(data_over["keyword_alt"])
    # if is_chinese(keyword_alt):
    #     keyword_alt = ""
    # data["keyword_alt"] = keyword_alt
    # Final validation: rawid and title are mandatory.
    status = "FAILED"
    err_msg = ""
    if len(data["rawid"]) < 1:
        err_msg = "cnkijournal_cnkiarticle_etl_callback 解析rawid出错"
    elif len(data["title"]) < 1:
        err_msg = "cnkijournal_cnkiarticle_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    save_data = []
    save_data.append({"table": "journal_latest", "data": data})
    # result.save_data = data
    # Additional derived rows (sibling helper defined elsewhere in file).
    extra_meta = get_extra_meta(data, jrawids_dic)
    save_data.extend(extra_meta)
    # Gather every downloaded reference page: the first page plus every
    # per-type paginated page, keyed "type_page" for the parser.
    ref_id = ""
    list_ref = []
    repeat_cnt = 0
    allref = {"first": data_ref["first_page"]["html"]}
    refs = data_ref["type_page"]["type_code"]
    for key in refs.keys():
        page_html = refs[key]["page_html"]
        for key_page in page_html.keys():
            la_key = "_".join([key, key_page])
            html = page_html[key_page]["html"]
            allref[la_key] = html
    if len(allref) > 0:
        ref_down_date = data_ref["first_page"]["down_date"].split(" ")[0].replace("-", "")
        ref_id, list_ref, repeat_cnt = cnkiarticle_ref_info_parse(data, allref, ref_down_date)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = pub_year
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        # ref_id is ';'-style concatenated with a trailing separator — strip it.
        ref_data["ref_id"] = ref_id[:-1]
        ref_data["refer_info"] = list_ref
        ref_data["repeat_cnt"] = repeat_cnt
        save_data.append({"table": "journal_ref_latest", "data": ref_data})
    elif str(data["ref_cnt"]) == "0":
        # Article legitimately has zero references — record that state.
        result.ref_state = {
            "lngid": lngid
        }
    result.save_data = save_data
    return result


def cnkijournal_cnkiarticle_ref_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback that parses the reference (citation) pages of one CNKI
    journal article and emits a ``journal_ref_latest`` row.

    :param callmodel: platform callback model; ``para_dicts`` carries the
        upstream parse result and ``down_model["1_3"]`` the downloaded
        reference pages.
    :return: an ``EtlDealModel`` whose ``save_data`` holds the rows to save;
        ``ref_state`` is set instead when the article declares zero refs.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    # Propagate an upstream parse failure instead of processing bad data.
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    data = para_dicts["meta_info"]
    down_model = callmodel.down_model.down_dict
    data_ref = down_model["1_3"].dict()
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    sub_db_id = "00002"
    source_type = "3"
    save_data = []
    ref_id = ""
    list_ref = []
    repeat_cnt = 0
    # Default ref_down_date up front: it was previously only bound inside the
    # `if allref` branch but read unconditionally below (latent NameError).
    ref_down_date = ""
    # Collect the raw html of the first reference page plus every typed page,
    # keyed "<type>_<page>" so the parser can recover the db code.
    allref = {"first": data_ref["first_page"]["html"]}
    refs = data_ref["type_page"]["type_code"]
    for key in refs.keys():
        page_html = refs[key]["page_html"]
        for key_page in page_html.keys():
            la_key = "_".join([key, key_page])
            html = page_html[key_page]["html"]
            allref[la_key] = html
    if len(allref) > 0:
        # "2023-01-02 03:04:05" -> "20230102"
        ref_down_date = data_ref["first_page"]["down_date"].split(" ")[0].replace("-", "")
        ref_id, list_ref, repeat_cnt = cnkiarticle_ref_info_parse(data, allref, ref_down_date)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = data["lngid"]
        ref_data["lngid"] = data["lngid"]
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = data["pub_year"]
        ref_data["batch"] = batch
        ref_data["down_date"] = ref_down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        ref_data["ref_id"] = ref_id[:-1]  # drop the trailing ";"
        ref_data["refer_info"] = list_ref
        ref_data["repeat_cnt"] = repeat_cnt
        save_data.append({"table": "journal_ref_latest", "data": ref_data})
    elif str(data["ref_cnt"]) == "0":
        # Compare as strings so int 0 and "0" behave alike, consistent with
        # cnkijournal_cnkiarticle_etl_callback.
        result.ref_state = {
            "lngid": data["lngid"]
        }
    result.save_data = save_data
    return result


def getStrtype(tp, tpcn):
    """Map a CNKI db code or Chinese type name to a one-letter record type.

    ``tp`` is tried first, then ``tpcn``; "K" (unknown) is the fallback.
    """
    type_map = {
        "期刊": "J",
        "CJFQ": "J",
        "CJFD": "J",
        "SSJD": "J",
        "JOURNAL": "J",
        "QUOTATION_INT_JOURNAL": "J",
        "国际期刊": "J",
        "硕士": "D",
        "博士": "D",
        "CDFD": "D",
        "CMFD": "D",
        "DISSERTATION_MD": "D",
        "DISSERTATION_PHD": "D",
        "国内会议": "C",
        "国际会议": "C",
        "CPFD": "C",
        "IPFD": "C",
        "CONFERENCE_INT": "C",
        "CONFERENCE_CHN": "C",
        "专利": "P",
        "PATENT_CHN": "P",
        "SCPD": "P",
        "图书": "M",
        "BOOK": "M",
        "CBBD": "M",
        "报纸": "N",
        "CCND": "N",
        "NEWSPAPER": "N",
        "标准": "S",
        "SCSD": "S",
        "STANDARD_CHN": "S",
    }
    for candidate in (tp, tpcn):
        mapped = type_map.get(candidate, "")
        if mapped:
            return mapped
    return "K"


def get_strtype(tp):
    """Resolve a CNKI db code to ``(letter_type, chinese_name, sub_db_id)``.

    Unknown codes yield ``("K", "", "")``; the three lookups are independent,
    so a code may resolve in one table and not another.
    """
    cn_map = {
        "CJFD": "期刊",
        "CJFQ": "期刊",
        "CDFD": "博士",
        "CMFD": "硕士",
        "CPFD": "中国会议",
        "IPFD": "国际会议",
        "CCND": "报纸",
        "CYFD": "年鉴",
        "SCPD": "中国专利",
        "SOPD": "国外专利",
        "SCSF": "国家标准",
        "SCHF": "行业标准",
        "SCSD": "中国标准",
        "SOSD": "国外标准",
        "SNAD": "科技成果",
        "SSJD": "国际期刊",
        "WFBREF": "中外文题录",
        "CRLDENG": "中外文题录",
        "BOOK": "图书",
        "CBBD": "图书",
    }
    # Grouped by letter type, then inverted into a flat code -> letter map.
    letter_groups = {
        "J": ("CJFD", "CJFQ", "SSJD", "JOURNAL", "WWJD", "QUOTATION_INT_JOURNAL"),
        "D": ("CDFD", "CMFD", "DISSERTATION_MD", "DISSERTATION_PHD"),
        "C": ("CPFD", "IPFD", "CONFERENCE_INT", "CONFERENCE_CHN"),
        "N": ("CCND", "NEWSPAPER"),
        "P": ("SCPD", "SOPD", "PATENT_CHN"),
        "S": ("SCSF", "SCHF", "STANDARD_CHN", "SCSD", "SOSD"),
        "M": ("BOOK", "CBBD"),
    }
    letter_map = {code: letter
                  for letter, codes in letter_groups.items()
                  for code in codes}
    sub_map = {
        "CJFD": "00002",
        "CJFQ": "00002",
        "CDFD": "00310",
        "CMFD": "00075",
        "CPFD": "00090",
        "IPFD": "00091",
        "CCND": "00080",
        "SCPD": "00003",
        "SCSF": "00085",
        "SCHF": "00086",
        "SCSD": "00083",
        "SOSD": "00084",
        "SNAD": "00275",
    }
    return letter_map.get(tp, "K"), cn_map.get(tp, ""), sub_map.get(tp, "")


def cnkiarticle_ref_info_parse(meta, allref, ref_down_date):
    """Parse downloaded CNKI reference JSON pages into reference records.

    References are grouped by (type, citation text) for deduplication;
    within a group, entries that carry a linked id are kept once per
    distinct id, and entries without one keep only the first occurrence.

    :param meta: article metadata; needs "rawid", "lngid" and "pub_year".
    :param allref: mapping of page key -> raw reference-page JSON text.
    :param ref_down_date: download date of the pages (unused here; kept for
        signature compatibility with the callers).
    :return: ``(ref_id, list_ref, repeat_cnt)`` — the ";"-joined id string
        (with trailing ";"), the reference dicts, and the duplicate count
        as a string.
    """
    ref_id = ""
    list_ref = []
    idx = 0
    ref_dic = {}
    repeat_cnt = 0
    for page_key, raw_text in allref.items():
        if len(raw_text) > 0:
            page = json.loads(raw_text)
            rlist = page["data"]["data"]
            # Page keys look like "<dbcode>REF..."; the prefix is the db code.
            tp = page_key.split("REF")[0].upper()
            strtype, tpcn, ref_sub_db_id = get_strtype(tp)
            for entry in rlist:
                ref_one = {}
                ref_one["sub_db_id"] = ref_sub_db_id
                ref_one["cited_rawid"] = meta["rawid"]
                ref_one["cited_lngid"] = meta["lngid"]
                ref_one["cited_pub_year"] = meta["pub_year"]
                ref_one["strtype_raw"] = tp
                ref_one["strtype_cn"] = tpcn
                ref_one["strtype"] = strtype
                ref_one = cnkithesisarticle_ref_parse(ref_one, entry)
                ref_one.pop("sub_db_id")
                refer_text_site = ref_one["refer_text_site"]
                # Skip empty / degenerate citation texts.
                if refer_text_site != "." and len(refer_text_site) > 1:
                    group_key = "{}_{}".format(tpcn, refer_text_site)
                    if group_key not in ref_dic:
                        ref_dic[group_key] = {
                            "has_id": {},
                            "no_id": {}
                        }
                    if len(ref_one["linked_id"]) > 0:
                        ref_dic[group_key]["has_id"][ref_one["linked_id"] + "_" + ref_one["ref_index"]] = ref_one
                    else:
                        ref_dic[group_key]["no_id"][ref_one["ref_index"]] = ref_one
    for group in ref_dic.values():
        # Entries with a linked id: keep one record per distinct id.
        # BUG FIX: the seen-id set was previously never populated, so the
        # duplicate check could never fire.
        seen_ids = set()
        for ref_one in group["has_id"].values():
            if ref_one["linked_id"] in seen_ids:
                repeat_cnt = repeat_cnt + 1
            else:
                seen_ids.add(ref_one["linked_id"])
                idx += 1
                ref_lngid = "{}{}".format(meta["lngid"], str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_id = ref_id + ref_lngid + ";"
                list_ref.append(ref_one)
        # Entries without a linked id: keep only the first occurrence.
        is_first = True
        for ref_one in group["no_id"].values():
            if not is_first:
                repeat_cnt = repeat_cnt + 1
                continue
            idx += 1
            ref_lngid = "{}{}".format(meta["lngid"], str(idx).zfill(4))
            ref_one["lngid"] = ref_lngid
            ref_one["keyid"] = ref_lngid
            ref_id = ref_id + ref_lngid + ";"
            list_ref.append(ref_one)
            is_first = False
    return ref_id, list_ref, str(repeat_cnt)


def get_json_val(dval, key):
    """Fetch *key* from a CNKI metadata container.

    ``dval`` is either a list of ``{"name": ..., "value": ...}`` entries or a
    plain dict. Missing keys, ``None`` and the literal string "null" all
    normalize to the empty string.
    """
    def _clean(raw):
        # Normalize the provider's null markers to "".
        return "" if raw is None or raw == "null" else raw

    if isinstance(dval, list):
        for entry in dval:
            if entry["name"] == key:
                return _clean(entry.get("value"))
        return ""
    return _clean(dval.get(key, ""))


def cnkithesisarticle_ref_parse(data, ref_json):
    """Fill one reference record from a parsed CNKI reference JSON entry.

    :param data: pre-seeded dict (must contain "strtype" and "sub_db_id");
        mutated in place and also returned.
    :param ref_json: one reference entry with a "metadata" section and an
        optional "source" section (journal-like entries).
    :return: ``data`` with title/author/page/link fields filled in plus the
        reconstructed citation string in "refer_text_site".
    """
    if "source" in ref_json:
        source = ref_json["source"]
    else:
        source = {}
    strtype = data["strtype"]
    metadata = ref_json["metadata"]
    # "FN" carries the raw CNKI filename of the cited record.
    old_linked_id = get_json_val(metadata, "FN")
    data["old_linked_id"] = old_linked_id
    if len(old_linked_id) > 0 and len(data["sub_db_id"]) > 0:
        data["linked_id"] = BaseLngid().GetLngid(data["sub_db_id"], old_linked_id)
    else:
        data["linked_id"] = ""
    data["title"] = get_json_val(metadata, "TI")
    # Normalize author separators to a single ";" and drop a trailing one.
    author = get_json_val(metadata, "AU").replace(";;", ";").replace(",", ";")
    if len(author) > 0 and author[-1] == ";":
        author = author[:-1]
    data["author"] = author
    site_source_name = ""
    pub_place = ""
    site_pub_year = ""

    if len(source) > 0:
        # Journal-like entries: source block holds title/year/volume/issue.
        site_source_name = get_json_val(source, "title")
        site_pub_year = get_json_val(source, "year")
        data["source_name"] = site_source_name
        data["pub_year"] = site_pub_year
        vol = get_json_val(source, "volume")
        if vol is None:
            vol = ""
        data["vol"] = vol
        data["num"] = get_json_val(source, "issue")
        if strtype == "N":
            # Newspapers use the date field "DT" instead of the year.
            site_pub_year = get_json_val(metadata, "DT")
    else:
        # No source block: fall back to metadata fields per record type.
        site_pub_year = get_json_val(metadata, "YE")
        data["pub_year"] = site_pub_year
        data["vol"] = ""
        data["num"] = get_json_val(metadata, "QI")
        if strtype == "M":
            # Books: "LY" is the publisher, "出版地" the place of publication.
            site_source_name = get_json_val(metadata, "LY")
            pub_place = get_json_val(metadata, "出版地")
            data["publisher"] = site_source_name
        elif strtype == "P":
            # Patents: publication date "PD", country "DB", number "GKH".
            site_pub_year = get_json_val(metadata, "PD")
            pub_place = get_json_val(metadata, "DB")
            site_source_name = get_json_val(metadata, "GKH")
            data["source_name"] = site_source_name
        elif strtype == "S":
            # Standards: the standard number acts as the source name.
            site_source_name = get_json_val(metadata, "标准号")
            data["source_name"] = site_source_name
        else:
            site_source_name = get_json_val(metadata, "LY")
            data["source_name"] = site_source_name
    # "PM" is the page info, e.g. "12-15+20" (begin-end + jump page).
    line = get_json_val(metadata, "PM")
    begin_page = ""
    end_page = ""
    jump_page = ""
    data["page_info"] = line
    idx = line.find('+')
    if idx > 0:
        jump_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the plus sign and everything after it
    idx = line.find('-')
    if idx > 0:
        end_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # drop the hyphen and everything after it
    begin_page = line.strip()
    if len(end_page) < 1:
        end_page = begin_page
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["doi"] = get_json_val(metadata, "DOI")
    ref_index = get_json_val(metadata, "index")
    data["ref_index"] = ref_index
    refer_text_raw = json.dumps(ref_json, ensure_ascii=False)
    data["refer_text_raw"] = refer_text_raw
    # Rebuild a citation string in the conventional
    # "authors.title[type].place:source,year,vol(num):pages" layout.
    refer_text_site = ""
    if strtype == "S":
        if len(site_source_name) > 0:
            refer_text_site += site_source_name + ","
        if len(data["title"]) > 0:
            refer_text_site += data["title"] + "[{}].".format(data["strtype"])
    else:
        if len(author) > 0:
            refer_text_site += author.replace(";", ",") + "."
        if len(data["title"]) > 0:
            refer_text_site += data["title"] + "[{}].".format(data["strtype"])
        if len(pub_place) > 0:
            refer_text_site += pub_place + ":"
        if len(site_source_name) > 0:
            refer_text_site += site_source_name + ","
        if len(site_pub_year) > 0:
            refer_text_site += site_pub_year
        if len(refer_text_site) > 0 and refer_text_site[-1] == ",":
            refer_text_site = refer_text_site[:-1]
        if strtype == "J":
            if len(data["vol"]) > 0:
                refer_text_site += "," + data["vol"]
            if len(data["num"]) > 0:
                refer_text_site += "({})".format(data["num"])
        if strtype in ("J", "M", "D", "C"):
            if len(data["begin_page"]) > 0:
                refer_text_site += ":" + data["begin_page"]
                if len(data["end_page"]) > 0:
                    refer_text_site += "-" + data["end_page"]
    data["refer_text_site"] = refer_text_site
    return data


def cnkiarticle_ref_info_parse_no_use(meta, allref, ref_down_date):
    """Legacy HTML-based reference parser (superseded by
    ``cnkiarticle_ref_info_parse``, which works on the JSON pages).

    :param meta: article metadata; needs "rawid", "lngid" and "sub_db_id".
    :param allref: mapping of page key -> raw reference-page HTML.
    :param ref_down_date: download date (unused; kept for signature parity).
    :return: ``(ref_id, list_ref, repeat_cnt)`` as in the JSON variant.
    """
    ref_id = ""
    list_ref = []
    idx = 0
    ref_dic = {}
    repeat_cnt = 0
    for k, v in allref.items():
        sel_ref = Selector(v)
        list_ref_div = sel_ref.xpath("//div[@class='essayBox']")
        if checkExist(list_ref_div):
            for div_sel in list_ref_div:
                # Each essayBox groups references of one db type.
                tpcn = div_sel.xpath("./div[@class='dbTitle']/text()").get("").strip()
                tp = div_sel.xpath("./div[@class='dbTitle']/b[@class='titleTotle']/span[@name='pcount']/@id").get(
                    "").replace("pc_", "")
                if len(tp) > 0:
                    tp = tp.upper()
                list_li = div_sel.xpath("./ul/li")
                for li_sel in list_li:
                    ref_one = {}
                    ref_one["sub_db_id"] = meta["sub_db_id"]
                    ref_one["cited_rawid"] = meta["rawid"]
                    ref_one["cited_lngid"] = meta["lngid"]
                    ref_one["strtype_raw"] = tp
                    ref_one["strtype_cn"] = tpcn
                    ref_one["strtype"] = getStrtype(tp, tpcn)
                    # BUG FIX: an HTML <li> selector must go through the
                    # selector-based parser; the JSON parser indexes
                    # ref_json["metadata"] and would raise on a Selector.
                    ref_one = cnkithesisarticle_ref_parse_no_use(ref_one, li_sel)
                    ref_one.pop("sub_db_id")
                    refer_text_site = ref_one["refer_text_site"]
                    if refer_text_site != "." and len(refer_text_site) > 1:
                        keys = "{}_{}".format(tpcn, refer_text_site)
                        if keys not in ref_dic:
                            ref_dic[keys] = {
                                "has_id": {},
                                "no_id": {}
                            }
                        if len(ref_one["linked_id"]) > 0:
                            ref_dic[keys]["has_id"][ref_one["linked_id"] + "_" + ref_one["ref_index"]] = ref_one
                        else:
                            ref_dic[keys]["no_id"][ref_one["ref_index"]] = ref_one
    for group in ref_dic.values():
        # Entries with a linked id: keep one record per distinct id.
        # BUG FIX: the seen-id set was previously never populated, so the
        # duplicate check could never fire.
        seen_ids = set()
        for ref_one in group["has_id"].values():
            if ref_one["linked_id"] in seen_ids:
                repeat_cnt = repeat_cnt + 1
            else:
                seen_ids.add(ref_one["linked_id"])
                idx += 1
                ref_lngid = "{}{}".format(meta["lngid"], str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_id = ref_id + ref_lngid + ";"
                list_ref.append(ref_one)
        # Entries without a linked id: keep only the first occurrence.
        is_first = True
        for ref_one in group["no_id"].values():
            if not is_first:
                repeat_cnt = repeat_cnt + 1
                continue
            idx += 1
            ref_lngid = "{}{}".format(meta["lngid"], str(idx).zfill(4))
            ref_one["lngid"] = ref_lngid
            ref_one["keyid"] = ref_lngid
            ref_id = ref_id + ref_lngid + ";"
            list_ref.append(ref_one)
            is_first = False
    return ref_id, list_ref, str(repeat_cnt)


def cnkithesisarticle_ref_parse_no_use(data, sel_ref):
    """Legacy HTML-based single-reference parser (superseded by the JSON
    variant ``cnkithesisarticle_ref_parse``).

    Extracts the raw html, a normalized citation text, the linked CNKI id
    and the reference index from one ``<li>`` selector into ``data``.
    """
    data["refer_text_raw"] = sel_ref.extract()
    # Strip the leading "[n]" numbering, then collapse whitespace artifacts.
    site_text = re.sub(r"^\[\d+?\]", "", cleanSemicolon(sel_ref.xpath("string(.)").get()))
    site_text = re.sub("\\s+", " ", site_text)
    data["refer_text_site"] = site_text.replace("\n", "").replace("\r", "").replace("&nbsp", "").strip()
    linked_id = ""
    # Primary source of the linked id: the "filename=" query parameter.
    href_sel = sel_ref.xpath("./a[contains(@href, 'filename=')]").xpath("./@href")
    if checkExist(href_sel):
        query = href_sel.get().split("?")[1]
        for part in query.split("&"):
            if "filename" in part:
                linked_id = part.replace("filename=", "")
    # Fallback: second "!"-separated field of the export-params input value.
    if not linked_id:
        export_val = sel_ref.xpath("./input[contains(@class, 'exportparams')]/@value").get("").strip()
        if export_val:
            pieces = export_val.split("!")
            if len(pieces) > 1:
                linked_id = pieces[1]
    data["old_linked_id"] = linked_id
    data["linked_id"] = BaseLngid().GetLngid(data["sub_db_id"], linked_id) if linked_id else ""
    data["ref_index"] = sel_ref.xpath("./em/text()").get("").strip()
    return data


def get_extra_meta(data, jrawids_dic):
    """Duplicate an article row into the extra CNKI sub-databases whose
    journal-rawid whitelist contains this article's journal.

    :param data: the base journal_latest row.
    :param jrawids_dic: sub_db_id -> JSON string with "jrawids" (whitelist)
        and "info" (per-db identity fields).
    :return: list of ``{"table": "journal_latest", "data": ...}`` entries.
    """
    extra_meta = []
    for candidate_sub in ("00169", "00451", "00452"):
        parsed = json.loads(jrawids_dic[candidate_sub])
        jrawids = parsed["jrawids"]
        jinfo = parsed["info"]
        if data["journal_raw_id"].upper() + ";" not in jrawids:
            continue
        row = deepcopy(data)
        real_sub_db_id = jinfo["sub_db_id"]
        # Re-derive the lngid under the target sub-database.
        new_lngid = BaseLngid().GetLngid(real_sub_db_id, data["rawid"])
        row["lngid"] = new_lngid
        row["keyid"] = new_lngid
        row["product"] = jinfo["product"]
        row["sub_db"] = jinfo["sub_db"]
        row["sub_db_id"] = real_sub_db_id
        row["provider"] = jinfo["provider"]
        row["zt_provider"] = jinfo["zt_provider"]
        extra_meta.append({"table": "journal_latest", "data": row})
    return extra_meta


def cnkicfjd_cnkicfjdhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Home-page callback for the CNKI CFJD journal navigation listing.

    Parses one listing page ("1_1"): records the total page count, fans out
    tasks for the remaining pages (only from the first few page tasks), and
    emits per-journal insert/update operations for the next task stage.

    :param callmodel: platform callback model carrying the parsed page data
        and the originating SQL task row.
    :return: a ``DealModel`` describing the SQL operations to perform.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for the next-stage task rows; task_tag_next is swapped in
    # for task_tag per journal below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = int(data['total_page'])
        # Bail out on implausibly large page counts (likely a parse anomaly).
        if total_page > 200:
            return result
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first few page tasks (per turn_page mode) seed the rest of
        # the pagination, so the fan-out happens once, not on every page.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # Shared WHERE conditions for the activate/deactivate updates;
            # the last entry (page_index vs total_page) is flipped below.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate page tasks beyond the (possibly shrunk) page count.
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # Re-activate every page task within range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)

            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / transient columns before re-inserting rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # Insert one task row per remaining page (insert-ignore, so
            # already-existing pages are left untouched).
            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        home_json = json.loads(callmodel.sql_model.home_json)
        # "his" carries the subject/classification trail for this listing.
        his = home_json.get("his", "")
        for item in data['qk_info']['children']:
            result.befor_dicts.update.update({'page': total_page})
            # The journal rawid is the path segment after "CFJD/" in the URL.
            journal_rawid = re.findall('CFJD/(.*?)\?', item['url'])[0]
            qk_name = item['title']
            # Impact factors embedded in the info text (may be absent).
            fuhe = re.findall('复合影响因子：(.*?);', item['info'])
            zonghe = re.findall('综合影响因子：(.*)', item['info'])

            new_dict = dict()
            new_dict["journal_name"] = qk_name
            new_dict["fuhe"] = fuhe[0] if fuhe else ''
            new_dict["zonghe"] = zonghe[0] if zonghe else ''

            # Build the next-stage task row for this journal.
            temp_info = info_dicts.copy()
            task_tag_next = temp_info["task_tag_next"]
            temp_info["task_tag"] = task_tag_next
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00169"
            temp_info["is_active"] = 1
            temp_info["subject"] = his
            temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
            di_model_next.lists.append(temp_info)

            # And an update for the case where the row already exists:
            # refresh the json and append this subject to the existing list.
            du_model = DealUpdateModel()
            # du_model.update_no_placeholder.update({"page": max_page})
            du_model.update.update({"journal_json": json.dumps(new_dict, ensure_ascii=False),
                                    "sub_db_id": "00169",
                                    "is_active": 1})
            du_model.where.update({"journal_rawid": journal_rawid,
                                   "task_tag": temp_info["task_tag"],
                                   "task_name": callmodel.sql_model.task_name
                                   # "is_active": "0"
                                   })
            du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
            result.next_dicts.update_list.append(du_model)

        result.next_dicts.insert.append(di_model_next)

    return result


def cnkicjfx_cnkicjfxhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Home-page callback for the CNKI CJFX journal navigation listing.

    Same flow as ``cnkicfjd_cnkicfjdhome_callback`` but for sub_db "00451":
    records the total page count, fans out tasks for the remaining pages
    (only from the first few page tasks), and emits per-journal
    insert/update operations for the next task stage.

    :param callmodel: platform callback model carrying the parsed page data
        and the originating SQL task row.
    :return: a ``DealModel`` describing the SQL operations to perform.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for the next-stage task rows; task_tag_next is swapped in
    # for task_tag per journal below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        # NOTE(review): this source reports one page more than CFJD's,
        # hence the -1 — confirm against the site's pagination.
        total_page = int(data['total_page']) - 1
        # Bail out on implausibly large page counts (likely a parse anomaly).
        if total_page > 200:
            return result
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first few page tasks (per turn_page mode) seed the rest of
        # the pagination, so the fan-out happens once, not on every page.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # Shared WHERE conditions for the activate/deactivate updates;
            # the last entry (page_index vs total_page) is flipped below.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate page tasks beyond the (possibly shrunk) page count.
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # Re-activate every page task within range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)

            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / transient columns before re-inserting rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # Insert one task row per remaining page (insert-ignore, so
            # already-existing pages are left untouched).
            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        home_json = json.loads(callmodel.sql_model.home_json)
        # "his" carries the subject/classification trail for this listing.
        his = home_json.get("his", "")
        for item in data['qk_info']['children']:
            result.befor_dicts.update.update({'page': total_page})
            # The journal rawid is the "km" query parameter of the URL.
            journal_rawid = re.findall('km=(.*)', item['url'])[0]
            qk_name = item['title']

            new_dict = dict()
            new_dict["journal_name"] = qk_name

            # Build the next-stage task row for this journal.
            temp_info = info_dicts.copy()
            task_tag_next = temp_info["task_tag_next"]
            temp_info["task_tag"] = task_tag_next
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00451"
            temp_info["is_active"] = 1
            temp_info["subject"] = his
            temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
            di_model_next.lists.append(temp_info)

            # And an update for the case where the row already exists:
            # refresh the json and append this subject to the existing list.
            du_model = DealUpdateModel()
            # du_model.update_no_placeholder.update({"page": max_page})
            du_model.update.update({"journal_json": json.dumps(new_dict, ensure_ascii=False),
                                    "sub_db_id": "00451",
                                    "is_active": 1})
            du_model.where.update({"journal_rawid": journal_rawid,
                                   "task_tag": temp_info["task_tag"],
                                   "task_name": callmodel.sql_model.task_name
                                   # "is_active": "0"
                                   })
            du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
            result.next_dicts.update_list.append(du_model)

        result.next_dicts.insert.append(di_model_next)

    return result


# def cnkicjfr_cnkicjfrhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
#     result = DealModel()
#     para_dicts = callmodel.para_dicts
#     task_info = callmodel.redis_all.parse_dict["1_1"].task_info
#     info_dicts = {"task_name": callmodel.sql_model.task_name,
#                   "task_tag": callmodel.sql_model.task_tag,
#                   "sub_db_id": callmodel.sql_model.sub_db_id,
#                   "task_tag_next": task_info.task_tag_next}
#     if "1_1" in para_dicts["data"]:
#         data = para_dicts["data"]["1_1"]
#         total_page = int(data['total_page'])
#         if total_page > 200:
#             return result
#         result.code_dicts = {
#             "1_1": {"max_page": total_page}
#         }
#         page_index = int(callmodel.sql_model.page_index)
#         turn_page = task_info.turn_page
#         if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
#             sql_dict = callmodel.sql_model.dict()
#             tmp_list = [
#                 {
#                     "key": "task_name",
#                     "value": sql_dict["task_name"],
#                     "operator": "=",
#                     "description": "and"
#                 },
#                 {
#                     "key": "task_tag",
#                     "value": sql_dict["task_tag"],
#                     "operator": "=",
#                     "description": "and"
#                 },
#                 {
#                     "key": "home_rawid",
#                     "value": sql_dict["home_rawid"],
#                     "operator": "=",
#                     "description": "and"
#                 },
#                 {
#                     "key": "page_index",
#                     "value": total_page,
#                     "operator": ">",
#                     "description": "and"
#                 }
#             ]
#             du_model_bef_1 = DealUpdateModel()
#             du_model_bef_1.update.update({"is_active": "0"})
#             list_op = []
#             for item in tmp_list:
#                 op = OperatorSqlModel()
#                 list_op.append(op.parse_obj(item))
#             du_model_bef_1.where = list_op
#             result.befor_dicts.update_list.append(du_model_bef_1)
#             tmp_list[-1]["operator"] = "<="
#             du_model_bef_2 = DealUpdateModel()
#             du_model_bef_2.update.update({"is_active": "1"})
#             list_op1 = []
#             for item in tmp_list:
#                 op = OperatorSqlModel()
#                 list_op1.append(op.parse_obj(item))
#             du_model_bef_2.where = list_op1
#             result.befor_dicts.update_list.append(du_model_bef_2)
#
#             di_model_bef = DealInsertModel()
#             di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
#             sql_dict.pop("id")
#             sql_dict.pop("update_time")
#             sql_dict.pop("create_time")
#             sql_dict.pop("null_dicts")
#             sql_dict.pop("err_msg")
#             sql_dict.pop("other_dicts")
#             sql_dict.pop("state")
#             sql_dict.pop("failcount")
#
#             for page in range(page_index, total_page + 1):
#                 sql_dict["page"] = total_page
#                 sql_dict["page_index"] = page
#                 di_model_bef.lists.append(sql_dict.copy())
#             result.befor_dicts.insert.append(di_model_bef)
#         di_model_next = DealInsertModel()
#         di_model_next.insert_pre = CoreSqlValue.insert_ig_it
#         home_json = json.loads(callmodel.sql_model.home_json)
#         his = home_json.get("his", "")
#         for item in data['qk_info']['children']:
#             result.befor_dicts.update.update({'page': total_page})
#             journal_rawid = re.findall('BaseID=(.*?)&', item['url'])[0]
#             qk_name = item['title']
#
#             new_dict = dict()
#             new_dict["journal_name"] = qk_name
#
#             temp_info = info_dicts.copy()
#             task_tag_next = temp_info["task_tag_next"]
#             temp_info["task_tag"] = task_tag_next
#             del temp_info["task_tag_next"]
#             temp_info["journal_rawid"] = journal_rawid
#             temp_info["sub_db_id"] = "00452"
#             temp_info["is_active"] = 1
#             temp_info["subject"] = his
#             temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
#             di_model_next.lists.append(temp_info)
#
#             du_model = DealUpdateModel()
#             # du_model.update_no_placeholder.update({"page": max_page})
#             du_model.update.update({"journal_json": json.dumps(new_dict, ensure_ascii=False),
#                                     "sub_db_id": "00452",
#                                     "is_active": 1})
#             du_model.where.update({"journal_rawid": journal_rawid,
#                                    "task_tag": temp_info["task_tag"],
#                                    "task_name": callmodel.sql_model.task_name
#                                    # "is_active": "0"
#                                    })
#             du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
#             result.next_dicts.update_list.append(du_model)
#
#         result.next_dicts.insert.append(di_model_next)
#
#     return result


def cnkicjfr_cnkicjfrhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Handle the CNKI "cjfr" journal-home listing page.

    From the "1_1" payload this callback:
      * derives the total page count (the site reports a record count,
        20 records per page),
      * on an early page of the listing (gated by ``turn_page`` /
        ``page_index``), fans the remaining pages out as new task rows
        and flips ``is_active`` for rows outside/inside the new range,
      * for every journal on the page, queues an insert row for the next
        task stage plus an update that refreshes ``journal_json`` /
        ``sub_db_id`` and appends this listing's subject (``his``) to
        the existing ``subject`` column via SQL ``CONCAT``.

    :param callmodel: callback context carrying the crawl payload
        (``para_dicts``), the originating task row (``sql_model``) and
        task configuration (``redis_all``).
    :return: a :class:`DealModel` describing the SQL work to perform;
        empty when the "1_1" payload is missing or the derived page
        count exceeds the 200-page safety cap.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    data = para_dicts["data"]["1_1"]
    # total_page from the site is a record count; 20 records per page.
    total_page = int(int(data['total_page']) / 20) + 1
    if total_page > 200:
        # Safety cap: abnormally large listings are skipped entirely.
        return result
    result.code_dicts = {
        "1_1": {"max_page": total_page}
    }
    page_index = int(callmodel.sql_model.page_index)
    turn_page = task_info.turn_page
    # Only an early page of the listing performs the fan-out, so it runs
    # once per listing instead of once per page callback.
    if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
        sql_dict = callmodel.sql_model.dict()
        tmp_list = [
            {
                "key": "task_name",
                "value": sql_dict["task_name"],
                "operator": "=",
                "description": "and"
            },
            {
                "key": "task_tag",
                "value": sql_dict["task_tag"],
                "operator": "=",
                "description": "and"
            },
            {
                "key": "home_rawid",
                "value": sql_dict["home_rawid"],
                "operator": "=",
                "description": "and"
            },
            {
                "key": "page_index",
                "value": total_page,
                "operator": ">",
                "description": "and"
            }
        ]
        # Deactivate rows whose page_index falls beyond the new range...
        du_model_bef_1 = DealUpdateModel()
        du_model_bef_1.update.update({"is_active": "0"})
        du_model_bef_1.where = [OperatorSqlModel().parse_obj(item) for item in tmp_list]
        result.befor_dicts.update_list.append(du_model_bef_1)
        # ...and (re-)activate the ones inside it.
        tmp_list[-1]["operator"] = "<="
        du_model_bef_2 = DealUpdateModel()
        du_model_bef_2.update.update({"is_active": "1"})
        du_model_bef_2.where = [OperatorSqlModel().parse_obj(item) for item in tmp_list]
        result.befor_dicts.update_list.append(du_model_bef_2)

        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        # Strip bookkeeping columns before cloning the row per page.
        for key in ("id", "update_time", "create_time", "null_dicts",
                    "err_msg", "other_dicts", "state", "failcount"):
            sql_dict.pop(key)

        sql_dict["page"] = total_page  # invariant across the fan-out rows
        for page in range(page_index, total_page + 1):
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    home_json = json.loads(callmodel.sql_model.home_json)
    his = home_json.get("his", "")
    children = data['qk_info']['children']
    if children:
        # Loop-invariant: apply once instead of once per child.
        result.befor_dicts.update.update({'page': total_page})
    for item in children:
        journal_rawid = re.findall(r'pykm=([^&]+)', item['url'])[0]
        qk_name = item['title']

        new_dict = {"journal_name": qk_name}
        journal_json = json.dumps(new_dict, ensure_ascii=False)

        temp_info = info_dicts.copy()
        # Advance the cloned row to the next task stage.
        temp_info["task_tag"] = temp_info.pop("task_tag_next")
        temp_info["journal_rawid"] = journal_rawid
        temp_info["sub_db_id"] = "00452"
        temp_info["is_active"] = 1
        temp_info["subject"] = his
        temp_info["journal_json"] = journal_json
        di_model_next.lists.append(temp_info)

        du_model = DealUpdateModel()
        du_model.update.update({"journal_json": journal_json,
                                "sub_db_id": "00452",
                                "is_active": 1})
        du_model.where.update({"journal_rawid": journal_rawid,
                               "task_tag": temp_info["task_tag"],
                               "task_name": callmodel.sql_model.task_name
                               })
        # Append this listing's subject to any value already in the column.
        du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
        result.next_dicts.update_list.append(du_model)

    result.next_dicts.insert.append(di_model_next)

    return result
