import json
import re
import time
from copy import deepcopy

import math
from math import ceil

from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basestring import BaseString
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_g import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, JournalListModel, JournalIssueModel, \
    DealInsertModel, DealUpdateModel, JournalHomeModel, OperatorSqlModel, EtlDealModel

# Public callback entry points for the Wanfang journal crawl pipeline.
# The crawler platform dispatches to these functions by name, so every
# callback defined (or continued) in this module must be listed here.
__all__ = ["wanfangjournal_wanfangqkhome_callback",
           "wanfangjournal_wanfangclasshome_callback",
           "wanfangjournal_wanfangqkclasshome_callback",
           "wanfangjournal_wanfangjournallist_callback",
           "wanfangjournal_wanfangissue_callback",
           "wanfangjournal_wanfangarticle_callback",
           "wanfangmed_wanfangmedhome_callback",
           "wanfangjournal_wanfangarticle_etl_callback"]


def wanfangjournal_wanfangjournallist_callback(callmodel: CallBackModel[JournalListModel]) -> DealModel:
    """Parse a Wanfang journal detail / issue-list page.

    Merges the freshly crawled journal metadata into the stored
    ``journal_json`` and queues one next-stage task per (year, issue)
    found in the issue tree.

    :param callmodel: crawl context; ``para_dicts["data"]`` carries the
        parsed page sections "1_1" (journal info), "1_2" (department and
        description) and "1_3" (issue tree).
    :return: a ``DealModel`` describing the DB inserts/updates to run.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    journal_rawid = callmodel.sql_model.journal_rawid
    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag,
                  "journal_rawid": journal_rawid,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    journal_json_dicts = para_dicts["data"]["1_1"]
    journal_json_dicts["collect_database"] = BaseString.cleanSemicolon(journal_json_dicts.get("collect_database", ""))
    journal_json_dicts['dep_name'] = para_dicts["data"]["1_2"]['dep_name']
    journal_json_dicts['perio_desc'] = para_dicts["data"]["1_2"]['perio_desc']
    issue_list = para_dicts["data"]["1_3"]['issue_list']['children']
    # The issue tree is ordered newest-first, so the first entry's year is
    # the most recent publication year.
    journal_json_dicts['last_year'] = issue_list[0]['year']
    for issue in issue_list:
        pub_year = issue["year"]
        for item in issue["children"]:
            issue_json = {}
            num = item["num"].replace("期", "")
            url = item["href"]
            isSync = BaseUrl.urlQuery2Dict(url)["isSync"]
            issue_json["isSync"] = isSync
            temp = info_dicts.copy()
            # Advance to the final tag of the task chain for the next stage.
            temp["task_tag"] = temp["task_tag_next"].split(";")[-1]
            del temp["task_tag_next"]
            temp.update({'pub_year': pub_year, 'num': num, "page": "1", "page_index": 1,
                         'issue_json': json.dumps(issue_json, ensure_ascii=False)})
            d_i_model.lists.append(temp)
            # NOTE(review): a historical comment marked this update block as
            # redundant; it is kept for structural consistency with the other
            # callbacks (it refreshes issue_json on pre-existing rows).
            dum = DealUpdateModel()
            dum.update = {
                'issue_json': temp['issue_json']
            }
            dum.where = {
                "task_name": temp['task_name'],
                "task_tag": temp['task_tag'],
                "journal_rawid": temp['journal_rawid'],
                "pub_year": temp['pub_year'],
                "num": temp['num']
            }
            result.next_dicts.update_list.append(dum)
    result.next_dicts.insert.append(d_i_model)
    # Merge the new metadata into the stored journal_json instead of
    # replacing it wholesale, so previously collected keys survive.
    qk_dicts = json.loads(callmodel.sql_model.journal_json)
    qk_dicts.update(journal_json_dicts)
    result.befor_dicts.update.update({"journal_json": json.dumps(qk_dicts, ensure_ascii=False)})
    result.other_dicts.update({"last_year": issue_list[0]['year']})
    result.other_dicts.update({"collect_database": journal_json_dicts["collect_database"]})
    return result


# def wanfangjournal_wanfangissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
#     dealmodel = DealModel()
#     pageTotal = callmodel.para_dicts["1_1"]["data"]["1"]["pageTotal"]
#     totalRow = callmodel.para_dicts["1_1"]["data"]["1"]["totalRow"]
#     task_info = callmodel.redis_all.parse_dict["1_1"].task_info
#     dealmodel.befor_dicts.update.update({"articlecount": totalRow, "page": pageTotal})
#
#     info_dicts = {"task_name": callmodel.sql_model.task_name,
#                   "task_tag": callmodel.sql_model.task_tag,
#                   "sub_db_id": callmodel.sql_model.sub_db_id,
#                   "task_tag_next": task_info.task_tag_next}
#
#     pub_year = callmodel.sql_model.pub_year
#     num = callmodel.sql_model.num
#     journal_rawid = callmodel.sql_model.journal_rawid
#     d_i_model = DealInsertModel()
#     d_i_model.insert_pre = CoreSqlValue.replace_it
#     for page, one_page in callmodel.para_dicts["1_1"]["data"].items():
#         if one_page["pageRow"] is not None:
#             for dict_item in one_page["pageRow"]:
#                 rawid = dict_item["Id"]
#                 # 有可能出现没有title的信息
#                 title = dict_item.get("Title", "")
#                 page_info = dict_item.get("Page", "")
#                 column_info = dict_item.get("Column", "")
#                 author = dict_item.get("Creator", "")
#
#                 temp = info_dicts.copy()
#                 temp["task_tag"] = temp["task_tag_next"]
#                 del temp["task_tag_next"]
#                 temp["rawid"] = rawid
#                 json_temp = {
#                     "title": title,
#                     "page_info": page_info,
#                     "column_info": column_info,
#                     "author": author,
#                     "pub_year": pub_year,
#                     "num": num,
#                     "journal_rawid": journal_rawid
#                 }
#                 temp["article_info_json"] = json.dumps(json_temp, ensure_ascii=False)
#                 # dealmodel.next_dicts["replace_"]["lists"].append(temp)
#                 d_i_model.lists.append(temp)
#     dealmodel.next_dicts.insert.append(d_i_model)
#     dealmodel.code_dicts = {"1_1": {"max_page": str(pageTotal)}}
#     return dealmodel


def wanfangjournal_wanfangissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Parse one page of a journal issue's table of contents.

    Records the article count and page total, on the first listing page
    (depending on ``turn_page``) fans out one row per page of this issue,
    and queues one article-stage task per TOC entry.

    :param callmodel: crawl context; ``para_dicts["data"]["1_1"]`` holds the
        parsed issue page with ``articleCount`` and ``issue_info``.
    :return: a ``DealModel`` describing the DB work to perform.
    """
    dealmodel = DealModel()
    totalRow = callmodel.para_dicts["data"]["1_1"]["articleCount"]
    if totalRow == "":
        totalRow = 0
    # 10 articles per listing page.
    pageTotal = ceil(int(totalRow) / 10)
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info

    dealmodel.befor_dicts.update.update({"articlecount": totalRow, "page": pageTotal})
    sql_model = callmodel.sql_model
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    # Batch stamp "YYYYMMDD_HHMMSS" (Beijing time) tagging this crawl round.
    base_time = BaseTime().get_beijin_date_strins()
    batch = base_time[0:8] + "_" + base_time[-6:]
    issue_json = sql_model.issue_json
    issue_json = json.loads(issue_json)
    # NOTE(review): issue_json is mutated here but never written back in
    # this function — confirm whether persisting "after_batch" is intended.
    issue_json["after_batch"] = batch

    pub_year = callmodel.sql_model.pub_year
    num = callmodel.sql_model.num
    journal_rawid = callmodel.sql_model.journal_rawid
    page_index = int(callmodel.sql_model.page_index)
    turn_page = task_info.turn_page
    # Only on the first page (index 0 or 1 depending on turn_page mode):
    # fan out one task row per listing page of this issue.
    if (turn_page == 7 and page_index == 0) or (turn_page == 8 and page_index == 1):
        sql_dict = callmodel.sql_model.dict()
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        # Strip DB-managed / bookkeeping columns before re-inserting rows.
        sql_dict.pop("id")
        sql_dict.pop("update_time")
        sql_dict.pop("create_time")
        sql_dict.pop("null_dicts")
        sql_dict.pop("err_msg")
        sql_dict.pop("other_dicts")
        sql_dict.pop("state")
        sql_dict.pop("failcount")
        for page in range(page_index, pageTotal + 1):
            sql_dict["page"] = pageTotal
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
        dealmodel.befor_dicts.insert.append(di_model_bef)

    d_i_model = DealInsertModel()
    # d_i_model.insert_pre = CoreSqlValue.replace_it
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it
    data_dicts = callmodel.para_dicts["data"]["1_1"]
    if data_dicts["articleCount"] is not None:
        for dict_item in data_dicts["issue_info"]["children"]:
            # Some entries (possibly the first one) carry no column info.
            column_info_1 = ""
            if BaseDicts.is_dict_exit_key(dict_item, "column_info") != "":
                column_info_1 = BaseDicts.is_dict_exit_key(dict_item, "column_info")
            if BaseDicts.is_dict_exit_key(dict_item, "title") != "":
                # The article rawid is the last path segment of its URL.
                rawid = dict_item["url"].split("/")[-1]
                # Entries without a title are possible; default to "".
                title = dict_item.get("title", "")
                page_info = dict_item.get("page_info", "")
                column_info = column_info_1
                author = dict_item.get("authors", "")

                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                temp["rawid"] = rawid
                json_temp = {
                    "title": title,
                    "page_info": page_info,
                    "column_info": column_info,
                    "author": author,
                    "pub_year": pub_year,
                    "num": num,
                    "journal_rawid": journal_rawid,
                    "before_batch": batch
                }
                temp["article_info_json"] = json.dumps(json_temp, ensure_ascii=False)
                # dealmodel.next_dicts["replace_"]["lists"].append(temp)
                d_i_model.lists.append(temp)
                # Refresh article_info_json on rows that already exist (the
                # insert above is INSERT IGNORE and would skip duplicates).
                du_model = DealUpdateModel()
                du_model.update = {
                    'article_info_json': json.dumps(json_temp, ensure_ascii=False)
                }
                du_model.where = {
                    "task_name": temp['task_name'],
                    "task_tag": temp['task_tag'],
                    "rawid": temp['rawid']
                }
                dealmodel.next_dicts.update_list.append(du_model)

    dealmodel.next_dicts.insert.append(d_i_model)
    dealmodel.code_dicts = {"1_1": {"max_page": str(pageTotal)}}
    return dealmodel


def wanfangjournal_wanfangarticle_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Compute pagination for an article's reference section ("1_1") and,
    when present, its citation section ("1_2"); listings hold 10 rows/page.
    """

    def _max_page(section):
        # Drop the bulky raw page payload before inspecting the section;
        # an empty section counts as zero rows (hence zero pages).
        section.pop("raw_base64", None)
        total = section["total"] if section else 0
        return ceil(int(total) / 10)

    dealmodel = DealModel()
    html_dicts = callmodel.para_dicts
    dealmodel.code_dicts.update({"1_1": {"max_page": _max_page(html_dicts["1_1"]["data"]["1"])}})
    if "1_2" in html_dicts:
        dealmodel.code_dicts.update({"1_2": {"max_page": _max_page(html_dicts["1_2"]["data"]["1"])}})
    return dealmodel


def wanfangjournal_wanfangclasshome_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Expand a Wanfang class (subject) home page into per-class crawl rows.

    Each ClassCode cluster entry becomes one insert row; entries nested more
    than one level deep ("a/b/c") advance to the next task tag and record
    their field lineage in ``home_json["his"]``.
    """
    result = DealModel()
    data = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "sub_db_id": callmodel.sql_model.sub_db_id,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in data:
        return result

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for cluster_item in data["1_1"]['clusterField']['ClassCode']['cluster']:
        row = base_info.copy()
        # e.g. "0/B" becomes "1/B"; the rewritten path is the class rawid.
        class_name = cluster_item["name"].replace("0/", "1/")
        row["home_rawid"] = class_name
        home_json = {
            "field": cluster_item["field"],
            "number": cluster_item["number"]
        }
        if class_name.count("/") > 1:
            # Deeper class level: move to the last tag of the chain and
            # append this field to the parent's lineage.
            row["task_tag"] = row.pop("task_tag_next").split(";")[-1]
            parent_field = json.loads(callmodel.sql_model.home_json)["field"]
            home_json["his"] = parent_field + "->" + cluster_item["field"]
        else:
            row.pop("task_tag_next")
        row["page_index"] = 0
        row["home_json"] = json.dumps(home_json, ensure_ascii=False)
        insert_model.lists.append(row)

    result.befor_dicts.update.update({'is_active': 1})
    result.befor_dicts.insert.append(insert_model)
    return result


def wanfangjournal_wanfangqkhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse one page of the Wanfang journal home/search listing.

    For each journal record it builds a normalized ``journal_json`` payload,
    queues an INSERT IGNORE for the next crawl stage plus an update for rows
    that already exist.  On early pages (thresholds depend on ``turn_page``)
    it also reconciles the is_active flags of paging rows and fans out one
    task row per listing page.

    :param callmodel: crawl context; ``para_dicts["data"]["1_1"]`` holds the
        parsed listing with ``total`` and ``value`` (journal records).
    :return: a ``DealModel`` describing the DB work to perform.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total = data["total"]
        # 50 journals per listing page.
        total_page = math.ceil(int(total) / 50)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first few pages (thresholds differ by turn_page mode)
        # perform the fan-out / activity reconciliation below.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # WHERE template shared by the two bulk updates; the last entry
            # (page_index > total_page) is flipped to "<=" for the second.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate paging rows beyond the current page total...
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # ...and (re)activate the ones still within range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)

            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / bookkeeping columns before re-inserting rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")
            for page in range(page_index, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["value"]:
            # NOTE(review): same value every iteration — looks hoistable out
            # of the loop; left as-is pending confirmation.
            result.befor_dicts.update.update({'page': total_page})
            journal_rawid = item["Id"]
            qk_name = item['Title'][0].strip()
            trans_title = ''
            journal_name_alt = ""
            journal_name_korea = ""
            # A second Title entry holds the translated title; a double-space
            # separates an optional Korean variant.
            if len(item['Title']) >= 2:
                trans_title = item['Title'][1].strip()
                lst = trans_title.split('  ')
                journal_name_alt = lst[0]
                journal_name_korea = ''
                if len(lst) == 2:
                    journal_name_korea = lst[1]
            hostunit_name = ";".join(item["Sponsor"])  # sponsor organization(s)
            dep_name = item["CompetentDepartment"].strip()  # supervising department
            # Strip a known garbage private-use character from the source data.
            dep_name = dep_name.replace("􀪋", "")
            publish_cycle = item.get('PublishingPeriod', '').strip()  # publication cycle
            issn = item.get('ISSN', '').strip()
            cn = item.get('CN', '').strip()
            src_db = ''
            # SourceDB may be absent or non-joinable; best-effort only.
            try:
                src_db = ";".join(item['SourceDB'])
            except:
                pass
            # Map ISO-ish language codes to display names; pass others through.
            languages = item['Language']
            languages2 = []
            for lang in languages:
                if lang == "chi":
                    languages2.append("中文")
                elif lang == "eng":
                    languages2.append("英文")
                else:
                    languages2.append(lang)
            language = ";".join(languages2)

            ImpactFactor = item.get('ImpactFactor', '')  # impact factor
            # major_editor = dic.get('major_editor', '').strip()  # chief editor (legacy)

            former_name = ";".join(item["FormerTitle"])  # former journal title(s)
            perio_desc = item.get('Introduction', '').strip()  # journal introduction

            Award = ";".join(item.get('Award', ''))  # awards
            Address = item.get('Address', '').strip()  # address
            Postcode = item.get('Postcode', '').strip()  # postal code
            Url = item.get('Url', '').strip()  # journal URL
            Telephone = item.get('Telephone', '').strip()  # telephone
            Email = item.get('Email', '').strip()  # e-mail
            LastYear = str(item.get('LastYear', ''))  # latest publication year
            LastIssue = item.get('LastIssue', '').strip()  # latest issue
            Editorial = item.get('Editorial', '').strip()  # editorial office
            SponsorRegion = item.get('SponsorRegion', '').strip()  # sponsor's city/region
            Director = item.get('Director', '').strip()  # director / supervisor
            Director = Director.replace("􀪋", "")
            ChiefEditor = item.get('ChiefEditor', '').strip()  # chief editor
            IssuedPeriod = item.get('IssuedPeriod', '').strip()  # issuing period
            IsStopped = item.get('IsStopped', '')  # whether publication has ceased

            # Normalize the ceased flag to the DB's "1"/"0" convention.
            if IsStopped:
                IsStopped = '1'
            else:
                IsStopped = '0'

            FoundYear = str(item.get('FoundYear', ''))  # founding year
            Fax = item.get('Fax', '').strip()  # fax
            new_dict = {}

            new_dict["journal_name_alt"] = journal_name_alt
            new_dict["journal_name_korea"] = journal_name_korea
            new_dict["qk_name"] = qk_name
            new_dict["trans_title"] = trans_title
            new_dict["hostunit_name"] = hostunit_name
            new_dict["dep_name"] = dep_name
            new_dict["publish_cycle"] = publish_cycle
            new_dict["issn"] = issn
            new_dict["cn"] = cn
            new_dict["src_db"] = src_db
            new_dict["language"] = language
            new_dict["former_name"] = former_name
            new_dict["perio_desc"] = perio_desc
            new_dict["wf_impact"] = ImpactFactor
            new_dict["post_code"] = Postcode
            new_dict["type_name"] = IssuedPeriod
            new_dict["chief_editor"] = ChiefEditor
            new_dict["is_stop"] = IsStopped
            new_dict["email"] = Email
            new_dict["edit_office_addr"] = Address
            new_dict["tel_code"] = Telephone
            new_dict["web_site"] = Url
            new_dict["fax"] = Fax
            new_dict["award_state"] = Award
            new_dict["last_year"] = LastYear
            new_dict["LastIssue"] = LastIssue
            new_dict["Editorial"] = Editorial
            new_dict["SponsorRegion"] = SponsorRegion
            new_dict["Director"] = Director
            new_dict["FoundYear"] = FoundYear

            # Insert row for the next stage, keyed by the next task tag.
            temp_info = info_dicts.copy()
            task_tag_next = temp_info["task_tag_next"]
            temp_info["task_tag"] = task_tag_next
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00004"
            temp_info["is_active"] = "1"
            temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
            di_model_next.lists.append(temp_info)
            # Refresh journal_json on rows the INSERT IGNORE above skips.
            du_model = DealUpdateModel()
            du_model.update.update({"journal_json": json.dumps(new_dict, ensure_ascii=False),
                                    "sub_db_id": "00004",
                                    "is_active": "1"})
            du_model.where.update({"journal_rawid": journal_rawid,
                                   "task_tag": temp_info["task_tag"],
                                   "task_name": callmodel.sql_model.task_name,
                                   # "is_active": "0"
                                   })

            result.next_dicts.update_list.append(du_model)
        result.next_dicts.insert.append(di_model_next)
    return result


def wanfangjournal_wanfangqkclasshome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse one page of a Wanfang per-class journal listing.

    Near-duplicate of ``wanfangjournal_wanfangqkhome_callback``, with two
    differences: the page total is computed with ``- 1`` (pages appear to be
    0-based here — TODO confirm), and each existing row's ``subject`` column
    is extended via a raw ``CONCAT`` with this class's ``his`` lineage taken
    from ``sql_model.home_json``.

    :param callmodel: crawl context; ``para_dicts["data"]["1_1"]`` holds the
        parsed listing with ``total`` and ``value`` (journal records).
    :return: a ``DealModel`` describing the DB work to perform.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    home_json = json.loads(callmodel.sql_model.home_json)
    his = home_json["his"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total = data["total"]
        # 50 journals per listing page; "-1" differs from the qkhome variant
        # (presumably a 0-based highest page index — verify against caller).
        total_page = math.ceil(int(total) / 50) - 1
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first few pages (thresholds differ by turn_page mode)
        # perform the fan-out / activity reconciliation below.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # WHERE template shared by the two bulk updates; the last entry
            # (page_index > total_page) is flipped to "<=" for the second.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate paging rows beyond the current page total...
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # ...and (re)activate the ones still within range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)

            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / bookkeeping columns before re-inserting rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")
            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["value"]:
            journal_rawid = item["Id"]
            qk_name = item['Title'][0].strip()
            trans_title = ''
            journal_name_alt = ""
            journal_name_korea = ""
            # A second Title entry holds the translated title; a double-space
            # separates an optional Korean variant.
            if len(item['Title']) >= 2:
                trans_title = item['Title'][1].strip()
                lst = trans_title.split('  ')
                journal_name_alt = lst[0]
                journal_name_korea = ''
                if len(lst) == 2:
                    journal_name_korea = lst[1]
            hostunit_name = ";".join(item["Sponsor"])  # sponsor organization(s)
            dep_name = item["CompetentDepartment"].strip()  # supervising department
            # Strip a known garbage private-use character run from the source data.
            dep_name = dep_name.replace("􀪋􀪋􀪋􀪋", "")
            publish_cycle = item.get('PublishingPeriod', '').strip()  # publication cycle
            issn = item.get('ISSN', '').strip()
            cn = item.get('CN', '').strip()
            src_db = ''
            # SourceDB may be absent or non-joinable; best-effort only.
            try:
                src_db = ";".join(item['SourceDB'])
            except:
                pass
            # Map ISO-ish language codes to display names; pass others through.
            languages = item['Language']
            languages2 = []
            for lang in languages:
                if lang == "chi":
                    languages2.append("中文")
                elif lang == "eng":
                    languages2.append("英文")
                else:
                    languages2.append(lang)
            language = ";".join(languages2)

            ImpactFactor = item.get('ImpactFactor', '')  # impact factor
            # major_editor = dic.get('major_editor', '').strip()  # chief editor (legacy)

            former_name = ";".join(item["FormerTitle"])  # former journal title(s)
            perio_desc = item.get('Introduction', '').strip()  # journal introduction

            Award = ";".join(item.get('Award', ''))  # awards
            Address = item.get('Address', '').strip()  # address
            Postcode = item.get('Postcode', '').strip()  # postal code
            Url = item.get('Url', '').strip()  # journal URL
            Telephone = item.get('Telephone', '').strip()  # telephone
            Email = item.get('Email', '').strip()  # e-mail
            LastYear = str(item.get('LastYear', ''))  # latest publication year
            LastIssue = item.get('LastIssue', '').strip()  # latest issue
            Editorial = item.get('Editorial', '').strip()  # editorial office
            SponsorRegion = item.get('SponsorRegion', '').strip()  # sponsor's city/region
            Director = item.get('Director', '').strip()  # director / supervisor
            ChiefEditor = item.get('ChiefEditor', '').strip()  # chief editor
            IssuedPeriod = item.get('IssuedPeriod', '').strip()  # issuing period
            IsStopped = item.get('IsStopped', '')  # whether publication has ceased
            # Normalize the ceased flag to the DB's "1"/"0" convention.
            if IsStopped:
                IsStopped = '1'
            else:
                IsStopped = '0'

            FoundYear = str(item.get('FoundYear', ''))  # founding year
            Fax = item.get('Fax', '').strip()  # fax
            new_dict = {}

            new_dict["journal_name_alt"] = journal_name_alt
            new_dict["journal_name_korea"] = journal_name_korea
            new_dict["qk_name"] = qk_name
            new_dict["trans_title"] = trans_title
            new_dict["hostunit_name"] = hostunit_name
            new_dict["dep_name"] = dep_name
            new_dict["publish_cycle"] = publish_cycle
            new_dict["issn"] = issn
            new_dict["cn"] = cn
            new_dict["src_db"] = src_db
            new_dict["language"] = language
            new_dict["former_name"] = former_name
            new_dict["perio_desc"] = perio_desc
            new_dict["wf_impact"] = ImpactFactor
            new_dict["post_code"] = Postcode
            new_dict["type_name"] = IssuedPeriod
            new_dict["chief_editor"] = ChiefEditor
            new_dict["is_stop"] = IsStopped
            new_dict["email"] = Email
            new_dict["edit_office_addr"] = Address
            new_dict["tel_code"] = Telephone
            new_dict["web_site"] = Url
            new_dict["fax"] = Fax
            new_dict["award_state"] = Award
            new_dict["last_year"] = LastYear
            new_dict["LastIssue"] = LastIssue
            new_dict["Editorial"] = Editorial
            new_dict["SponsorRegion"] = SponsorRegion
            new_dict["Director"] = Director
            new_dict["FoundYear"] = FoundYear

            # Insert row for the next stage, keyed by the next task tag.
            temp_info = info_dicts.copy()
            task_tag_next = temp_info["task_tag_next"]
            temp_info["task_tag"] = task_tag_next
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00004"
            temp_info["is_active"] = "1"
            temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
            di_model_next.lists.append(temp_info)

            # For already-existing rows: append this class's lineage to the
            # subject column via raw SQL CONCAT and refresh journal_json.
            du_model = DealUpdateModel()
            du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
            du_model.update.update({"journal_json": json.dumps(new_dict, ensure_ascii=False),
                                    "sub_db_id": "00004",
                                    "is_active": "1"})
            du_model.where.update({"journal_rawid": journal_rawid,
                                   "task_tag": temp_info["task_tag"],
                                   "task_name": callmodel.sql_model.task_name})

            result.next_dicts.update_list.append(du_model)
        result.befor_dicts.update.update({'page': total_page})
        result.next_dicts.insert.append(di_model_next)
    return result


def cleanSemicolon(text):
    """Normalize semicolon usage in *text*.

    Converts full-width '；' to ASCII ';', removes whitespace touching a
    semicolon, collapses runs of semicolons to one, strips a single leading
    and trailing semicolon, and finally trims surrounding whitespace.
    """
    normalized = text.replace('；', ';')  # full-width semicolon -> ASCII
    cleanup_steps = (
        (r"\s+;", ";"),  # whitespace before a semicolon
        (r";\s+", ";"),  # whitespace after a semicolon
        (r";+", ";"),    # collapse repeated semicolons
        (r"^;", ""),     # one leading semicolon
        (r";$", ""),     # one trailing semicolon
    )
    for pattern, replacement in cleanup_steps:
        normalized = re.sub(pattern, replacement, normalized)
    return normalized.strip()


def getJsonVal(data, key):
    """Fetch *key* from dict *data* and normalize it to a clean string.

    Lists are joined with ';' and passed through cleanSemicolon; scalars are
    stringified and cleaned the same way. Missing keys and JSON nulls yield
    "" (previously a null became the literal string "None" via str(None)).
    List items are coerced with str() so a mixed-type list no longer raises
    TypeError inside ";".join().
    """
    val = data.get(key, "")
    if val is None:
        # JSON null: treat the same as a missing key.
        return ""
    if isinstance(val, list):
        if not val:
            return ""
        return cleanSemicolon(";".join(str(item) for item in val))
    return cleanSemicolon(str(val))


def wanfangjournal_wanfangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for one Wanfang journal article.

    Builds the normalized article record from the downloaded detail JSON
    (slot 1_3), derives duplicate rows for extra sub-databases, parses the
    reference list from slot 1_1 into citation rows, and returns an
    EtlDealModel whose save_data lists everything to persist. Returns early
    with status "FAILED" on upstream parse errors (code 7), a missing
    detail payload (code 2), or failed field validation.
    """
    # Downloader slots: 1_1 = references, 1_2 = cited-by, 1_3 = bibliographic detail
    # Modified 2022-03-23
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    # Upstream rule parsing already failed: propagate the failure as code 7.
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    jrawids_dic = para_dicts["journal_rawids"]
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    journal_info = sql_model["journal_info"]
    src_data = down_model["1_3"]
    ref_raw = down_model["1_1"]
    # Download date normalized to YYYYMMDD.
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    src_data = json.loads(src_data.html).get("Resource")
    if not src_data or len(src_data) == 0:
        result.status = "FAILED"
        result.code = 2
        result.err_msg = "wanfangjournal_wanfangarticle_etl_callback 原始json无detail字段"
        return result
    src_data = src_data[0]["periodical"]
    data = {}
    # data_refcnt = para_dicts["data"]["1_2"]
    # data_ref = down_model["1_3"].dict()
    rawid = src_data["wanId"]
    # Counters are stored as "<count>@<down_date>"; non-numeric values fall back to 0.
    cited_cnt = str(src_data.get("citedcount", ""))
    if cited_cnt == "" or not cited_cnt.isdigit():
        cited_cnt = "0"
    cited_cnt = "{}@{}".format(cited_cnt, down_date)
    data["cited_cnt"] = cited_cnt
    # data["ref_cnt"] = ""
    data["down_date"] = down_date
    data["latest_date"] = down_date
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    # Fixed identifiers for the Wanfang journal sub-database.
    sub_db_id = "00004"
    product = "WANFANG"
    sub_db = "CSPD"
    provider = "WANFANG"
    source_type = "3"
    data["is_deprecated"] = "0"
    data["rawid"] = rawid
    data["rawid_mysql"] = sql_model["rawid"]
    # data["rawid_alt"] = rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = product
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["provider"] = provider
    data["zt_provider"] = "wangfangjournal"
    data["source_type"] = source_type
    data["provider_url"] = "http://www.wanfangdata.com.cn/details/detail.do?_type=perio&id=" + rawid
    # Journal title: first entry is the primary name, a second entry (if any)
    # is the alternate; both fall back to the journal_info row from MySQL.
    jnames = src_data.get("periodicaltitleList", "")
    journal_name = ""
    journal_name_alt = ""
    if len(jnames) == 0:
        journal_name = journal_info.get("journal_name", "")
    else:
        journal_name = jnames[0]
        if len(jnames) == 2:
            journal_name_alt = jnames[1]
    if len(journal_name_alt) == 0:
        journal_name_alt = journal_info.get("journal_name_alt", "")
    data["journal_raw_id"] = src_data.get("periodicalid", "")
    data["journal_name"] = journal_name
    data["journal_name_alt"] = journal_name_alt
    issn = src_data.get("issn", "")
    if len(issn) == 0:
        issn = journal_info.get("issn", "")
    # Strip Excel-export artifacts ("_x000d_" carriage-return residue).
    data["issn"] = issn.replace('_x000d_','').replace('_x000','').strip()
    cnno = src_data.get("cn", "").replace('_x000d_','').replace('_x000','').strip()
    if len(cnno) == 0:
        cnno = journal_info.get("cnno", "")
    data["cnno"] = cnno
    data["country"] = "CN"
    language = "ZH"
    # "英文" means "English" in the journal's language field.
    if "英文" in journal_info["language"]:
        language = "EN"
    data["language"] = language
    # Abstract: first entry primary, second (if present) the alternate language.
    abstract_ = ""
    abstract_alt = ""
    tmps_abs = src_data.get("abstractList", "")
    if len(tmps_abs) > 0:
        abstract_ = tmps_abs[0]
        if len(tmps_abs) == 2:
            abstract_alt = tmps_abs[1]
    data["abstract"] = cleanSemicolon(abstract_)
    data["abstract_alt"] = cleanSemicolon(abstract_alt)
    data["doi"] = getJsonVal(src_data, "doi").replace(' ', '').replace('－', '-')
    title = ""
    title_alt = ""
    # NOTE(review): debug print left in production path — consider removing.
    print(lngid)
    tmps_title = src_data.get("titleList", "")
    if len(tmps_title) > 0:
        title = tmps_title[0]
        if len(tmps_title) == 2:
            title_alt = tmps_title[1]
    # Promote the alternate title when the primary one is missing.
    if len(title) < 1 and len(title_alt) > 0:
        title = title_alt
        title_alt = ""
    sub_title_list = src_data.get("subtitleList", [])
    sub_title = ''
    if sub_title_list:
        sub_title = sub_title_list[0]
    data["title"] = cleanSemicolon(title+sub_title)
    data["title_alt"] = cleanSemicolon(title_alt)
    data["author"] = getJsonVal(src_data, "creatorList")
    # Added 2025-11-10
    data["corr_author"] = ';'.join(src_data.get('correspondingauthorList',[]))
    data["organ"] = getJsonVal(src_data, "originalorganizationList")
    pub_date = getJsonVal(src_data, "publishdate")
    if len(pub_date) > 0:
        pub_date = pub_date.split(" ")[0].replace("-", "")
    # Changed 2025-03-05: pub_year is now always sliced from pub_date.
    # pub_year = getJsonVal(src_data, "publishyear")
    # if pub_year == "":
    pub_year = pub_date[0:4]
    pub_date_alt = getJsonVal(src_data, "metadataonlinedate")
    if len(pub_date_alt) > 0:
        pub_date_alt = pub_date_alt.split(" ")[0].replace("-", "")
        # Fall back to the online date when no publish date exists at all.
        if len(pub_date) == 0 and len(pub_year) == 0:
            pub_date = pub_date_alt
            pub_year = pub_date[0:4]
    # revision_date = getJsonVal(src_data, "lastmodifiedtime")
    # if len(revision_date) > 0:
    #     revision_date = revision_date.split(" ")[0].replace("-", "")

    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["pub_date_alt"] = pub_date_alt
    # data["revision_date"] = revision_date
    # "volum" is the feed's own (misspelled) key name.
    data["vol"] = getJsonVal(src_data, "volum")
    data["num"] = getJsonVal(src_data, "issue")
    clc_no = ""
    clc_no_1st = ""
    tmps_cls = src_data.get("originalclasscodeList", "")
    if len(tmps_cls) > 0:
        clc_no = cleanSemicolon(";".join(tmps_cls))
        clc_no_1st = cleanSemicolon(tmps_cls[0])
    data["clc_no"] = clc_no
    data["clc_no_1st"] = clc_no_1st
    data["clc_machine"] = cleanSemicolon(getJsonVal(src_data, "machinedclasscodeList"))
    data["keyword"] = getJsonVal(src_data, "keywordsList")
    data["keyword_alt"] = getJsonVal(src_data, "foreignkeywordsList")
    # Modified 2024-11-05: when keyword is empty and the title contains no
    # Chinese characters, fall back to foreignkeywordsList, then
    # machinedkeywordsList.
    if not data["keyword"] and not bool(re.search(r'[\u4e00-\u9fa5]', data["title"])):
        data["keyword"] = getJsonVal(src_data, "foreignkeywordsList")
    if not data["keyword"] and not bool(re.search(r'[\u4e00-\u9fa5]', data["title"])):
        data["keyword"] = getJsonVal(src_data, "machinedkeywordsList")
    data["keyword_machine"] = getJsonVal(src_data, "machinedkeywordsList")
    # Parse "begin-end,jump" style page strings into their three parts.
    jump_page = ""
    begin_page = ""
    end_page = ""
    line = getJsonVal(src_data, "page")
    data["page_info"] = line
    idx = line.find(',')
    if idx > 0:
        jump_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # keep only the part before the comma
    idx = line.find('-')
    if idx > 0:
        end_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # keep only the part before the hyphen
    begin_page = line.strip()
    if len(end_page) < 1:
        end_page = begin_page
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["page_cnt"] = getJsonVal(src_data, "pageno")
    down_cnt = getJsonVal(src_data, "downloadcount")
    if down_cnt == "" or not down_cnt.isdigit():
        down_cnt = "0"
    data["down_cnt"] = "{}@{}".format(down_cnt, down_date)
    pv_cnt = getJsonVal(src_data, "metadataviewcount")
    if pv_cnt == "" or not pv_cnt.isdigit():
        pv_cnt = "0"
    data["pv_cnt"] = "{}@{}".format(pv_cnt, down_date)
    data["column_info"] = getJsonVal(src_data, "columnList")
    data["src_db"] = getJsonVal(src_data, "singlesourcedb")
    # Boolean-ish source fields arrive as the strings "True"/"False".
    is_oa = "0"
    if getJsonVal(src_data, "isoa") == "True":
        is_oa = "1"
    data["is_oa"] = is_oa
    fulltext_type = ""
    if getJsonVal(src_data, "hasfulltext") == "True":
        fulltext_type = "pdf"
    data["fulltext_type"] = fulltext_type
    data["author_1st"] = getJsonVal(src_data, "firstcreator")
    data["author_alt"] = getJsonVal(src_data, "foreigncreatorList")
    data["fund"] = getJsonVal(src_data, "fundList")
    # Validation: require a usable rawid and at least one title.
    status = "FAILED"
    err_msg = ""
    code = 7
    if len(data["rawid"]) < 1:
        err_msg = "wanfangjournal_wanfangarticle_etl_callback 解析rawid出错"
    elif data["rawid"].startswith("pre_"):
        err_msg = "wanfangjournal_wanfangarticle_etl_callback 解析rawid 以pre_打头"
    elif len(data["title"]) < 1 and len(data["title_alt"]) < 1:
        err_msg = "wanfangjournal_wanfangarticle_etl_callback 解析title出错"
    # Changed 2025-04-07: the src_db / journal_raw_id prefix checks below are
    # no longer applied.
    # elif data["journal_raw_id"] not in ('ddjysjyjxyj','rsybl') and (len(data["src_db"]) < 1 or (
    #         "WF" not in data["src_db"] and "ISTIC" not in data["src_db"] and "CMA" not in data["src_db"])):
    #     err_msg = "wanfangjournal_wanfangarticle_etl_callback 解析src_db出错"
    #     code = 11
    # elif not data["rawid"].lower().startswith(data["journal_raw_id"].lower()):
    #     err_msg = "wanfangjournal_wanfangarticle_etl_callback 解析详情页rawid不以journal_raw_id打头"
    else:
        status = "SUCCESS"
    if status == "FAILED":
        result.status = status
        result.err_msg = err_msg
        result.code = code
        return result
    save_data = []
    save_data.append({"table": "journal_latest", "data": data})
    # Duplicate the record for any extra sub-databases the journal belongs to.
    extra_meta = get_extra_meta(data, jrawids_dic)
    save_data.extend(extra_meta)
    ref_id = ""
    list_ref = []
    idx = 0
    ref_raw = json.loads(ref_raw.json())
    # Reference resource kind -> (citation type letter, target sub_db_id).
    stdict = {
        "periodical": ["J","00004"],
        "thesis": ["D","00005"],
        "conference": ["C","00105"],
        "patent": ["P","00052"],
        "standard": ["S","00030"],
        "book": ["M",""]
    }
    for k, v in ref_raw["page_html"].items():
        # NOTE(review): v.get("html") may be None, which would make the
        # `in` test raise TypeError — presumably html is always set; verify.
        if v and "Resource" in v.get("html"):
            detail_dict = json.loads(v["html"])["Resource"]
            ref_down_date = v["down_date"].split(" ")[0].replace("-", "")
            for items in detail_dict:
                if len(items.keys()) == 0:
                    continue
                idx += 1
                ref_one = {}
                # Each item is {<resource-kind>: <payload>}.
                key = list(items.keys())[0]
                dicts_one = items[key]
                sub_info = stdict.get(key)
                if sub_info is None:
                    continue
                ref_one["sub_db_id"] = sub_info[1]
                ref_one["cited_rawid"] = rawid
                ref_one["cited_lngid"] = lngid
                # Reference lngid = article lngid + zero-padded ordinal.
                ref_lngid = "{}{}".format(lngid, str(idx).zfill(4))
                ref_one["lngid"] = ref_lngid
                ref_one["keyid"] = ref_lngid
                ref_one["strtype"] = sub_info[0]
                ref_one = wanfangarticle_ref_parse(ref_one, dicts_one)
                ref_one.pop("sub_db_id")
                # Keep only references that produced a displayable citation.
                if len(ref_one["refer_text_site"]) > 1:
                    ref_id = ref_id + ref_lngid + ";"
                    list_ref.append(ref_one)
    ref_data = {}
    ref_cnt = len(list_ref)
    if ref_cnt > 0:
        ref_data["keyid"] = lngid
        ref_data["lngid"] = lngid
        ref_data["source_type"] = source_type
        ref_data["sub_db_id"] = sub_db_id
        ref_data["pub_year"] = pub_year
        ref_data["batch"] = batch
        ref_data["down_date"] = down_date
        ref_data["is_deprecated"] = "0"
        ref_data["ref_cnt"] = str(ref_cnt)
        # Drop the trailing ';' from the accumulated id list.
        ref_data["ref_id"] = ref_id[:-1]
        ref_data["refer_info"] = list_ref
        save_data.append({"table": "journal_ref_latest", "data": ref_data})
    result.save_data = save_data
    return result


def get_extra_meta(data, jrawids_dic):
    """Build extra journal_latest rows for additional Wanfang sub-databases.

    For each candidate sub-database (currently only "00288"), the journal id
    list stored in *jrawids_dic* decides membership; matching articles are
    cloned with that sub-database's identifiers. DOI-style rawids ("10."
    prefix) are excluded. Returns a list of {"table", "data"} dicts.
    """
    rows = []
    for src_sub_id in ("00288",):
        cfg = json.loads(jrawids_dic[src_sub_id])
        id_pool = cfg["jrawids"]
        meta = cfg["info"]
        in_pool = data["journal_raw_id"] + ";" in id_pool
        if not in_pool or data["rawid"].startswith("10."):
            continue
        clone = deepcopy(data)
        target_sub_id = meta["sub_db_id"]
        new_lngid = BaseLngid().GetLngid(target_sub_id, data["rawid"])
        clone["lngid"] = new_lngid
        clone["keyid"] = new_lngid
        clone["product"] = meta["product"]
        clone["sub_db"] = meta["sub_db"]
        clone["sub_db_id"] = target_sub_id
        clone["provider"] = meta["provider"]
        clone["zt_provider"] = meta["zt_provider"]
        rows.append({"table": "journal_latest", "data": clone})
    return rows


def wanfangarticle_ref_parse(data, ref_json):
    """Fill one reference (citation) record from its raw Wanfang JSON.

    *data* arrives pre-seeded with sub_db_id / lngid / strtype keys and is
    mutated in place; the same dict is returned. strtype is the citation
    type letter (J/D/C/P/S/M) and drives how refer_text_site, the
    human-readable citation string, is assembled.
    """
    strtype = data["strtype"]
    title = getJsonVal(ref_json, "titleList")
    # '%' marks the start of trailing noise in titles; cut it off.
    idx = title.find('%')
    if idx > 0:
        title = title[0:idx].strip()
    title = cleanSemicolon(title)
    source_name = getJsonVal(ref_json, "periodicaltitleList")
    # '%' separates authors in the raw feed; normalize to ';'.
    author = cleanSemicolon(getJsonVal(ref_json, "creatorList").replace("%", ";"))
    pub_year = str(ref_json.get("publishyear", ""))
    vol = getJsonVal(ref_json, "volum")
    num = getJsonVal(ref_json, "issue")
    publisher = getJsonVal(ref_json, "publisher")
    page_info = getJsonVal(ref_json, "page")
    doi = getJsonVal(ref_json, "doi").replace(' ', '').replace('－', '-')
    data["title"] = title
    data["source_name"] = source_name
    data["author"] = author
    data["pub_year"] = pub_year
    data["vol"] = vol
    data["num"] = num
    data["publisher"] = publisher
    data["doi"] = doi
    # Parse "begin-end,jump" style page strings into their three parts.
    jump_page = ""
    begin_page = ""
    end_page = ""
    line = page_info
    data["page_info"] = line
    idx = line.find(',')
    if idx > 0:
        jump_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # keep only the part before the comma
    idx = line.find('-')
    if idx > 0:
        end_page = line[idx + 1:].strip()
        line = line[0:idx].strip()  # keep only the part before the hyphen
    begin_page = line.strip()
    if len(end_page) < 1:
        end_page = begin_page
    # NOTE(review): page_info was already stored above; this reassignment
    # writes the same value again and is redundant.
    data["page_info"] = page_info
    data["jump_page"] = jump_page
    data["begin_page"] = begin_page
    data["end_page"] = end_page

    old_linked_id = ref_json.get("wanId", "")
    # A '^' inside wanId marks an unusable identifier.
    if "^" in old_linked_id:
        old_linked_id = ""
    data["old_linked_id"] = old_linked_id
    linked_id = ""
    if len(old_linked_id) > 0 and len(data["sub_db_id"]) > 0:
        linked_id = BaseLngid().GetLngid(data["sub_db_id"], old_linked_id)
    data["linked_id"] = linked_id
    data["refer_text_raw"] = json.dumps(ref_json, ensure_ascii=False)
    refer_text_site = ""
    if strtype == "S":
        # Standards: "<source>,<title>[S]."
        if len(source_name) > 0:
            refer_text_site += source_name + ","
        if len(title) > 0:
            refer_text_site += title + "[{}].".format(strtype)
    else:
        # Other types: "<authors>.<title>[X].<source>,<year>,<vol>(<num>):<pages>"
        if len(author) > 0:
            refer_text_site += author.replace(";", ",") + "."
        if len(title) > 0:
            refer_text_site += title + "[{}].".format(strtype)
        if len(source_name) > 0:
            refer_text_site += source_name + ","
        if len(pub_year) > 0:
            refer_text_site += pub_year
        # Drop a dangling comma when the year was missing.
        if len(refer_text_site) > 0 and refer_text_site[-1] == ",":
            refer_text_site = refer_text_site[:-1]
        if strtype == "J":
            if len(vol) > 0:
                refer_text_site += "," + vol
            if len(num) > 0:
                refer_text_site += "({})".format(num)
        if strtype in ("J", "M", "D", "C"):
            if len(begin_page) > 0:
                refer_text_site += ":" + data["begin_page"]
                if len(data["end_page"]) > 0:
                    refer_text_site += "-" + data["end_page"]
    data["refer_text_site"] = refer_text_site
    return data


def wanfangmed_wanfangmedhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Handle a Wanfang-Med subject home (listing) page.

    Reads the parsed page from slot 1_1, seeds follow-up rows for every
    listing page (activating rows within range and deactivating those past
    the real total), and emits insert/update models that promote each
    journal found on the page to the next task stage.

    Returns an empty DealModel when slot 1_1 is absent or when the reported
    page count exceeds 200 (treated as a bogus value).

    Fix: the page-count regex now uses a raw string — the old '\\d' in a
    plain string is an invalid escape sequence that warns on modern Python.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = int(re.findall(r'\d+', data['total_page'])[0])
        if total_page > 200:
            return result
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first few pages of a listing (re)seed the page rows,
        # depending on the configured turn_page mode.
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # Shared WHERE conditions; the last entry (page_index) is the
            # one whose operator is flipped below.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate rows whose page_index exceeds the real total.
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            list_op = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op.append(op.parse_obj(item))
            du_model_bef_1.where = list_op
            result.befor_dicts.update_list.append(du_model_bef_1)
            # Re-activate rows within range; flipping the operator here is
            # safe because parse_obj copied the values into the first model.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            list_op1 = []
            for item in tmp_list:
                op = OperatorSqlModel()
                list_op1.append(op.parse_obj(item))
            du_model_bef_2.where = list_op1
            result.befor_dicts.update_list.append(du_model_bef_2)

            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / state columns before re-inserting page rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # "page" is invariant across the loop; set it once.
            sql_dict["page"] = total_page
            for page in range(page_index, total_page + 1):
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        home_json = json.loads(callmodel.sql_model.home_json)
        his = home_json.get("his", "")
        for item in data['qk_info']['children']:
            result.befor_dicts.update.update({'page': total_page})
            journal_rawid = item['url'].split('/')[-1]
            qk_name = item['title']
            # Impact factor is the text after the full-width colon.
            yinzi = item['info'].split('：')[-1]

            new_dict = dict()
            new_dict["journal_name"] = qk_name
            new_dict["yinzi"] = yinzi

            # Promote the journal to the next task stage.
            temp_info = info_dicts.copy()
            task_tag_next = temp_info["task_tag_next"]
            temp_info["task_tag"] = task_tag_next
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00288"
            temp_info["is_active"] = 1
            temp_info["subject"] = his
            temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
            di_model_next.lists.append(temp_info)

            du_model = DealUpdateModel()
            # du_model.update_no_placeholder.update({"page": max_page})
            du_model.update.update({"journal_json": json.dumps(new_dict, ensure_ascii=False),
                                    "sub_db_id": "00288",
                                    "is_active": 1})
            du_model.where.update({"journal_rawid": journal_rawid,
                                   "task_tag": temp_info["task_tag"],
                                   "task_name": callmodel.sql_model.task_name
                                   # "is_active": "0"
                                   })
            # Append the subject (his) to any existing subject list in SQL.
            du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
            result.next_dicts.update_list.append(du_model)

        result.next_dicts.insert.append(di_model_next)

    return result
