
import json
import time

import re


from parsel import Selector

from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, JournalListModel, JournalIssueModel, \
    JournalArticleModel, DealInsertModel, JournalHomeModel, EtlDealModel, DealUpdateModel

__all__ = [
    "cmacmcjournal_cmacmcjournalhome_callback",
    "cmacmcjournal_cmacmcjournallist_callback",
    "cmacmcjournal_cmacmcjournalissue_callback",
    "cmacmcjournal_cmacmcjournalarticle_callback",
    "cmacmcjournal_cmacmcjournalarticle_etl_callback"
]


def cmacmcjournal_cmacmcjournalhome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse the publisher's journal-list page.

    Queues one journal record per link whose host is a ``*.cma-cmc.com.cn``
    sub-domain (the sub-domain becomes ``journal_rawid``) and re-activates
    journals that already exist in the table.
    """
    result = DealModel()
    # Dots are escaped and "https?" used so that off-site hosts such as
    # "www.biomat-trans.com" can never match; the original pattern
    # r"https*://(.*?).cma-cmc.com.cn" let "." match any character and
    # "https*" accept "httpss...".
    rule = re.compile(r"https?://(.*?)\.cma-cmc\.com\.cn")
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "sub_db_id": sql_model.sub_db_id
                  }
    data = para_dicts['data']['1_1']['data']
    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it
    for one_data in data["children"]:
        journal_name = one_data["journal_name"]
        url = one_data["url"]
        journal_rawid_list = re.findall(rule, url)
        # e.g. http://www.biomat-trans.com/EN/2096-112X/home.shtml is a dead
        # off-site link (404) -> skip anything that does not match the rule
        if len(journal_rawid_list) == 0:
            continue
        journal_rawid = journal_rawid_list[0]
        temp = info_dicts.copy()
        # the next stage's tag becomes this record's tag
        temp["task_tag"] = temp.pop("task_tag_next")
        temp["journal_name"] = journal_name
        temp["url"] = url
        new_temp = {
            "journal_rawid": journal_rawid,
            "task_tag": temp.pop("task_tag"),
            "task_name": temp.pop("task_name"),
            "sub_db_id": temp.pop("sub_db_id"),
            "is_active": 1,
        }
        # whatever is left in temp (journal_name, url) is the JSON payload
        new_temp["journal_json"] = json.dumps(temp, ensure_ascii=False)
        d_i_model.lists.append(new_temp)

        # re-activate the row when it already exists (insert_pre presumably
        # is an insert-ignore, so the insert alone would not update it)
        du_model = DealUpdateModel()
        du_model.update.update({
            "is_active": 1})
        du_model.where.update({"journal_rawid": journal_rawid,
                               "task_tag": new_temp["task_tag"],
                               "task_name": callmodel.sql_model.task_name
                               })
        result.next_dicts.update_list.append(du_model)
    result.next_dicts.insert.append(d_i_model)
    return result


def cmacmcjournal_cmacmcjournallist_callback(callmodel: CallBackModel[JournalListModel]) -> DealModel:
    """Parse a journal's landing pages.

    Merges journal metadata into ``journal_json`` from up to four sub-pages:
    1_1 redirect stub (home/current/old-volume URLs), 1_2 info box
    (staff, ISSN/CN numbers, contacts), 1_3 price, 1_4 back-volume list,
    which also queues one issue record per (year, issue) found.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "journal_rawid": sql_model.journal_rawid,
                  "sub_db_id": sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next,
                  }

    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it
    journal_json_dicts = json.loads(sql_model.journal_json)
    if "1_1" in para_dicts["data"]:
        # the entry URL only serves a JS redirect; extract the real home URL
        html = callmodel.para_dicts['data']['1_1']['html']
        url = journal_json_dicts["url"]
        rule = re.compile(r"window.location.href = '(.*?)'")
        url_new_list = re.findall(rule, html)
        url_home = url_new_list[0]
        journal_json_dicts["url_home"] = url_home
        journal_json_dicts["url_current"] = url_home.replace("home", "current")
        journal_json_dicts["url_oldvolumn"] = url.rstrip("/") + "/CN/article/showOldVolumn.do"

    if "1_2" in para_dicts["data"]:
        # info-box rows look like "label：value" (assumes exactly one
        # full-width colon per row -- TODO confirm against the site)
        datas = para_dicts["data"]["1_2"]["data"]["children"]
        for data in datas:
            infos = data["infos"]
            a_key, a_value = infos.split("：")
            a_key = a_key.replace(" ", "")
            a_value = a_value.replace("\u3000", "")
            if a_key == "主管":
                journal_json_dicts["director_dept"] = a_value
            elif a_key == "主办":
                journal_json_dicts["publisher"] = a_value
            elif a_key == "编辑":
                journal_json_dicts["edit_room"] = a_value
            elif a_key == "出版":
                # publisher may already hold the sponsor value; append to it
                journal_json_dicts["publisher"] = (journal_json_dicts.get("publisher", "") + ";" + a_value).strip(";")
            elif a_key == "社长":
                journal_json_dicts["proprieter"] = a_value
            elif a_key == "总编辑":
                journal_json_dicts["chief_editor"] = a_value.replace(" ", ";")
            elif a_key == "主任":
                journal_json_dicts["edit_irector"] = a_value
            elif a_key == "国内刊号":
                cnnos = re.findall(r"CN(.*)", a_value)
                journal_json_dicts["cnno"] = cnnos[0].strip()
            elif a_key == "国际刊号":
                issns = re.findall(r"ISSN(.*)", a_value)
                journal_json_dicts["issn"] = issns[0].strip()
            elif a_key == "邮箱":
                journal_json_dicts["email"] = a_value
            elif a_key == "电话":
                journal_json_dicts["tel_code"] = a_value
            elif a_key == "地址":
                journal_json_dicts["edit_office_addr"] = a_value

    if "1_3" in para_dicts["data"]:
        data = para_dicts["data"]["1_3"]["data"]
        journal_json_dicts["unit_price"] = data["price"]

    if "1_4" in para_dicts["data"]:
        issue_json = {"journal_name": journal_json_dicts["journal_name"]}

        rule = re.compile(r"(\d{4})")
        data = para_dicts["data"]["1_4"]["data"]["children"]
        # carries the last seen year across rows; initialised so a row with
        # an empty "years" cell can no longer raise NameError below
        pub_year = ""
        for k, one_data in enumerate(data):
            # the first row of the volume table is a blank header -> skip it
            if k == 0:
                continue
            years = one_data["years"]
            if years != "":
                pub_year = re.search(rule, years).group()
            for issue_data in one_data["children"]:
                if issue_data["issues"] == "":
                    continue
                temp = info_dicts.copy()
                # the issue stage uses the last tag of the ";"-separated chain
                temp["task_tag"] = temp.pop("task_tag_next").split(";")[-1]
                num = issue_data["issues"].replace("No.", "").strip()
                issue_json["url"] = issue_data["url"]
                temp.update(
                    {'pub_year': pub_year, 'num': num,
                     'issue_json': json.dumps(issue_json, ensure_ascii=False)})
                d_i_model.lists.append(temp)
    journal_json = json.dumps(journal_json_dicts, ensure_ascii=False)
    result.befor_dicts.update.update({'journal_json': journal_json})
    result.next_dicts.insert.append(d_i_model)
    return result


def cmacmcjournal_cmacmcjournalissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Parse an issue's table of contents.

    Queues one article record per entry, keyed by DOI when present,
    otherwise by the "CN/"-relative part of the article URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "journal_rawid": sql_model.journal_rawid,
                  "pub_year": sql_model.pub_year,
                  "num": sql_model.num,
                  "sub_db_id": sql_model.sub_db_id
                  }
    data_1_1 = para_dicts["data"].get("1_1")
    if data_1_1 is None:
        # nothing was parsed for this issue; keep the console marker
        print("******************")
        return result
    datainfo = data_1_1["data"]["children"]
    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it
    for data in datainfo:
        title = data["title"].replace("\t", "").strip()
        url = data["href"].replace("\t", "").strip()
        zuozhe = data["zuozhe"].replace("\t", "").strip()
        nianqi = data["nianqi"].replace("\t", "").strip()
        doi = data["doi"].replace("\t", "").strip()
        temp = info_dicts.copy()
        temp["task_tag"] = temp.pop("task_tag_next")
        temp["title"] = title
        temp["url"] = url
        temp["zuozhe"] = zuozhe
        temp["nianqi"] = nianqi
        temp["doi"] = doi
        if doi == "":
            rawid = url.split("CN/")[-1]
        else:
            rawid = doi
        journal_rawid = sql_model.journal_rawid
        # This journal's DOIs collide with other journals' DOIs, so prefix
        # them unless they carry the journal's own ISSN (suspected data
        # problem on the publisher side).  If more journals turn out to
        # collide, replace this with a journal_rawid -> issn mapping.
        if journal_rawid == "zhwsgflbdzzz" and "2095-8765" not in rawid:
            rawid = f"{journal_rawid}_{rawid}"
        new_temp = {
            "rawid": rawid,
            "task_tag": temp.pop("task_tag"),
            "task_name": temp.pop("task_name"),
            "sub_db_id": temp.pop("sub_db_id"),
        }
        new_temp["article_info_json"] = json.dumps(temp, ensure_ascii=False)

        # an empty rawid means a video entry with no article page - skip it
        if rawid != "":
            d_i_model.lists.append(new_temp)

    result.next_dicts.insert.append(d_i_model)
    return result


def cmacmcjournal_cmacmcjournalarticle_callback(callmodel: CallBackModel[JournalArticleModel]) -> DealModel:
    """Article-stage callback: currently a stub that returns an empty
    DealModel (article processing happens in the ETL callback below)."""
    result = DealModel()
    # print(callmodel)
    return result


def cleanSemicolon(text):
    """Normalize a ";"-separated list: full-width separators become
    half-width, no blanks around separators, no empty items, and no
    leading/trailing separator or whitespace."""
    cleaned = text.replace('；', ';')            # full-width -> half-width
    cleaned = re.sub(r"\s+;", ";", cleaned)     # blanks before a separator
    cleaned = re.sub(r";\s+", ";", cleaned)     # blanks after a separator
    cleaned = re.sub(r";{2,}", ";", cleaned)    # collapse runs of separators
    # after the collapse there is at most one ";" at each end
    return cleaned.strip(";").strip()


def clean_author(text):
    """Normalize a comma-separated author list: drop footnote markers,
    wrap affiliation digits as "[n]" and separate authors with ";"."""
    for junk in ("†()", "()", "*"):
        text = text.replace(junk, "")
    text = text.strip(",")
    text = re.sub(r"(\d+)", r"[\1]", text)      # 1 -> [1]
    text = text.replace(",[", "[")              # glue brackets to the name
    text = text.replace(",", ";")
    text = re.sub(r";+", ";", text)
    return text.strip()

def clean_author_cn(text):
    """Normalize a Chinese author string and collect e-mail hints.

    *text* is a comma-separated author list where an entry may carry a
    "(mailto:...)" marker and trailing affiliation digits.  Returns
    (cleaned_authors, email_info): cleaned_authors is ";"-separated with
    affiliation numbers wrapped as "[n]"; email_info is a ";"-joined list
    of "email: author" pairs.
    """
    email_list = []
    author = ""
    for part in text.split(","):
        if "mailto:" in part:
            address = re.sub(r".*?\(\s*mailto:(.*?)\)", r"\1", part)
            candidate = re.sub(r"(.*?)†*\(\s*mailto:.*", r"\1", part).strip()
            # a real name is assumed to be at least 2 characters long
            if len(candidate) > 1:
                author = candidate
            email_list.append(f"{address}: {author.strip()}")
        else:
            candidate = re.sub(r"(.*?)\d+", r"\1", part)
            if len(candidate) > 1:
                author = candidate
    email_info = ";".join(email_list)

    # strip parenthesised notes and markers, then normalise separators
    text = re.sub(r"(\(.*?\))", "", text)
    for junk in ("†()", "†", "()", "*"):
        text = text.replace(junk, "")
    text = text.strip(",")
    text = re.sub(r"(\d+)", r"[\1]", text)
    text = text.replace(",[", "[")
    text = text.replace(",", ";")
    text = re.sub(r";+", ";", text)
    return text.strip(), email_info


def clean_organ(text):
    """Normalize an organization string: split fused postal codes, wrap
    affiliation indices as "[n]" and separate entries with ";"."""
    text = text.replace("†()", "").strip(",")
    # a "digits." token longer than 6 chars is taken to be a 6-digit postal
    # code fused with the next affiliation index: "4300711." -> "430071;1."
    for token in re.findall(r"(\d+\.)", text, re.S):
        if len(token) > 6:
            text = text.replace(token, f"{token[:6]};{token[6:]}", 1)
    text = re.sub(r"(\d+)\.", r"[\1]", text)    # "1." -> "[1]"
    text = text.replace("[", ";[").lstrip(";").replace(";;", ";").replace("] ", "]")
    return text.strip()


def clean_some_date(date_infos):
    """Extract dates, corresponding author and fund from Chinese meta rows.

    *date_infos* is a list of ``{"date_info": "..."}`` dicts; when a label
    appears in several rows the last occurrence wins.  Returns
    (recv_date, pub_date, accept_date, revision_date, corr_author, fund)
    where dates are "YYYYMMDD" or "" when absent.
    """
    recv_date = ""
    pub_date = ""
    accept_date = ""
    revision_date = ""
    corr_author = ""
    fund = ""
    # compile once, outside the loop (the original recompiled per item)
    rule_resv = re.compile(r"收稿日期:(\d{4}-\d{2}-\d{2})")
    rule_pub = re.compile(r"出版日期:(\d{4}-\d{2}-\d{2})")
    rule_accept = re.compile(r"接受日期:(\d{4}-\d{2}-\d{2})")
    rule_revision = re.compile(r"修订日期:(\d{4}-\d{2}-\d{2})")
    rule_corr_author = re.compile(r"通[信讯]作者:(.*)")
    rule_fund = re.compile(r"基金资助:(.*)")
    for date_info in date_infos:
        info = date_info["date_info"]
        text_resv = rule_resv.findall(info)
        text_pub = rule_pub.findall(info)
        text_accept = rule_accept.findall(info)
        text_revision = rule_revision.findall(info)
        text_corr_author = rule_corr_author.findall(info)
        text_fund = rule_fund.findall(info)
        if len(text_resv) != 0:
            recv_date = text_resv[0].replace("-", "")
        if len(text_pub) != 0:
            pub_date = text_pub[0].replace("-", "")
        if len(text_accept) != 0:
            accept_date = text_accept[0].replace("-", "")
        if len(text_revision) != 0:
            revision_date = text_revision[0].replace("-", "")
        if len(text_corr_author) != 0:
            corr_author = text_corr_author[0]
        if len(text_fund) != 0:
            fund = text_fund[0]
    return recv_date, pub_date, accept_date, revision_date, corr_author, fund


def clean_some_date_en(date_infos):
    """Extract English fund and author-intro text from meta rows.

    *date_infos* is a list of ``{"date_info": "..."}`` dicts; when a label
    appears in several rows the last occurrence wins.  Returns
    (fund_alt, intro_alt), each "" when absent.
    """
    fund_alt = ""
    intro_alt = ""
    # compile once, outside the loop (the original recompiled per item)
    rule_fund = re.compile(r"Supported by:(.*)")
    rule_intro = re.compile(r"About author:(.*)")
    for date_info in date_infos:
        info = date_info["date_info"]
        text_fund = rule_fund.findall(info)
        text_intro = rule_intro.findall(info)
        if len(text_fund) != 0:
            fund_alt = text_fund[0]
        if len(text_intro) != 0:
            intro_alt = text_intro[0]
    return fund_alt, intro_alt


def checkExist(obj):
    """Return True when *obj* is neither None nor empty (by ``len``)."""
    return obj is not None and len(obj) > 0


def cmacmcarticle_ref_parse(data, sel_ref):
    """Copy the reference fields from *sel_ref* into *data* (mutated in
    place and also returned): raw text as-is, site text with whitespace
    collapsed and "&nbsp" residue removed, plus the reference number."""
    data["refer_text_raw"] = sel_ref["ref_raw"]
    cleaned = re.sub(r"\s+", " ", sel_ref["ref"])
    for junk in ("\n", "\r", "&nbsp"):
        cleaned = cleaned.replace(junk, "")
    data["refer_text_site"] = cleaned.strip()
    data["ref_num"] = sel_ref["ref_num"]
    return data


def cmacmcarticle_ref_info_parse(meta, allref, ref_down_date):
    """Build one reference record per non-empty entry in *allref*.

    *meta* is the article record (supplies ids/product fields).  Returns
    (ref_id, list_ref): ref_id is the ";"-joined reference lngids with a
    trailing ";" (the caller strips it), list_ref the record dicts.
    """
    ref_id = ""
    list_ref = []
    idx = 0
    # One batch stamp for the whole list, hoisted out of the loop.
    # NOTE: the original used "%Y%m%d_%H%I%S" -- %I is the 12-hour-clock
    # hour, so it stamped hour/12h-hour/second; "%H%M%S" matches the batch
    # format used elsewhere in this module.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    for ref_item in allref:
        if ref_item.get("ref_raw", "") != "":
            idx += 1
            ref_one = {
                "is_deprecated": "0",
                "batch": batch,
                "sub_db_id": meta["sub_db_id"],
                "product": meta["product"],
                "sub_db": meta["sub_db"],
                "down_date": ref_down_date,
                "provider": meta["provider"],
                "cited_rawid": meta["rawid"],
                "cited_lngid": meta["lngid"],
                "strtype": "",
                "ref_index": str(idx),
            }
            ref_one = cmacmcarticle_ref_parse(ref_one, ref_item)
            ref_lngid = "{}{}".format(meta["lngid"], str(idx).zfill(4))
            ref_one["lngid"] = ref_lngid
            ref_one["keyid"] = ref_lngid
            ref_id = ref_id + ref_lngid + ";"
            list_ref.append(ref_one)

    return ref_id, list_ref


def clean_page_info(text):
    """Split a "year,vol(num):pages" citation string into its page parts.

    The page section is everything after the last ":".  "b-e" yields
    begin/end pages and "+j" a jump page; vol is the digit run just before
    "(" somewhere after a 4-digit year.  Returns
    (begin_page, end_page, jump_page, vol, page_info_raw).
    """
    jump_page = ""
    end_page = ""
    pages = text.split(":")[-1].rstrip(".").strip()
    raw_pages = pages
    plus_at = pages.find('+')
    if plus_at > 0:  # a '+' at position 0 is deliberately left untouched
        jump_page = pages[plus_at + 1:].strip()
        pages = pages[:plus_at].strip()
    dash_at = pages.find('-')
    if dash_at > 0:  # same for a leading '-'
        end_page = pages[dash_at + 1:].strip()
        pages = pages[:dash_at].strip()
    begin_page = pages.strip()
    vol_hits = re.findall(r"\d{4}.*?(\d+)\(\d+", text)
    vol = vol_hits[0] if vol_hits else ""
    return begin_page, end_page, jump_page, vol, raw_pages


def cmacmcjournal_cmacmcjournalarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one article page into the final warehouse records.

    Builds the `journal_latest` row from the parsed fields in
    para_dicts["data"]["1_1"], the crawl-time metadata in sql_model and the
    raw HTML in down_model (used only to detect available fulltext types),
    plus a `journal_ref_latest` row when the page lists references.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts

    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_info_json"])
    journal_info = sql_model["journal_info"]

    data = {}
    sub_db_id = "00638"
    datainfo = para_dicts["data"]["1_1"]
    title = datainfo["title"]
    title_alt = datainfo["title_alt"]
    abstract = datainfo["abstract"]
    # the site's English abstract block repeats the Chinese abstract; drop it
    abstract_alt = datainfo["abstract_alt"].replace(abstract, "")
    author, email_info = clean_author_cn(datainfo["author"])
    organ = clean_organ(datainfo["organ"])
    # clean_author_cn is reused for the alt field; its email result is not
    # reliable there and is intentionally discarded
    author_alt, _ = clean_author_cn(datainfo["author_alt"])
    organ_alt = clean_organ(datainfo["organ_alt"])
    date_infos = datainfo["date_infos"]["children"]
    date_infos_en = datainfo["date_infos_en"]["children"]
    recv_date, pub_date, accept_date, revision_date, corr_author, fund = clean_some_date(date_infos)
    fund_alt, intro_alt = clean_some_date_en(date_infos_en)

    keyword = datainfo["keyword"]
    keyword_alt = datainfo["keyword_alt"]
    column_info = datainfo["column_info"]

    judge = datainfo["judge"]
    doi_new = datainfo["doi_new"]
    author_intro = datainfo["author_intro"] + ";" + intro_alt
    # video-library entries keep their title/keywords in dedicated fields
    if judge == "视频资源库":
        title = datainfo["title_neirong"]
        keyword = datainfo["keyword_neirong"]
    doi = article_json["doi"]
    if doi == "":
        doi = doi_new
    begin_page, end_page, jump_page, vol, page_info = clean_page_info(article_json["nianqi"])

    # detect which fulltext formats the page offers from the raw HTML
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)
    fulltext_type = ""
    quanwen = sel.xpath("//div[@class='j-anniu']/a[@class='j-html' and  contains(string(), '全文')]")
    if checkExist(quanwen):
        fulltext_type += ";html"
    pdf = sel.xpath("//div[@class='j-anniu']/a[@class='j-pdf' and  contains(string(), '下载PDF')]")
    if checkExist(pdf):
        fulltext_type += ";pdf"
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["rawid"] = sql_model["rawid"]
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    data["rawid_alt"] = ""
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["is_deprecated"] = "0"
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["title"] = title.replace("&nbsp", "").replace("#br#", "").strip()
    data["title_alt"] = title_alt.strip()
    data["begin_page"] = begin_page.strip()
    data["end_page"] = end_page.strip()
    data["jump_page"] = jump_page.strip()
    data["page_info"] = page_info.strip()
    data["vol"] = vol.strip()
    data["num"] = article_json["num"].strip()

    data["product"] = "CMACMC"
    data["sub_db"] = "QK"
    data["provider"] = "CMA"
    data["zt_provider"] = "cmacmcjournal"
    data["sub_db_id"] = sub_db_id

    data["source_type"] = "3"
    data["provider_url"] = article_json["url"].strip()
    data["journal_raw_id"] = sql_model["journal_rawid"].strip()
    data["journal_name"] = journal_info.get("journal_name", "").strip()
    data["journal_name_alt"] = journal_info.get("journal_name_alt", "").strip()
    data["issn"] = journal_info.get("issn", "").strip()
    data["cnno"] = journal_info.get("cnno", "").strip()
    data["abstract"] = abstract.strip()
    data["abstract_alt"] = abstract_alt.strip()
    # without an affiliation list the "[n]" markers are meaningless - drop them
    new_organ = organ.strip()
    if len(new_organ) == 0:
        new_author = re.sub(r"\[[\d+,]+\]", "", author.strip())
        new_author = re.sub(r";+", ";", new_author)
    else:
        new_author = author.strip()
    new_organ_alt = organ_alt.strip()
    if len(new_organ_alt) == 0:
        new_author_alt = re.sub(r"\[[\d+,]+\]", "", author_alt.strip())
        new_author_alt = re.sub(r";+", ";", new_author_alt)
    else:
        new_author_alt = author_alt.strip()
    data["author"] = new_author
    data["author_1st"] = new_author.split(";")[0].replace("[1]", "").replace("[2]", "").strip()
    data["organ"] = organ.strip()
    data["author_alt"] = new_author_alt.strip()
    data["organ_alt"] = organ_alt.strip()
    data["recv_date"] = recv_date.strip()
    data["pub_date"] = pub_date.strip()
    data["accept_date"] = accept_date.strip()
    data["revision_date"] = revision_date.strip()
    data["corr_author"] = corr_author.strip()
    if corr_author:
        author_intro = author_intro + ";通信作者: " + corr_author
    data["fund"] = fund.strip()
    data["author_intro"] = author_intro.strip(";").strip()
    data["down_date"] = down_date.strip()
    data["latest_date"] = down_date.strip()
    data["keyword"] = keyword.replace("关键词:", "", 1).replace(",", ";").replace("，", ";").strip()
    data["keyword_alt"] = keyword_alt.replace("Key words:", "", 1).replace(",", ";").strip()
    data["column_info"] = column_info.strip()
    data["fulltext_type"] = fulltext_type.lstrip(";").strip()
    data["doi"] = doi.strip()
    data["email"] = email_info.strip()
    data["fund_alt"] = fund_alt.strip()
    pub_year = article_json["pub_year"]
    year_new = datainfo.get("year_new", "")

    if len(pub_date) > 0:
        # per lt's rule (same as 00523): when the on-page date disagrees
        # with the crawl-time year, trust the on-page date
        pub_year = pub_date[0:4]
    else:
        pub_year = year_new
        pub_date = f"{pub_year}0000"
    data["pub_date"] = pub_date.strip()
    if data["pub_date"] == "0000":
        data["is_deprecated"] = "1"
    data["pub_year"] = pub_year.strip()
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch

    result.status = "SUCCESS"
    result.err_msg = ""

    refs = datainfo["refs"]["children"]
    ref_id, list_ref = cmacmcarticle_ref_info_parse(data, refs, down_date)
    ref_cnt = len(list_ref)
    data["ref_cnt"] = str(ref_cnt)
    save_data = [{"table": "journal_latest", "data": data}]
    if ref_cnt > 0:
        ref_data = {
            "keyid": lngid,
            "lngid": lngid,
            "source_type": data["source_type"],
            "sub_db_id": sub_db_id,
            "pub_year": pub_year,
            "batch": batch,
            "down_date": down_date,
            "is_deprecated": "0",
            "ref_cnt": str(ref_cnt),
            "ref_id": ref_id[:-1],  # drop the trailing ";"
            "refer_info": list_ref,
        }
        save_data.append({"table": "journal_ref_latest", "data": ref_data})
    else:
        # the original compared the *string* data["ref_cnt"] to the int 0,
        # so ref_state was never emitted; compare the integer count instead
        result.ref_state = {
            "lngid": lngid,
            "table": "journal_ref_latest"
        }
    result.save_data = save_data
    return result
