import json
import re
import time

from re_common.vip.baseencodeid import BaseLngid

# Maps raw provider/Web-of-Science document-type strings to one-letter internal
# type codes.  Based on how the codes are used later in this module:
# "J" journal article, "C" conference item, "M" book/book chapter,
# "D" thesis/dissertation, "R" report, "N" news, "P" patent, "S" standard,
# "K" generic/other, "" no specific mapping (caller falls back to "K").
# NOTE(review): the exact semantics of each letter are inferred from usage in
# set_wos_ref_cited below — confirm against the project's type-code spec.
docType = {"abstract": "J", "Abstract of Published Item": "J", "Abstract only": "J", "addendum": "J", "Address": "K",
           "Addresses": "K", "Animal report section": "R", "Annual report": "R", "Annual report section": "R",
           "Art Exhibit Review": "J", "Article": "", "Article Book Book Chapter": "M", "Article Book Chapter": "M",
           "Article Letter": "K", "Article Meeting": "C", "Article Meeting Book Chapter": "C", "Article Software": "K",
           "Article Thesis/Dissertation": "D", "Article;Thesis/Dissertation": "D", "article-commentary": "K",
           "Audio": "K", "Autobiography": "K", "Bibliography": "J", "Biographical-Item": "", "Biography": "K",
           "Book": "", "Book Book Chapter": "M", "Book Chapter": "M", "Book chapter": "", "Book Chapter Meeting": "M",
           "Book Chapter Meeting Paper": "M", "Book Meeting": "M", "Book Meeting Book Chapter": "M", "Book Review": "",
           "book-review": "", "brief-report": "R", "Bulletin": "R", "Bulletin article": "R", "Case Reports": "R",
           "Case studies": "R", "case-report": "", "Chronology": "K", "CITED-REFERENCE": "K", "Citing item": "K",
           "Classical Article": "K", "Clinical Conference": "K", "Clinical Study": "K", "Clinical Trial": "K",
           "Clinical Trial Protocol": "K", "Clinical Trial, Phase I": "K", "Clinical Trial, Phase II": "K",
           "Clinical Trial, Phase III": "K", "Clinical Trial, Phase IV": "K", "Clinical Trial, Veterinary": "K",
           "Comment": "K", "Company Profile": "K", "Comparative Study": "K", "Compendium Datasheet": "K",
           "Conference paper": "C", "Conference Paper": "", "Conference Paper in Journal": "C",
           "Conference Paper in Journal - Original Abstracted": "C", "Conference proceedings": "C",
           "Conference Proceedings": "", "Conference Proceedings in Journal": "C",
           "Conference Proceedings in Journal - Original Abstracted": "C", "Congress": "K", "Congresses": "K",
           "Consensus Development Conference": "R", "Consensus Development Conference, NIH": "R",
           "Controlled Clinical Trial": "K", "Corrected and Republished Article": "K", "Correction": "",
           "correction": "", "Correction, Addition": "J", "Correspondence": "K", "Dance Performance Review": "K",
           "Data Paper": "K", "Data set": "K", "Data study": "K", "Database Review": "K", "Dataset": "K",
           "Dictionary": "K", "Directory": "K", "Discussion": "", "Dissertation": "K", "Dissertation/Thesis": "D",
           "Duplicate Publication": "K", "Early Access": "K", "editorial": "", "Editorial Material": "",
           "English Abstract": "K", "Equivalence Trial": "K", "Evaluation Studies": "K", "Evaluation Study": "K",
           "Excerpt": "K", "Expression of Concern": "K", "Festschrift": "K", "Fiction, Creative Prose": "K",
           "Film Review": "K", "Government Document": "R", "Government Publication": "R", "Guideline": "K",
           "Hardware Review": "K", "Historical Article": "K", "Index": "K", "Interactive Tutorial": "K",
           "Interview": "K", "Introductory Journal Article": "J", "Item About an Individual": "",
           "Item Withdrawal": "K", "Journal Article": "J", "Journal article": "", "Journal issue": "J",
           "Journal Paper": "J", "Journal Paper - Original Abstracted": "J",
           "Journal Paper - Translation Abstracted": "J", "Lecture": "K", "Lectures": "K", "Legal Case": "K",
           "Legal Cases": "K", "Legislation": "K", "Letter": "", "letter": "", "Letter Meeting": "K", "Meeting": "C",
           "Meeting Abstract": "", "Meeting Book Chapter": "C", "Meeting paper": "C", "Meeting Paper": "",
           "Meta-Analysis": "K", "Miscellaneous": "K", "Multicenter Study": "K", "Music Performance Review": "K",
           "Music Score": "K", "Music Score Review": "", "news": "N", "News Item": "K", "Newspaper Article": "N",
           "Note": "", "Obituary": "K", "Observational Study": "K", "Observational Study, Veterinary": "K",
           "Online First": "K", "oration": "K", "Other": "K", "Overall": "K", "Patent": "P",
           "Patient Education Handout": "K", "Periodical Index": "K", "Personal Narrative": "K",
           "Personal Narratives": "K", "Poetry": "K", "Portrait": "K", "Portraits": "K", "Practice Guideline": "K",
           "Pragmatic Clinical Trial": "K", "Preprint": "K", "preprint": "", "press-release": "K",
           "Proceedings Paper": "", "Publication with Expression of Concern": "K", "Published Erratum": "K",
           "Randomized Controlled Trial": "K", "Randomized Controlled Trial, Veterinary": "K",
           "rapid-communication": "", "Record Review": "K", "Report": "R", "Report Section": "R", "Repository": "R",
           "Reprint": "", "Research Support, American Recovery and Reinvestment Act": "K",
           "Research Support, N.I.H., Extramural": "K", "Research Support, N.I.H., Intramural": "K",
           "Research Support, Non-U.S. Gov't": "K", "Research Support, U.S. Gov't, Non-P.H.S.": "K",
           "Research Support, U.S. Gov't, P.H.S.": "K", "research-article": "", "Retracted Publication": "K",
           "Retraction": "", "Retraction of Publication": "K", "Review": "", "review-article": "K",
           "Scientific Integrity Review": "K", "Script": "K", "Short Paper": "K", "Software": "K",
           "Software Review": "", "Standard": "S", "Systematic Review": "K", "Technical Report": "R",
           "Theater Review": "J", "Thesis": "D", "Thesis/Dissertation": "D", "TV Review, Radio Review": "K",
           "TV Review, Radio Review, Video": "K", "Twin Study": "K", "undefined": "K", "Unspecified": "K",
           "Validation Studies": "K", "Validation Study": "K", "Video-Audio Media": "K", "Webcast": "K",
           "Webcasts": "K", "Withdrawn Publication": "K"}

# Output-schema field whitelists keyed by source_type: "3" = journal records,
# "6" = conference records (see get_out_a_meta, which projects a parsed record
# onto one of these sets).  The name is misspelled ("fileds" for "fields") but
# is kept as-is because other modules may import it.
meta_fileds = {
    "3": {"keyid", "lngid", "rawid", "rawid_alt", "doi", "provider", "product", "sub_db", "sub_db_id", "source_type",
          "provider_url", "vision", "latest_date", "is_deprecated", "title", "title_alt", "keyword", "keyword_alt",
          "keyword_machine", "clc_no_1st", "clc_no", "clc_machine", "subject_word", "subject_edu_code", "subject_edu",
          "subject", "research_field", "abstract", "abstract_alt", "abstract_type", "abstract_alt_type",
          "begin_page_sort",
          "begin_page", "end_page", "jump_page", "page_info", "doc_code", "doc_no", "raw_type", "is_informal",
          "recv_date",
          "accept_date", "revision_date", "pub_date", "process_date", "special_no", "subject_no", "volumn", "page_cnt",
          "pdf_size", "fulltext_type", "int_mark", "reprinted", "column_info", "fund_id", "fund", "fund_alt",
          "author_id",
          "orc_id", "researcher_id", "author_1st", "author", "author_raw", "author_alt", "corr_author",
          "corr_author_id",
          "email", "author_intro", "organ_id", "organ_1st", "organ", "organ_alt", "preferred_organ", "organ_area",
          "journal_id", "journal_raw_id", "gch", "book_id", "journal_clc", "journal_name", "journal_name_alt",
          "pub_year",
          "vol", "num", "real_num", "is_suppl", "issn", "eissn", "cnno", "range", "is_oa", "country", "language",
          "batch",
          "down_date", "down_cnt", "sub_db_class_name"},
    "6": {"keyid", "lngid", "rawid", "rawid_alt", "doi", "provider", "product", "sub_db", "sub_db_id", "source_type",
          "provider_url", "vision", "latest_date", "is_deprecated", "title", "title_alt", "title_sub", "keyword",
          "keyword_alt", "keyword_machine", "clc_no_1st", "clc_no", "clc_machine", "subject_word", "subject_edu_code",
          "subject_edu", "subject", "research_field", "abstract", "abstract_alt", "begin_page_sort", "begin_page",
          "end_page", "jump_page", "page_info", "doc_code", "doc_no", "process_date", "page_cnt", "pdf_size",
          "fulltext_type", "fund_id", "fund", "fund_alt", "author_id", "author_1st", "author", "author_alt",
          "corr_author_id", "corr_author", "email", "author_intro", "organ_id", "organ_1st", "organ", "organ_alt",
          "preferred_organ", "organ_area", "meeting_name", "meeting_name_alt", "meeting_record_name",
          "meeting_record_name_alt", "meeting_intro", "meeting_place", "meeting_counts", "meeting_date_raw",
          "accept_date", "recv_date", "pub_date", "publisher", "pub_place", "host_organ", "host_organ_id",
          "meeting_code", "sponsor", "society", "meeting_level", "edit_group", "chief_editor", "book_id", "issn",
          "pub_year", "vol", "num", "is_suppl", "is_oa", "country", "language", "batch", "down_date", "down_cnt",
          "sub_db_class_name"}
}

# Three-letter English month abbreviation (upper case) -> zero-padded month
# number, used when assembling YYYYMMDD date strings.
mapMonth = {
    "JAN": "01",
    "FEB": "02",
    "MAR": "03",
    "APR": "04",
    "MAY": "05",
    "JUN": "06",
    "JUL": "07",
    "AUG": "08",
    "SEP": "09",
    "OCT": "10",
    "NOV": "11",
    "DEC": "12"
}

# Internal child-db code (value of db_dict) -> the library/edition code that
# ends up in the record's "range" field (e.g. SCI -> SCIE, ISTP -> CPCIS).
mapLib = {
    "SCI": "SCIE",
    "SSCI": "SSCI",
    "AHCI": "AHCI",
    "ISTP": "CPCIS",
    "ISSHP": "CPCISSH",
    "ESCI": "ESCI",
    "CCR": "CCRE",
    "IC": "IC",
    "BHCI": "BHCI",
    "BSCI": "BSCI"
}

# WoS language name -> two-letter language code used in output records.
mapLanguage = {
    "English": "EN",
    "Arabic": "AR",
    "Byelorussian": "BE",
    "Bulgarian": "BG",
    "Catalan": "CA",
    "Czech": "CS",
    "Danish": "DA",
    "German": "DE",
    "Greek": "EL",
    "Spanish": "ES",
    "Estonian": "ET",
    "Finnish": "FI",
    "French": "FR",
    "Croatian": "HR",  # NOTE(review): dead entry — duplicated below; the later "SH" value wins
    "Magyar": "HU",
    "Icelandic": "IS",
    "Italian": "IT",
    "Hebrew": "IW",
    "Japanese": "JA",
    "Korean": "KO",
    "Lithuanian": "LT",
    "Latvian": "LV",
    "Macedonian": "MK",
    "Dutch": "NL",
    "Norwegian": "NO",
    "Polish": "PL",
    "Portuguese": "PT",
    "Rumanian": "RO",
    "Russian": "RU",
    "Croatian": "SH",  # NOTE(review): duplicate key — in a dict literal the last entry wins, so lookups return "SH"; confirm which code is intended
    "Slovak": "SK",
    "Slovene": "SL",
    "Albanian": "SQ",
    "Serbian": "SR",
    "Swedish": "SV",
    "Thai": "TH",
    "Turkish": "TR",
    "Ukrainian": "UK",
    "Chinese": "ZH",
}

# Full WoS edition name (as delivered in the WE field) -> internal child-db
# code.  Most keys keep the feed's HTML-escaped "&amp;"; get_child_db masks
# that entity before splitting on ';'.
# NOTE(review): the two BKCI keys use a literal "&" and an en dash ("–")
# instead of "&amp;"/"-" — confirm they match the raw feed exactly, otherwise
# those editions will never be recognised.
db_dict = {
    "Science Citation Index Expanded (SCI-EXPANDED)": "SCI",
    "Social Science Citation Index (SSCI)": "SSCI",
    "Conference Proceedings Citation Index - Social Science &amp; Humanities (CPCI-SSH)": "ISSHP",
    "Conference Proceedings Citation Index - Science (CPCI-S)": "ISTP",
    "Index Chemicus (IC)": "IC",
    "Current Chemical Reactions (CCR-EXPANDED)": "CCR",
    "Emerging Sources Citation Index (ESCI)": "ESCI",
    "Arts &amp; Humanities Citation Index (A&amp;HCI)": "AHCI",
    "Book Citation Index – Social Sciences & Humanities (BKCI-SSH)": "BHCI",
    "Book Citation Index – Science (BKCI-S)": "BSCI",
}


def get_child_db(data_str):
    """Translate a ';'-separated list of raw WoS edition names into internal
    child-db codes via ``db_dict``.

    Edition names may themselves contain the HTML entity "&amp;", so that
    entity is masked before splitting on ';' and restored before lookup.
    Names missing from ``db_dict`` map to "" (callers use the empty string to
    detect unknown editions and reject the record).

    Returns a de-duplicated list of codes.  Uses ``dict.fromkeys`` instead of
    ``list(set(...))`` so the result order is deterministic (first-seen order);
    the previous set-based dedup made the downstream ";".join("range") string
    vary between runs.
    """
    if not data_str.strip():
        return []
    if "&amp;" in data_str:
        # Mask the entity so its ';' does not split an edition name in half.
        pieces = data_str.replace('&amp;', '¥¥¥¥').split(';')
        codes = [db_dict.get(p.replace('¥¥¥¥', '&amp;').strip(), "") for p in pieces]
    else:
        codes = [db_dict.get(p.strip(), "") for p in data_str.split(';')]
    return list(dict.fromkeys(codes))


def get_month_day(PD, EA):
    """Derive a two-digit (month, day) pair from the WoS PD field, falling
    back to EA for the month when PD carries no usable month name.

    Date ranges like "JAN-FEB" or "JAN 1-15" are reduced to their leading
    part.  Returns ("00", "00") components when nothing parseable is found.
    """
    pd_head = PD.split("-")[0] if "-" in PD else PD
    ea_head = EA.split("-")[0] if "-" in EA else EA
    # Keep only the letters, which should be a month abbreviation.
    pd_letters = re.sub(r"[^A-Z]", "", pd_head.upper()).strip()
    ea_letters = re.sub(r"[^A-Z]", "", ea_head.upper()).strip()
    month = "00"
    if len(pd_letters) > 1:
        month = mapMonth.get(pd_letters, "00")
    elif len(ea_letters) > 1:
        month = mapMonth.get(ea_letters, "00")
    # The day is either the entire PD value ("15") or the last numeric token
    # of a "MON 15" / "MON 15 2020"-shaped value.
    day = ""
    if pd_head.isdigit():
        day = pd_head
    else:
        tokens = pd_head.split(" ")
        if len(tokens) in (2, 3) and tokens[-1].isdigit():
            day = tokens[-1]
    day = day.zfill(2)
    # A day is only trusted alongside a resolved month and a 2-digit value.
    if month == "00" or len(day) != 2:
        day = "00"
    return month, day


async def getQkInfo(redis_conn, issn, eissn, journal_name):
    """Look up journal metadata in the redis hash "qk_info".

    Tries the issn field first, then eissn, then the empty-string fallback
    key.  The stored value is a JSON object; a single-candidate object yields
    its only value, while a multi-candidate object is resolved by the
    journal name reduced to alphanumerics.  May return None.
    """
    name_key = re.sub(r'[^a-zA-Z0-9]', '', journal_name)
    qk_info = None
    if len(issn) > 0:
        qk_info = await redis_conn.hget("qk_info", issn)
    if qk_info is None and len(eissn) > 0:
        qk_info = await redis_conn.hget("qk_info", eissn)
    if qk_info is None:
        qk_info = await redis_conn.hget("qk_info", "")
    if not qk_info:
        return qk_info
    parsed = json.loads(qk_info)
    if len(parsed) == 1:
        return next(iter(parsed.values()))
    if len(parsed) > 1:
        return parsed.get(name_key)
    return parsed


def getLanguage(lan):
    """Map a WoS language name to a two-letter code, defaulting to "EN".

    Multi-valued inputs ("A; B") are treated as English.
    """
    parts = lan.split(";")
    if len(parts) > 1:
        return "EN"
    return mapLanguage.get(parts[0].strip(), "EN")


def getShowOrgan(C1):
    """Format the WoS C1 affiliation field as "[1]org;[2]org;...".

    Bracketed author groups ("[Smith, J.] MIT") are stripped first; each
    remaining ';'-separated organisation gets a 1-based "[n]" prefix.
    Returns "" for an empty field.
    """
    cleaned = re.sub(r"\[.+?\]", "", C1)  # drop the "[author list]" groups
    if ";" not in cleaned:
        # Zero or one organisation.
        return "[1]" + cleaned.strip() if cleaned.strip() else ""
    numbered = [
        "[{}]{}".format(pos, part.strip())
        for pos, part in enumerate(cleaned.split(";"), start=1)
    ]
    return re.sub(r";+$", "", ";".join(numbered))


def getWriterMap(C1):
    """Return {"1": <first "[...]" group>, "2": ...} for every bracketed
    author group in the C1 affiliation string.

    Keys are 1-based strings because they are later joined into "[1,2]"-style
    affiliation markers.  (The original also built an unused list ``ls``;
    that dead local has been removed.)
    """
    groups = re.findall(r"\[(.+?)\]", C1)
    return {str(pos): grp for pos, grp in enumerate(groups, start=1)}


def getShowWriter(AF, dict_af_au, C1):
    """Format the AF author list as "Name[1,2];Name;..." where the bracketed
    numbers are the 1-based indices of the author's affiliations in C1.

    ``dict_af_au`` maps full names (AF) to abbreviated names (AU), so an
    author is matched against C1's bracket groups under either form.
    Authors with no matching affiliation are emitted without brackets.
    """
    writerMap = getWriterMap(C1)
    out_parts = []
    for writer in AF.split(";"):
        writer = writer.strip()
        idxList = []
        for k, v in writerMap.items():
            for writerX in v.split(";"):
                writerX = writerX.strip()
                if writer == writerX or dict_af_au.get(writer, None) == writerX:
                    idxList.append(k)
                    break
        # Sort numerically: the previous plain string sort ordered "10"
        # before "2", scrambling markers for records with >9 affiliations.
        idxList.sort(key=int)
        if idxList:
            out_parts.append(writer + "[" + ",".join(idxList) + "]")
        else:
            out_parts.append(writer)
    return re.sub(r";+$", "", ";".join(out_parts))


def get_wos_accept_date(CY):
    """Build a YYYYMMDD string from the WoS CY (conference date) field.

    Recognised shapes, selected by total string length:
      4       -> "2020"
      9       -> "MAR, 2020"
      12      -> "MAR 15, 2020"
      15 / 19 -> "MAR 15-18, 2020" style ranges (the start day is used)
    Unresolved month/day positions become "00"; an empty CY yields "".
    """
    if len(CY) == 0:
        return ""
    years = ""
    month = ""
    day = ""
    if len(CY) == 4 and CY.isdigit():
        years = CY
    elif len(CY) == 9:
        halves = CY.split(", ")
        if len(halves) == 2:
            years = halves[1].strip()
            month = mapMonth.get(halves[0].strip(), "")
    elif len(CY) == 12:
        halves = CY.split(", ")
        if len(halves) == 2:
            years = halves[1].strip()
            md = halves[0].strip().split(" ")
            if len(md) == 2:
                month = mapMonth.get(md[0].strip(), "")
                day = md[1].strip().zfill(2)
    elif len(CY) in (15, 19):
        halves = CY.split(", ")
        if len(halves) == 2:
            years = halves[1].strip()
            # Use only the start of a "15-18"-style day range.
            md = halves[0].strip().split("-")[0].split(" ")
            if len(md) == 2:
                month = mapMonth.get(md[0].strip(), "")
                day = md[1].strip().zfill(2)
    if not month:
        month = "00"
    if not (1 <= len(day) <= 2):
        day = "00"
    return years + month + day


def get_full_abbr_dic(full_txt, abbr_txt):
    """Zip ';'-separated full author names with their abbreviations into a
    {full_name: abbr_name} dict (both sides normalised via cleanSemicolon).

    Returns {} when the two lists differ in length, since pairing would then
    be ambiguous.
    """
    fulls = full_txt.split(";")
    abbrs = abbr_txt.split(";")
    if len(fulls) != len(abbrs):
        return {}
    return {cleanSemicolon(f): cleanSemicolon(a) for f, a in zip(fulls, abbrs)}


def cleanSemicolon(text):
    """Normalise a ';'-separated value.

    Full-width semicolons become ASCII ones, whitespace around separators is
    removed, runs of semicolons collapse to one, runs of spaces collapse to
    one, and leading/trailing semicolons and whitespace are dropped.
    ``None`` is normalised to "".
    """
    if text is None:
        return ""
    text = text.replace('；', ';').replace("; ", ";")
    # Applied strictly in order — each step feeds the next.
    for pattern, repl in (
        (r"\s+;", ";"),   # whitespace before a semicolon
        (r";\s+", ";"),   # whitespace after a semicolon
        (r";+", ";"),     # collapse repeated semicolons
        (r" +", " "),     # collapse repeated spaces
        (r"^;", ""),      # leading semicolon
        (r";$", ""),      # trailing semicolon
    ):
        text = re.sub(pattern, repl, text)
    return text.strip()


def set_wos_ref_cited(ref_data, src_data):
    """Fill one cited-reference dict (*ref_data*) from a raw WoS reference
    record (*src_data*) and build the display citation string.

    Mutates and returns *ref_data*: author fields, publisher, title, source
    name, doi, publication info, one-letter type code ("strtype") and the
    formatted "refer_text_site" citation.
    """
    # Linked-id resolution order: explicit "colluid" field, then an "id"
    # object whose type is "colluid", finally the UT accession number.
    colluid = src_data.get("colluid", "")
    old_linked_id = cleanSemicolon(colluid)
    if len(old_linked_id) == 0:
        id_info = src_data.get("id", {})
        if id_info.get("type", "") == "colluid":
            colluid = id_info.get("value", "")
            old_linked_id = cleanSemicolon(colluid)
    if len(old_linked_id) == 0:
        old_linked_id = cleanSemicolon(src_data.get("ut", ""))
    # Authors: take full_name (falling back to display_name) for every entry
    # under names.author.en.
    author_1st = ""
    author = ""
    names = src_data.get("names")
    if names:
        au_item = names.get("author")
        if au_item:
            tmp_list = au_item.get("en", [])
            aus_list = []
            for item in tmp_list:
                if not item:
                    continue
                au_name = item.get("full_name", "")
                if len(au_name) == 0:
                    au_name = item.get("display_name", "")
                if len(au_name) > 0:
                    aus_list.append(au_name)
            if len(aus_list) > 0:
                author_1st = aus_list[0]
                author = ";".join(aus_list)
    ref_data["author_1st"] = author_1st
    ref_data["author"] = author
    # Publisher: every full_name/display_name found under publishers[*].names.
    publisher = ""
    tmp_list = src_data.get("publishers")
    pub_list = []
    if tmp_list:
        for item in tmp_list:
            pub_names = item.get("names")
            if pub_names:
                for pub_item in pub_names:
                    pub_name = pub_item.get("full_name", "")
                    if len(pub_name) == 0:
                        pub_name = pub_item.get("display_name", "")
                    if len(pub_name) > 0:
                        pub_list.append(pub_name)
        if len(pub_list) > 0:
            publisher = ";".join(pub_list)
    ref_data["publisher"] = publisher
    # Titles: "item" entries are the cited work's own title; "source" entries
    # are the journal / proceedings name it appeared in.
    title = ""
    source_name = ""
    titles = src_data.get("titles")
    if titles:
        t_item = titles.get("item")
        if t_item:
            tmp_list = t_item.get("en", [])
            ti_list = []
            for item in tmp_list:
                if "title" in item:
                    ti_list.append(item["title"])
            if len(ti_list) > 0:
                title = ";".join(ti_list)
        s_item = titles.get("source")
        if s_item:
            tmp_list = s_item.get("en", [])
            s_list = []
            for item in tmp_list:
                if "title" in item:
                    s_list.append(item["title"])
            if len(s_list) > 0:
                source_name = ";".join(s_list)
    ref_data["title"] = title

    # DOI / ISSN identifiers: the top-level "doi" wins; the identifiers list
    # only fills gaps ("doi" or "xref_doi" types never overwrite a set doi).
    doi = src_data.get("doi", "")
    id_list = src_data.get("identifiers")
    eissn = ""
    issn = ""
    if id_list:
        for item in id_list:
            i_type = item.get("type", "")
            i_val = item.get("value", "")
            if i_type == "doi":
                if len(doi) == 0:
                    doi = i_val
            elif i_type == "eissn":
                eissn = i_val
            elif i_type == "issn":
                issn = i_val
            elif i_type == "xref_doi":
                if len(doi) == 0:
                    doi = i_val
    ref_data["doi"] = doi
    # ref_data["eissn"] = eissn
    # ref_data["issn"] = issn
    # Volume/issue/pages/year from the pub_info object (keys per WoS JSON).
    pub_info = src_data.get("pub_info")
    vol = ""
    num = ""
    begin_page = ""
    end_page = ""
    page_info = ""
    pub_year = ""
    if pub_info:
        vol = pub_info.get("vol", "")
        num = pub_info.get("issue", "")
        begin_page = pub_info.get("begin", "")
        end_page = pub_info.get("end", "")
        page_info = pub_info.get("page_no", "")
        pub_year = pub_info.get("pubyear", "")
    ref_data["vol"] = vol
    ref_data["num"] = num
    ref_data["begin_page"] = begin_page
    ref_data["end_page"] = end_page
    ref_data["page_info"] = page_info
    ref_data["pub_year"] = pub_year

    # Type code: resolved via docType only when exactly one doctype is given;
    # anything unresolved defaults to the generic "K".
    linked_id = ""
    doctypes = src_data.get("doctypes", [])
    strtype = ""
    sub_db_id = ""
    if len(doctypes) == 1:
        strtype = docType.get(doctypes[0], "")
    if len(strtype) == 0:
        strtype = "K"
    # if strtype == "C":
    #     sub_db_id = "00855"
    #     # source_name = meta.get("meeting_name", "")  # TODO: check which field holds this for conference-type records
    # elif strtype == "J":
    #     sub_db_id = "00854"
    # if len(sub_db_id) > 0:
    #     linked_id = BaseLngid().GetLngid(sub_db_id, old_linked_id, False)
    ref_data["old_linked_id"] = old_linked_id
    ref_data["linked_id"] = linked_id
    ref_data["sub_db_id"] = sub_db_id
    ref_data["strtype"] = strtype
    ref_data["source_name"] = source_name
    # Assemble the display citation: "Authors.Title[T].Source,Year,Vol(Num):Pages".
    refer_text_site = ""
    if len(author) > 0:
        refer_text_site += author.replace(";", ",") + "."
    if len(title) > 0:
        refer_text_site += title + "[{}].".format(strtype)
    if len(source_name) > 0:
        refer_text_site += source_name + ","
    if len(pub_year) > 0:
        refer_text_site += pub_year
    if len(refer_text_site) > 0 and refer_text_site[-1] == ",":
        refer_text_site = refer_text_site[:-1]
    if strtype == "J":
        if len(vol) > 0:
            refer_text_site += "," + vol
        if len(num) > 0:
            refer_text_site += "({})".format(num)
    if strtype in ("J", "M", "D", "C"):
        if len(begin_page) > 0:
            refer_text_site += ":" + begin_page
            if len(end_page) > 0:
                refer_text_site += "-" + end_page
    ref_data["refer_text_site"] = refer_text_site
    return ref_data


def get_out_a_meta(data):
    """Project *data* onto the field whitelist for its source_type.

    Keeps only whitelisted keys (None values become ""), then adds every
    whitelisted field missing from *data* as "".  Returns the new dict.
    """
    allowed = meta_fileds[data["source_type"]]
    new_row = {k: ("" if v is None else v) for k, v in data.items() if k in allowed}
    for missing in allowed - set(new_row):
        new_row[missing] = ""
    return new_row


def check_data(data):
    """Validate and normalise a parsed record in place.

    Drops internal keys (_id, rawid_mysql), defaults keyid from lngid, doi to
    "" and is_deprecated to "0", then rejects records with an empty title or
    an implausible pub_year.

    Returns (1, data) on success, (-6, 0) for a missing/empty title, and
    (-5, 0) for a bad publication year.  (The original raised KeyError when
    "title" or "pub_year" was absent; those now count as rejects.)
    """
    data.pop("_id", None)
    data.pop("rawid_mysql", None)
    if "keyid" not in data:
        data["keyid"] = data["lngid"]
    data.setdefault("doi", "")
    if len(data.get("title", "")) == 0:
        return -6, 0
    # Missing / None / "" all normalise to the "not deprecated" flag.
    if data.get("is_deprecated") in (None, ""):
        data["is_deprecated"] = "0"
    pub_year = data.get("pub_year", "")
    # Accept only 4-character years starting 17/18/19/20 (the separate
    # len == 0 test in the original was redundant with len != 4).
    if len(pub_year) != 4 or pub_year[:2] not in ("17", "18", "19", "20"):
        return -5, 0
    return 1, data


def parse_wosjournal_ref(ref_data, down_dict):
    """Attach cited-reference records to *ref_data*.

    Each value in *down_dict* carries a "records" (or legacy "refs") mapping;
    every entry becomes one reference dict, filled by set_wos_ref_cited, whose
    lngid is the article lngid plus a 4-digit running index.  Sets "ref_cnt"
    and, when references exist, "ref_id" and "refer_info".  Returns *ref_data*.
    """
    refs = []
    ref_ids = []
    seq = 0
    for src_data in down_dict.values():
        records = src_data["records"] if "records" in src_data else src_data["refs"]
        for entry in records.values():
            # Entries may arrive either as JSON strings or as parsed dicts.
            if isinstance(entry, str):
                entry = json.loads(entry)
            raw_json = json.dumps(entry, ensure_ascii=False)
            seq += 1
            ref = {
                "cited_rawid": ref_data["rawid"].replace("WOS:", ""),
                "cited_lngid": ref_data["lngid"],
                "cited_pub_year": ref_data.get("pub_year", ""),
            }
            ref_lngid = "{}{}".format(ref_data["lngid"], str(seq).zfill(4))
            ref["lngid"] = ref_lngid
            ref["keyid"] = ref_lngid
            ref["refer_text_raw"] = raw_json.strip()
            ref = set_wos_ref_cited(ref, entry)
            refs.append(ref)
            ref_ids.append(ref_lngid)
    ref_data["ref_cnt"] = str(len(refs))
    if refs:
        ref_data["ref_id"] = ";".join(ref_ids)
        ref_data["refer_info"] = refs
    return ref_data


def parse_wosjournal_woskeywordsitem_article(src_data, redis_conn):
    """Parse one raw WoS core *journal* record (a tag->value dict: UT, WE,
    SO, AF, ...) into the internal "data" dict.

    Returns the data dict on success, or a negative int reject code:
      -1 UT is not a "WOS:" accession number
      -2 an edition in WE is unknown to mapLib
      -3 record is not journal-type (or no editions at all)
      -4 rawid empty after cleaning
    """
    data = {}
    UT = src_data.get("UT", "")
    if not UT.startswith("WOS:"):
        return -1
    WE = src_data.get("WE", "")
    child_db_list = get_child_db(WE)
    isqk = False
    lib_list = []
    for temp in child_db_list:
        sdb = mapLib.get(temp)
        if sdb is None:
            return -2
        if "ISTP" not in temp and "ISSHP" not in temp:  # any edition besides the proceedings indexes marks this as a journal record
            isqk = True
        lib_list.append(sdb)
    if not isqk or len(lib_list) == 0:
        return -3
    range = ";".join(lib_list)
    rawid = cleanSemicolon(UT.replace("WOS:", ""))

    # Fixed provenance constants for the journal ("QK") sub-database.
    source_type = "3"
    sub_db_id = "00854"
    product = "WOSCORE"
    sub_db = "QK"
    provider = "CLARIVATE"
    if len(rawid) == 0:
        return -4
    # NOTE(review): "%H%I%S" mixes 24-hour hour (%H) with 12-hour hour (%I)
    # and has no minutes — "%H%M%S" was almost certainly intended; confirm
    # before changing, since batch strings may be compared across systems.
    batch = time.strftime("%Y%m%d_%H%I%S", time.localtime(time.time()))
    down_date = src_data["down_date"]
    data["batch"] = batch
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["range"] = range
    data["child_db"] = ";".join(child_db_list)
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://www.webofscience.com/wos/alldb/full-record/WOS:" + rawid
    data["raw_type"] = cleanSemicolon(src_data.get("DT", ""))
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)

    # Journal identity: SO (falling back to CT) plus ISSN/EISSN.
    issn = src_data.get("SN", "").strip()
    eissn = src_data.get("EI", "").strip()
    journal_name = src_data.get("SO", "").strip()
    if len(journal_name) == 0:
        journal_name = src_data.get("CT", "").strip()
    country = ""
    language = ""
    journal_raw_id = ""
    # NOTE(review): getQkInfo is an async coroutine but is called here without
    # await from a sync function — jinfo is a coroutine object (never None),
    # so jinfo.get(...) below will raise AttributeError at runtime.  Fixing
    # this requires making this function async (an interface change); flagged
    # for the owner rather than changed here.
    jinfo = getQkInfo(redis_conn, issn, eissn, journal_name)
    if jinfo is not None:
        country = jinfo.get("country", "")
        language = jinfo.get("language", "")
        journal_raw_id = jinfo.get("journal_raw_id", "")

    data["country"] = country
    data["language"] = language
    data["journal_raw_id"] = journal_raw_id
    data["journal_name"] = journal_name

    # Pass-through of raw WoS tags, preserved verbatim (semicolon-normalised)
    # under "<TAG>_latest" keys.
    data["PA_latest"] = cleanSemicolon(src_data.get("PA", ""))
    data["GP_latest"] = cleanSemicolon(src_data.get("GP", ""))
    data["TC_latest"] = cleanSemicolon(src_data.get("TC", ""))
    data["SE_latest"] = cleanSemicolon(src_data.get("SE", ""))
    data["BN_latest"] = cleanSemicolon(src_data.get("BN", ""))
    data["CL_latest"] = cleanSemicolon(src_data.get("CL", ""))
    data["BE_latest"] = cleanSemicolon(src_data.get("BE", ""))
    data["HC_latest"] = cleanSemicolon(src_data.get("HC", ""))
    data["U2_latest"] = cleanSemicolon(src_data.get("U2", ""))
    data["DA_latest"] = cleanSemicolon(src_data.get("DA", ""))
    data["Z9_latest"] = cleanSemicolon(src_data.get("Z9", ""))
    data["FP_latest"] = cleanSemicolon(src_data.get("FP", ""))
    data["U1_latest"] = cleanSemicolon(src_data.get("U1", ""))
    data["HP_latest"] = cleanSemicolon(src_data.get("HP", ""))
    data["BS_latest"] = cleanSemicolon(src_data.get("BS", ""))
    data["C3_latest"] = cleanSemicolon(src_data.get("C3", ""))
    data["SI_latest"] = cleanSemicolon(src_data.get("SI", ""))
    data["HO_latest"] = cleanSemicolon(src_data.get("HO", ""))
    data["SP_latest"] = cleanSemicolon(src_data.get("SP", ""))
    data["DL_latest"] = cleanSemicolon(src_data.get("DL", ""))
    data["GA_latest"] = cleanSemicolon(src_data.get("GA", ""))
    data["J9_latest"] = cleanSemicolon(src_data.get("J9", ""))
    data["PT_latest"] = cleanSemicolon(src_data.get("PT", ""))
    data["PU_latest"] = cleanSemicolon(src_data.get("PU", ""))
    data["WE_latest"] = cleanSemicolon(src_data.get("WE", ""))
    data["JI_latest"] = cleanSemicolon(src_data.get("JI", ""))
    data["PI_latest"] = cleanSemicolon(src_data.get("PI", ""))
    data["CA_latest"] = cleanSemicolon(src_data.get("CA", ""))
    data["EA_latest"] = cleanSemicolon(src_data.get("EA", ""))
    data["PN_latest"] = cleanSemicolon(src_data.get("PN", ""))
    data["FX_latest"] = cleanSemicolon(src_data.get("FX", ""))
    SU = cleanSemicolon(src_data.get("SU", ""))
    OA = cleanSemicolon(src_data.get("OA", ""))
    data["SU_latest"] = SU
    data["OA_latest"] = OA

    # Alternate raw id: PubMed ids (PM tag) rendered as "pubmed@<id>;...".
    rawid_alt = ""
    PM = cleanSemicolon(src_data.get("PM", ""))
    if len(PM) > 0:
        pm_list = []
        tmp_list = PM.split(";")
        for tmp in tmp_list:
            pm_list.append("pubmed@{}".format(tmp))
        rawid_alt = ";".join(pm_list)
    data["rawid_alt"] = rawid_alt
    data["research_field"] = cleanSemicolon(src_data.get("SC", ""))
    # Researcher ids (RI tag): "Name/ID" pairs become "ID@Name".
    researcher_id = cleanSemicolon(src_data.get("RI", "").strip())
    if len(researcher_id) > 0:
        recid_list = []
        tmp_list = researcher_id.split(";")
        for tmp in tmp_list:
            if "/" in tmp:
                recid_tmps = tmp.split("/")
                recid_list.append("{}@{}".format(recid_tmps[1], recid_tmps[0]))
            else:
                recid_list.append(tmp)
        researcher_id = ";".join(recid_list)
    data["researcher_id"] = researcher_id

    data["ref_cnt"] = cleanSemicolon(src_data.get("NR", ""))
    data["title"] = src_data.get("TI", "").strip()
    data["keyword"] = cleanSemicolon(src_data.get("DE", ""))
    data["subject_word"] = cleanSemicolon(src_data.get("ID", ""))
    data["subject"] = cleanSemicolon(src_data.get("WC", ""))
    # Abstract: AB is primary; MA (meeting abstract) fills in when AB is
    # empty, otherwise MA is kept separately as the conference abstract.
    conference_abstract = ""
    abstract_ = cleanSemicolon(src_data.get("AB", ""))
    MA = cleanSemicolon(src_data.get("MA", ""))
    if len(abstract_) == 0:
        abstract_ = MA
    else:
        conference_abstract = MA
    data["abstract"] = abstract_
    data["conference_abstract"] = conference_abstract
    data["vol"] = src_data.get("VL", "").strip()
    data["num"] = src_data.get("IS", "").strip()
    # DOI: DI is primary; D2 (book DOI) fills in when DI is empty, otherwise
    # D2 is kept as the alternate DOI.
    doi = src_data.get("DI", "").strip()
    doi_alt = ""
    D2 = src_data.get("D2", "").strip()
    if len(doi) == 0:
        doi = D2
    else:
        doi_alt = D2
    data["doi"] = doi
    data["doi_alt"] = doi_alt
    data["issn"] = issn
    data["eissn"] = eissn
    data["doc_no"] = cleanSemicolon(src_data.get("AR", ""))
    data["begin_page"] = src_data.get("BP", "").strip()
    data["end_page"] = src_data.get("EP", "").strip()
    data["page_cnt"] = src_data.get("PG", "").strip()
    C1 = src_data.get("C1", "").strip()
    data["preferred_organ"] = cleanSemicolon(C1)

    # Organisations: numbered display form plus the first organisation alone.
    organ = getShowOrgan(C1)
    organ_1st = ""
    if ";" not in C1:  # a single organisation, or none
        organ_1st = re.sub(r"\[.+?\]", "", C1).strip()
    else:
        organ_1st = re.sub(r"\[.+?\]", "", C1).strip()
        organ_1st = organ_1st.split(";")[0]
    # Authors: AF/AU are full/abbreviated article authors, BF/BA the book
    # equivalents; BF/BA act as fallback when AF is empty.
    book_author = ""
    book_author_full = ""
    AF = src_data.get("AF", "").strip()
    AU = src_data.get("AU", "").strip()
    BF = src_data.get("BF", "").strip()
    BA = src_data.get("BA", "").strip()
    data["author_raw"] = cleanSemicolon(AU) + "&&" + cleanSemicolon(AF) + "&&" + cleanSemicolon(
        BA) + "&&" + cleanSemicolon(BF)
    AF_AU_dic = get_full_abbr_dic(AF, AU)
    if len(AF) == 0:
        AF = BF
        AF_AU_dic = get_full_abbr_dic(AF, BA)
    else:
        book_author = BA
        book_author_full = BF
    author = getShowWriter(AF, AF_AU_dic, C1).strip()
    if ";" in AF:  # several authors: take the first as author_1st
        author_1st = AF.split(";")[0]
    else:
        author_1st = AF
    data["organ"] = organ
    data["organ_1st"] = organ_1st
    data["author"] = author
    data["book_author"] = book_author
    data["book_author_full"] = book_author_full

    data["author_1st"] = author_1st
    # Corresponding author (RP tag): keep only the part before the
    # "(reprint author)" marker.
    corr_author = src_data.get("RP", "").strip()
    if len(corr_author) > 0 and "(reprint author)" in corr_author:
        corr_author = corr_author[0:corr_author.find("(reprint author)")]
    data["corr_author"] = cleanSemicolon(corr_author)
    # ORCID ids (OI tag): "Name/ID" pairs become "ID@Name".
    orc_id = cleanSemicolon(src_data.get("OI", "").strip())
    if len(orc_id) > 0:
        orc_list = []
        tmp_list = orc_id.split(";")
        for tmp in tmp_list:
            if "/" in tmp:
                orc_tmps = tmp.split("/")
                orc_list.append("{}@{}".format(orc_tmps[1], orc_tmps[0]))
            else:
                orc_list.append(tmp)
        orc_id = ";".join(orc_list)
    data["orc_id"] = orc_id
    # Emails (EM tag): each address is suffixed with ":*".
    # NOTE(review): the ":*" suffix's meaning is not visible here — presumably
    # an "unmatched to author" marker; confirm with downstream consumers.
    email = ""
    em_list = cleanSemicolon(src_data.get("EM", "")).split(";")
    if len(em_list) > 0:
        tmp_list = []
        for item in em_list:
            if item.strip():
                tmp_list.append("{}:*".format(cleanSemicolon(item)))
        email = ";".join(tmp_list)
    data["email"] = email
    data["fund"] = cleanSemicolon(src_data.get("FU", ""))
    PD = src_data.get("PD", "").strip()
    EA = src_data.get("EA", "").strip()
    month,day = get_month_day(PD,EA)

    # Publication date: PY is the year; EA (early access) is the fallback.
    PY = src_data.get("PY", "").strip()
    pub_date = ""
    pub_year = ""
    if len(PY) > 1:
        pub_year = PY
        pub_date = pub_year + month + day
    elif src_data.get("years", ""):
        # NOTE(review): deliberate hard stop — the year must be fetched from
        # the database for these records (message text is Chinese: "fetch the
        # data from the database").
        raise Exception("从数据库拿数据")
    elif len(EA) > 1:
        pub_year = re.sub(r"[a-zA-Z]", "", EA).strip()
        # pub_year = EA
        pub_date = pub_year + month + day

    pub_year = pub_year.strip()
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    CY = cleanSemicolon(src_data.get("CY", ""))
    data["accept_date"] = get_wos_accept_date(CY)
    # Flags: SU non-empty -> supplement issue; OA non-empty -> open access.
    is_oa = "0"
    is_suppl = "0"
    if len(SU) > 0:
        is_suppl = "1"
    if len(OA) > 0:
        is_oa = "1"
    data["is_suppl"] = is_suppl
    data["is_oa"] = is_oa
    return data


def _swap_slash_pairs(joined):
    """Rewrite a ';'-separated list of 'Name/identifier' entries as 'identifier@Name'.

    Entries without a '/' are kept unchanged.  Used for the WOS OI (ORCID)
    and RI (ResearcherID) fields, whose raw form is "Author/identifier".
    """
    out = []
    for entry in joined.split(";"):
        if "/" in entry:
            parts = entry.split("/")
            out.append("{}@{}".format(parts[1], parts[0]))
        else:
            out.append(entry)
    return ";".join(out)


def parse_woscoreconference_woskeywordsitem_article(src_data):
    """Parse one WOS Core *conference* (sub_db "HY") record into a flat dict.

    ``src_data`` maps raw WOS field tags (UT, WE, TI, AF, ...) to strings,
    plus a "down_date" entry added by the downloader.

    Returns the populated ``data`` dict on success, or a negative int:
        -1  UT is not a "WOS:" accession number
        -2  a WE child database has no mapping in ``mapLib``
        -3  not a conference record (no ISTP/ISSHP child db) or empty libs
        -4  rawid empty after stripping the "WOS:" prefix

    Raises ``Exception`` when PY is missing but a "years" hint is present
    (caller is expected to re-fetch the year from the database).
    """
    data = {}
    UT = src_data.get("UT", "")
    if not UT.startswith("WOS:"):
        return -1
    WE = src_data.get("WE", "")
    child_db_list = get_child_db(WE)
    is_conference = False
    lib_list = []
    for child_db in child_db_list:
        sdb = mapLib.get(child_db)
        if sdb is None:
            return -2
        lib_list.append(sdb)

    # Only proceedings indexes count as conference items: ISTP (science)
    # and ISSHP (social sciences & humanities).
    if "ISTP" in child_db_list or "ISSHP" in child_db_list:
        is_conference = True
    if not is_conference or len(lib_list) == 0:
        return -3
    lib_range = ";".join(lib_list)  # renamed from `range` to stop shadowing the builtin
    rawid = cleanSemicolon(UT.replace("WOS:", ""))

    source_type = "6"
    sub_db_id = "00855"
    product = "WOSCORE"
    sub_db = "HY"
    provider = "CLARIVATE"
    if len(rawid) == 0:
        return -4
    # BUGFIX: the original format was "%Y%m%d_%H%I%S" — "%I" is the 12-hour
    # clock hour, so the stamp repeated the hour and lost the minutes.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    down_date = src_data["down_date"]
    data["batch"] = batch
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["range"] = lib_range
    data["child_db"] = ";".join(child_db_list)
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = "https://www.webofscience.com/wos/alldb/full-record/WOS:" + rawid
    data["raw_type"] = cleanSemicolon(src_data.get("DT", ""))
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)

    issn = src_data.get("SN", "").strip()
    eissn = src_data.get("EI", "").strip()

    data["country"] = ""
    data["language"] = getLanguage(src_data.get("LA", "").strip())

    data["meeting_date_raw"] = cleanSemicolon(src_data.get("CY", "").strip())
    data["meeting_record_name"] = cleanSemicolon(src_data.get("SO", "").strip())
    data["meeting_place"] = cleanSemicolon(src_data.get("CL", "").strip())
    data["meeting_name"] = cleanSemicolon(src_data.get("CT", "").strip())
    data["host_organ"] = cleanSemicolon(src_data.get("HO", "").strip())
    data["sponsor"] = cleanSemicolon(src_data.get("SP", "").strip())
    data["publisher"] = cleanSemicolon(src_data.get("PU", "").strip())
    data["pub_place"] = cleanSemicolon(src_data.get("PA", "").strip())
    data["title_series"] = cleanSemicolon(src_data.get("SE", ""))
    data["isbn"] = cleanSemicolon(src_data.get("BN", ""))
    data["title_sub"] = cleanSemicolon(src_data.get("BS", ""))

    # Raw WOS tags copied through verbatim as "<TAG>_latest" fields.
    for tag in ("GP", "TC", "BE", "HC", "U2", "DA", "Z9", "FP", "U1", "HP",
                "C3", "SI", "DL", "GA", "J9", "PT", "WE", "JI", "PI", "CA",
                "EA", "PN", "FX"):
        data[tag + "_latest"] = cleanSemicolon(src_data.get(tag, ""))
    SU = cleanSemicolon(src_data.get("SU", ""))
    OA = cleanSemicolon(src_data.get("OA", ""))
    data["SU_latest"] = SU
    data["OA_latest"] = OA

    # Alternate id(s): PM holds PubMed ids; prefix each with its source.
    rawid_alt = ""
    PM = cleanSemicolon(src_data.get("PM", ""))
    if len(PM) > 0:
        rawid_alt = ";".join("pubmed@{}".format(pm) for pm in PM.split(";"))
    data["rawid_alt"] = rawid_alt
    data["research_field"] = cleanSemicolon(src_data.get("SC", ""))
    researcher_id = cleanSemicolon(src_data.get("RI", "").strip())
    if len(researcher_id) > 0:
        researcher_id = _swap_slash_pairs(researcher_id)
    data["researcher_id"] = researcher_id

    data["ref_cnt"] = cleanSemicolon(src_data.get("NR", ""))
    data["title"] = src_data.get("TI", "").strip()
    data["keyword"] = cleanSemicolon(src_data.get("DE", ""))
    data["subject_word"] = cleanSemicolon(src_data.get("ID", ""))
    data["subject"] = cleanSemicolon(src_data.get("WC", ""))
    # AB is the regular abstract, MA the meeting abstract; MA is used as the
    # main abstract only when AB is empty.
    conference_abstract = ""
    abstract_ = cleanSemicolon(src_data.get("AB", ""))
    MA = cleanSemicolon(src_data.get("MA", ""))
    if len(abstract_) == 0:
        abstract_ = MA
    else:
        conference_abstract = MA
    data["abstract"] = abstract_
    data["conference_abstract"] = conference_abstract
    data["vol"] = src_data.get("VL", "").strip()
    data["num"] = src_data.get("IS", "").strip()
    # DI is the article DOI, D2 the book DOI; D2 fills in when DI is empty.
    doi = src_data.get("DI", "").strip()
    doi_alt = ""
    D2 = src_data.get("D2", "").strip()
    if len(doi) == 0:
        doi = D2
    else:
        doi_alt = D2
    data["doi"] = doi
    data["doi_alt"] = doi_alt
    data["issn"] = issn
    data["eissn"] = eissn
    data["doc_no"] = cleanSemicolon(src_data.get("AR", ""))
    data["begin_page"] = src_data.get("BP", "").strip()
    data["end_page"] = src_data.get("EP", "").strip()
    data["page_cnt"] = src_data.get("PG", "").strip()
    C1 = src_data.get("C1", "").strip()
    data["preferred_organ"] = cleanSemicolon(C1)
    organ = getShowOrgan(C1)
    # First organization: drop the bracketed author lists, then keep
    # everything up to the first ';' (handles the single/empty case too).
    organ_1st = re.sub(r"\[.+?\]", "", C1).strip()
    if ";" in C1:
        organ_1st = organ_1st.split(";")[0]
    book_author = ""
    book_author_full = ""
    AF = src_data.get("AF", "").strip()
    AU = src_data.get("AU", "").strip()
    BF = src_data.get("BF", "").strip()
    BA = src_data.get("BA", "").strip()
    data["author_raw"] = cleanSemicolon(AU) + "&&" + cleanSemicolon(AF) + "&&" + cleanSemicolon(
        BA) + "&&" + cleanSemicolon(BF)
    AF_AU_dic = get_full_abbr_dic(AF, AU)
    if len(AF) == 0:
        # No article authors: fall back to the book authors (BF/BA).
        AF = BF
        AF_AU_dic = get_full_abbr_dic(AF, BA)
    else:
        book_author = BA
        book_author_full = BF
    author = getShowWriter(AF, AF_AU_dic, C1).strip()
    # First author: everything up to the first ';' (whole string when single/empty).
    author_1st = AF.split(";")[0]
    data["organ"] = organ
    data["organ_1st"] = organ_1st
    data["author"] = author
    data["book_author"] = book_author
    data["book_author_full"] = book_author_full

    data["author_1st"] = author_1st
    # RP looks like "Name (reprint author), address"; keep only the name part.
    corr_author = src_data.get("RP", "").strip()
    if len(corr_author) > 0 and "(reprint author)" in corr_author:
        corr_author = corr_author[0:corr_author.find("(reprint author)")]
    data["corr_author"] = cleanSemicolon(corr_author)
    orc_id = cleanSemicolon(src_data.get("OI", "").strip())
    if len(orc_id) > 0:
        orc_id = _swap_slash_pairs(orc_id)
    data["orc_id"] = orc_id
    # Each e-mail address is suffixed ":*" (author linkage unknown).
    email_parts = []
    for item in cleanSemicolon(src_data.get("EM", "")).split(";"):
        if item.strip():
            email_parts.append("{}:*".format(cleanSemicolon(item)))
    data["email"] = ";".join(email_parts)
    data["fund"] = cleanSemicolon(src_data.get("FU", ""))
    PD = src_data.get("PD", "").strip()
    EA = src_data.get("EA", "").strip()
    month, day = get_month_day(PD, EA)

    # Publication year: prefer PY; a "years" hint means the year must be
    # re-fetched from the database; otherwise derive it from EA.
    PY = src_data.get("PY", "").strip()
    pub_date = ""
    pub_year = ""
    if len(PY) > 1:
        pub_year = PY
        pub_date = pub_year + month + day
    elif src_data.get("years", ""):
        raise Exception("从数据库拿数据")
    elif len(EA) > 1:
        pub_year = re.sub(r"[a-zA-Z]", "", EA).strip()
        pub_date = pub_year + month + day
    pub_year = pub_year.strip()
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Flags: non-empty SU => supplement issue, non-empty OA => open access.
    is_oa = "0"
    is_suppl = "0"
    if len(SU) > 0:
        is_suppl = "1"
    if len(OA) > 0:
        is_oa = "1"
    data["is_suppl"] = is_suppl
    data["is_oa"] = is_oa
    return data


if __name__ == "__main__":
    # Manual smoke test for the PD/EA month-day parser.
    result = get_month_day("2023 OCT 4", "")
    print(result)