# encoding=utf-8
import json
import re
import time

from re_common.vip.baseencodeid import BaseLngid



def deal_data(data_dicts):
    """Rename known raw column names to their internal underscore-style keys.

    Keys not present in the mapping are kept unchanged; ``None`` values are
    normalised to the empty string so downstream string handling is safe.
    """
    key_dicts = {
        "Monograph title":"Monograph_title",
        "Volume":"Volume",
        "Sponsor":"Sponsor",
        "Controlled/Subject terms":"Controlled_Subject_terms",
        "Uncontrolled terms":"Uncontrolled_terms",
        "Copyright":"Copyright",
        "Source":"Source",
        "Author affiliation":"Author_affiliation",
        "ISBN":"ISBN",
        "Accession number":"Accession_number",
        "Classification code":"Classification_code",
        "Standard ID":"Standard_ID",
        "Funding details":"Funding_details",
        "Abbreviated source title":"Abbreviated_source_title",
        "Link to ProQuest Dissertations":"Link_to_ProQuest_Dissertations",
        "Article number":"Article_number",
        "Article or Paper number":"Article_or_Paper_number",
        "Publication date":"Publication_date",
        "Main Heading":"Main_Heading",
        "Corresponding author(s)":"Corresponding_author_s_",
        "Country of application":"Country_of_application",
        "Conference name":"Conference_name",
        "ISSN":"ISSN",
        "Funding text":"Funding_text",
        "Editor":"Editor",
        "Publisher":"Publisher",
        "Publisher/Repository": "Publisher",
        "Open Access type(s)":"Open_Access_type_s_",
        "Inventor":"Inventor",
        "Issue date":"Issue_date",
        "Standard url":"Standard_url",
        "Issue":"Issue",
        "Document type":"Document_type",
        "Assignee":"Assignee",
        "Treatment":"Treatment",
        "Conference date":"Conference_date",
        "CODEN":"CODEN",
        "Publication year":"Publication_year",
        "Author":"Author",
        "Number of references":"Number_of_references",
        "Database":"Database",
        "Volume title":"Volume_title",
        "Title":"Title",
        "DOI":"DOI",
        "Abstract type":"Abstract_type",
        "Part number":"Part_number",
        "Standard designation":"Standard_designation",
        "Status":"Status",
        "Conference location":"Conference_location",
        "Data Provider":"Data_Provider",
        "Language":"Language",
        "Filing date":"Filing_date",
        "Pages":"Pages",
        "Conference code":"Conference_code",
        "Patent number":"Patent_number",
        "Versions":"Versions",
        "Patent issue date":"Patent_issue_date",
        "Title of translation":"Title_of_translation",
        "Abstract":"Abstract",
        "ISBN13":"ISBN13",
        "E-ISSN":"E_ISSN",
        "Paper number":"Paper_number"
    }
    new_dicts = {}
    for k, v in data_dicts.items():
        # Map the key when it is known, keep it as-is otherwise; never keep None.
        new_dicts[key_dicts.get(k, k)] = "" if v is None else v
    return new_dicts


def getShowOrgan(C1):
    """Build a display string of affiliations from C1.

    Bracketed annotations are removed first. A single affiliation is
    returned stripped; several (semicolon-separated) affiliations are
    each prefixed with a 1-based "[i]" marker and re-joined with ";".
    """
    # Remove any "[...]" annotations before splitting.
    cleaned = re.sub(r"\[.+?\]", "", C1)
    if ";" not in cleaned:
        # Zero or one affiliation: no numbering needed.
        return cleaned.strip()
    numbered = [
        "[{}]{}".format(pos, segment.strip())
        for pos, segment in enumerate(cleaned.split(";"), start=1)
    ]
    # Join and drop any trailing semicolons.
    return re.sub(r";+$", "", ";".join(numbered))


def getWriterMap(C1):
    """Map 1-based index strings ("1", "2", ...) to each bracketed group in C1.

    Each "[...]" group in the affiliation string becomes one entry; the key
    is the group's position as a string, the value is the bracket content.
    """
    # fix: dropped the unused accumulator list the original also built.
    writerMap = {}
    for idx, content in enumerate(re.findall(r"\[(.+?)\]", C1), start=1):
        writerMap[str(idx)] = content
    return writerMap


def getShowWriter(AF, C1):
    """Annotate each author in AF with the indices of the affiliation groups
    in C1 (see getWriterMap) whose member list contains that author.

    Returns e.g. "Li, A[1,2];Wang, B" — authors without a matching group get
    no suffix. AF is a ";"-separated author list; C1 holds "[...]" groups of
    ";"-separated author names.
    """
    writerMap = getWriterMap(C1)
    parts = []
    for writer in AF.split(";"):
        writer = writer.strip()
        idxList = []
        for k, v in writerMap.items():
            for writerX in v.split(";"):
                if writer == writerX.strip():
                    idxList.append(k)
                    break
        # fix: sort numerically — the previous plain sort() compared the index
        # strings lexicographically, ordering "10" before "2" when a record
        # has ten or more affiliation groups.
        idxList.sort(key=int)
        if idxList:
            parts.append(writer + "[" + ",".join(idxList) + "]")
        else:
            parts.append(writer)
    # Drop trailing semicolons (e.g. when AF itself ended with ";").
    return re.sub(r";+$", "", ";".join(parts))


def initlanguageMap():
    """Build a lookup from an upper-cased language name/alias to its 2-letter code.

    The embedded table is a ";"-separated list of "CODE★NAME" pairs covering
    2-letter codes mapped to themselves, ISO-639-2 3-letter variants, and
    spelled-out English language names. The NAME side (upper-cased) becomes
    the key, the 2-letter CODE the value.
    """
    codelanguageMap = {}
    # NOTE(review): the local name "str" shadows the builtin; left unchanged
    # here to keep the table byte-identical — rename with care.
    str = "AA★AA;AB★AB;AE★AE;AF★AF;AK★AK;AM★AM;AN★AN;AR★AR;AS★AS;AV★AV;AY★AY;AZ★AZ;BA★BA;BE★BE;BG★BG;BH★BH;BI★BI;BM★BM;BN★BN;BO★BO;BR★BR;BS★BS;CA★CA;CE★CE;CH★CH;CO★CO;CR★CR;CS★CS;CU★CU;CV★CV;CY★CY;DA★DA;DE★DE;DV★DV;DZ★DZ;EE★EE;EL★EL;EN★EN;EO★EO;ES★ES;ET★ET;EU★EU;FA★FA;FF★FF;FI★FI;FJ★FJ;FO★FO;FR★FR;FY★FY;GA★GA;GD★GD;GL★GL;GN★GN;GU★GU;GV★GV;HA★HA;HE★HE;HI★HI;HO★HO;HR★HR;HT★HT;HU★HU;HY★HY;HZ★HZ;IA★IA;ID★ID;IE★IE;IG★IG;II★II;IK★IK;IO★IO;IS★IS;IT★IT;IU★IU;JA★JA;JV★JV;KA★KA;KG★KG;KI★KI;KJ★KJ;KK★KK;KL★KL;KM★KM;KN★KN;KO★KO;KR★KR;KS★KS;KU★KU;KV★KV;KW★KW;KY★KY;LA★LA;LB★LB;LG★LG;LI★LI;LN★LN;LO★LO;LT★LT;LU★LU;LV★LV;MD★MD;MG★MG;MH★MH;MI★MI;MK★MK;ML★ML;MN★MN;MO★MO;MR★MR;MS★MS;MT★MT;MY★MY;NA★NA;NB★NB;ND★ND;NE★NE;NG★NG;NL★NL;NN★NN;NO★NO;NR★NR;NV★NV;NY★NY;OC★OC;OJ★OJ;OM★OM;OR★OR;OS★OS;PA★PA;PI★PI;PL★PL;PS★PS;PT★PT;QU★QU;RM★RM;RN★RN;RO★RO;RU★RU;RW★RW;SA★SA;SC★SC;SD★SD;SE★SE;SG★SG;SH★SH;SI★SI;SK★SK;SL★SL;SM★SM;SN★SN;SO★SO;SQ★SQ;SR★SR;SS★SS;ST★ST;SU★SU;SV★SV;SW★SW;TA★TA;TE★TE;TG★TG;TH★TH;TI★TI;TK★TK;TL★TL;TN★TN;TO★TO;TR★TR;TS★TS;TT★TT;TW★TW;TY★TY;UG★UG;UK★UK;UR★UR;UZ★UZ;VE★VE;VI★VI;VO★VO;WA★WA;WO★WO;XH★XH;YI★YI;YO★YO;ZA★ZA;ZH★ZH;ZU★ZU;AA★AAR;AA★Afar;AB★ABK;AB★Abkhazian;AE★AVE;AE★Avestan;AF★AFR;AF★Afrikaans;AK★AKA;AK★AKA + 2;AK★Akan;AM★AMH;AM★Amharic;AN★Aragonese;AN★ARG;AR★ARA;AR★ARA + 30;AR★Arabic;AS★ASM;AS★Assamese;AV★AVA;AV★Avaric;AY★AYM;AY★AYM + 2;AY★Aymara;AZ★AZE;AZ★AZE + 2;AZ★Azerbaijani;BA★BAK;BA★Bashkir;BE★BEL;BE★Belarusian;BG★BUL;BG★Bulgarian;BH★BIH;BH★Bihari;BH★None;BI★BIS;BI★Bislama;BM★BAM;BM★Bambara;BN★BEN;BN★Bengali;BO★BOD;BO★TIB;BO★Tibetan;BR★BRE;BR★Breton;BS★BOS;BS★Bosnian;CA★CAT;CA★Catalan;CE★CHE;CE★Chechen;CH★CHA;CH★Chamorro;CO★Corsican;CO★COS;CR★CRE;CR★CRE + 6;CR★Cree;CS★CES;CS★CZE;CS★Czech;CU★CHU;CU★Church 
Slavic;CV★Chuvash;CV★CHV;CY★CYM;CY★WEL;CY★Welsh;DA★DAN;DA★Danish;DE★DEU;DE★GER;DE★German;DV★DIV;DV★Divehi;DZ★DZO;DZ★Dzongkha;EE★EWE;EL★ELL;EL★GRE;EL★Greek;EN★ENG;EN★English;EO★EPO;EO★Esperanto;ES★Castilian;ES★SPA;ES★Spanish;ET★EST;ET★Estonian;EU★BAQ;EU★Basque;EU★EUS;FA★FAS;FA★FAS + 1;FA★FAS + 2;FA★PER;FA★Persian;FF★FUL;FF★FUL + 9;FF★Fulah;FI★FIN;FI★Finnish;FJ★FIJ;FJ★Fijian;FO★FAO;FO★Faroese;FR★FRA;FR★FRE;FR★French;FY★FRY;FY★FRY + 3;FY★Western Frisian;GA★GLE;GA★Irish;GD★GLA;GD★Scottish Gaelic;GL★Galician;GL★GLG;GN★GRN;GN★GRN + 5;GN★Guaraní;GU★GUJ;GU★Gujarati;GV★GLV;GV★Manx;HA★HAU;HA★Hausa;HE★HEB;HE★Hebrew;HI★HIN;HI★Hindi;HO★Hiri Motu;HO★HMO;HR★Croatian;HR★HRV;HR★SCR;HT★Haitian Creole;HT★HAT;HU★HUN;HU★Hungarian;HY★ARM;HY★Armenian;HY★HYE;HZ★HER;HZ★Herero;IA★INA;IA★Interlingua;IA★International Auxiliary Language Association;ID★IND;ID★Indonesian;IE★ILE;IE★Interlingue;IG★IBO;IG★Igbo;II★III;II★Sichuan Yi;IK★Inupiaq;IK★IPK;IK★IPK + 2;IO★IDO;IS★ICE;IS★Icelandic;IS★ISL;IT★ITA;IT★Italian;IU★IKU;IU★IKU + 2;IU★Inuktitut;JA★Japanese;JA★JPN;JV★JAV;JV★Javanese;KA★GEO;KA★Georgian;KA★KAT;KG★KON;KG★KON + 3;KG★Kongo;KI★KIK;KI★Kikuyu;KJ★KUA;KJ★Kwanyama;KK★KAZ;KK★Kazakh;KL★KAL;KL★Kalaallisut;KM★KHM;KM★Khmer;KN★KAN;KN★Kannada;KO★KOR;KO★Korean;KR★Kanuri;KR★KAU;KR★KAU + 3;KS★KAS;KS★Kashmiri;KU★KUR;KU★KUR + 3;KU★Kurdish;KV★KOM;KV★KOM + 2;KV★Komi;KW★COR;KW★Cornish;KY★KIR;KY★Kirghiz;LA★LAT;LA★Latin;LB★LTZ;LB★Luxembourgish;LG★Ganda;LG★LUG;LI★LIM;LI★Limburgish;LN★LIN;LN★Lingala;LO★LAO;LT★LIT;LT★Lithuanian;LU★LUB;LU★Luba-Katanga;LV★Latvian;LV★LAV;MD★Moldovan;MG★Malagasy;MG★MLG;MG★MLG + 10;MH★MAH;MH★Marshallese;MI★MAO;MI★Māori;MI★MRI;MK★MAC;MK★Macedonian;MK★MKD;ML★MAL;ML★Malayalam;MN★MON;MN★MON + 2;MN★Mongolian;MO★MOL;MO★Moldavian;MR★MAR;MR★Marathi;MS★Malay;MS★MAY;MS★MSA;MS★MSA + 12;MS★MSA + 13;MT★Maltese;MT★MLT;MY★BUR;MY★Burmese;MY★MYA;NA★NAU;NA★Nauru;NB★NOB;NB★Norwegian Bokmål;ND★NDE;ND★North Ndebele;NE★NEP;NE★Nepali;NG★NDO;NG★Ndonga;NL★DUT;NL★Dutch;NL★NLD;NN★NNO;NN★Norwegian 
Nynorsk;NO★NOR;NO★NOR + 2;NO★Norwegian;NR★NBL;NR★South Ndebele;NV★NAV;NV★Navajo;NY★Chichewa;NY★NYA;OC★Occitan;OC★OCI;OJ★OJI;OJ★OJI + 7;OJ★Ojibwa;OM★ORM;OM★ORM + 4;OM★Oromo;OR★ORI;OR★Oriya;OS★OSS;OS★Ossetian;PA★PAN;PA★Panjabi;PI★Pāli;PI★PLI;PL★POL;PL★Polish;PS★Pashto;PS★PUS;PS★PUS + 3;PT★POR;PT★Portuguese;QU★QUE;QU★QUE + 44;QU★Quechua;RM★Raeto-Romance;RM★ROH;RN★Kirundi;RN★RUN;RO★Romanian;RO★RON;RO★RUM;RU★RUS;RU★Russian;RW★KIN;RW★Kinyarwanda;SA★SAN;SA★Sanskrit;SC★Sardinian;SC★SRD;SC★SRD + 4;SD★Sindhi;SD★SND;SE★Northern Sami;SE★SME;SG★SAG;SG★Sango;SH★HBS ;SH★HBS + 3;SH★Serbo-Croatian;SI★SIN;SI★Sinhalese;SK★SLK;SK★SLO;SK★Slovak;SL★Slovenian;SL★SLV;SM★Samoan;SM★SMO;SN★Shona;SN★SNA;SO★SOM;SO★Somali;SQ★ALB;SQ★Albanian;SQ★SQI;SQ★SQI + 3;SQ★SQI + 4;SR★SCC;SR★Serbian;SR★SRP;SS★SSW;SS★Swati;ST★SOT;ST★Sotho;SU★SUN;SU★Sundanese;SV★SWE;SV★Swedish;SW★SWA;SW★SWA + 2;SW★Swahili;TA★TAM;TA★Tamil;TE★TEL;TE★Telugu;TG★Tajik;TG★TGK;TH★THA;TH★Thai;TI★Tigrinya;TI★TIR;TK★TUK;TK★Turkmen;TL★Tagalog;TL★TGL;TN★TSN;TN★Tswana;TO★TON;TO★Tonga;TR★TUR;TR★Turkish;TS★TSO;TS★Tsonga;TT★TAT;TT★Tatar;TW★TWI;TY★TAH;TY★Tahitian;UG★UIG;UG★Uyghur;UK★UKR;UK★Ukrainian;UR★URD;UR★Urdu;UZ★UZB;UZ★UZB + 2;UZ★Uzbek;VE★VEN;VE★Venda;VI★VIE;VI★Vietnamese;VO★VOL;VO★Volapük;WA★Walloon;WA★WLN;WO★WOL;WO★Wolof;XH★XHO;XH★Xhosa;YI★YID;YI★YID + 2;YI★Yiddish;YO★YOR;YO★Yoruba;ZA★ZHA;ZA★ZHA + 2;ZA★Zhuang;ZH★CHI;ZH★Chinese;ZH★ZHO;ZH★ZHO + 12;ZH★ZHO + 13;ZU★ZUL;ZU★Zulu"
    tmps = str.split(";")
    for line in tmps:
        line = line.strip()
        # Skip fragments too short to hold "X★Y".
        if len(line) < 3:
            continue
        vec = line.split("★")
        if len(vec) < 2:
            continue
        # vec[0] is the 2-letter code, vec[1] the name/alias used as lookup key.
        lan = vec[1].upper().strip()
        code = vec[0].strip()
        if len(lan) < 1:
            continue
        if len(code) < 1:
            continue
        codelanguageMap[lan] = code
    return codelanguageMap


def splitpage(str_value):
    """Split a pages string such as "12-20, 5" into [start, end, jump].

    All "p" characters are stripped first (e.g. "pp."). The part before the
    first comma is treated as a "start-end" range; anything after the comma
    is the jump-page note. Non-range pages yield empty start/end.
    """
    segments = str_value.replace("p", "").strip().split(",")
    jump = segments[1].strip() if len(segments) > 1 else ""
    bounds = segments[0].split("-")
    if len(bounds) == 2:
        return [bounds[0], bounds[1], jump]
    # No (or malformed) range: keep start/end empty, as before.
    return ["", "", jump]

def get_by_regex(pattern, target_string):
    """Return the list of all matches of *pattern* in *target_string*."""
    # re.findall compiles (and caches) the pattern internally, so this is
    # behaviourally identical to an explicit compile-then-findall.
    return re.findall(pattern, target_string)

def format_date(date):
    """Normalise a free-form date string to "YYYYMMDD".

    Tokens are scanned independently: a 4-digit number is the year, a 1-2
    digit number the day, and any other word is looked up as a month name.
    Returns "" when no year is found, "YYYY0000" when only the year is known.
    """
    month_map = {
        "january": "01",
        "february": "02",
        "februaryy": "02",  # known upstream typo, kept deliberately
        "march": "03",
        "april": "04",
        "may": "05",
        "june": "06",
        "july": "07",
        "august": "08",
        "september": "09",
        "october": "10",
        "november": "11",
        "december": "12",
        "jan": "01",
        "feb": "02",
        "mar": "03",
        "apr": "04",
        "jun": "06",  # fix: abbreviation was missing
        "jul": "07",  # fix: abbreviation was missing
        "aug": "08",
        "sep": "09",  # fix: abbreviation was missing
        "sept": "09",
        "oct": "10",
        "nov": "11",
        "dec": "12",
    }
    year = ""
    month = "00"
    day = "00"
    # Keep only alphanumerics, whitespace and the '-'/'/' range separators.
    date = ''.join(e for e in date if e.isalnum() or e.isspace() or e in ('-', '/'))
    for tmp in date.split():
        if tmp.isdigit() and len(tmp) == 4:
            year = tmp
            continue
        if tmp.isdigit() and len(tmp) <= 2:
            day = tmp.zfill(2)
            continue
        tmp = tmp.lower()
        # For ranges like "january-march" or "jan/feb" keep the first month.
        if "-" in tmp:
            tmp = tmp.split("-")[0]
        elif "/" in tmp:
            tmp = tmp.split("/")[0]
        # fix: only overwrite the month on a real match — previously any
        # unknown word (e.g. "Online") reset an already-found month to "00".
        found = month_map.get(tmp)
        if found:
            month = found
    if len(year) == 0:
        return ""
    if month == "00":
        return year + "0000"
    return year + month + day


async def getQkInfo(redis_conn, issn, eissn, journal_name):
    """Look up journal metadata in the redis hash "qk_info".

    Tries the ISSN field first, then the E-ISSN, then the "" fallback field.
    The stored value is a JSON object mapping normalised journal names to
    info dicts: a single entry is returned directly, otherwise the entry
    matching *journal_name* (non-alphanumerics stripped) is returned.
    """
    normalized_name = re.sub(r'[^a-zA-Z0-9]', '', journal_name)
    raw = None
    if len(issn) > 0:
        raw = await redis_conn.hget("qk_info", issn)
    if raw is None and len(eissn) > 0:
        raw = await redis_conn.hget("qk_info", eissn)
    if raw is None:
        raw = await redis_conn.hget("qk_info", "")
    if not raw:
        # Nothing found (None or empty payload) — return it unchanged.
        return raw
    parsed = json.loads(raw)
    if len(parsed) == 1:
        return list(parsed.values())[0]
    if len(parsed) > 1:
        return parsed.get(normalized_name)
    return parsed


def cleanSemicolon(text):
    """Normalise semicolon-delimited text.

    Converts full-width semicolons, collapses whitespace around and between
    semicolons, collapses repeated spaces, and strips leading/trailing
    semicolons and whitespace. ``None`` becomes ""; non-string scalars are
    stringified first.
    """
    if text is None:
        return ""
    if not isinstance(text, str):
        # fix: only int was converted before — a float (or other scalar)
        # crashed on .replace below. str(int) keeps the old behaviour.
        text = str(text)
    text = text.replace('；', ';').replace("; ", ";")  # full-width ';' to half-width, drop space after ';'
    text = re.sub(r"\s+;", ";", text)  # drop whitespace before a semicolon
    text = re.sub(r";\s+", ";", text)  # drop whitespace after a semicolon
    text = re.sub(r";+", ";", text)  # collapse runs of semicolons
    text = re.sub(r" +", " ", text)  # collapse runs of spaces (comment previously said "semicolon")
    text = re.sub(r"^;", "", text)  # strip a leading semicolon
    text = re.sub(r";$", "", text)  # strip a trailing semicolon
    text = text.strip()
    return text


def parse_eijournal_article_csv(src_data,redis_conn):
    """Parse one EI journal-article record (CSV export) into a flat data dict.

    Returns -1 when the record is a conference document (handled by the
    conference parser), -2 when the accession number is missing, otherwise
    the populated dict.
    """
    # Normalise raw CSV column names to internal keys; None values become "".
    src_data = deal_data(src_data)
    codelanguageMap = initlanguageMap()
    data = {}
    rawid = cleanSemicolon(src_data.get("Accession_number", ""))
    raw_type = cleanSemicolon(src_data.get("Document_type", ""))
    if "conference" in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    # Fixed identifiers for this product / sub-database.
    source_type = "3"
    sub_db_id = "00017"
    product = "EI"
    sub_db = "QK"
    provider = "ELSEVIER"
    down_date = src_data["down_date"]
    rawid_alt = src_data["rawid_alt"]
    if rawid_alt is None:
        rawid_alt = ""
    data["down_date"] = down_date
    data["latest_date"] = down_date
    # NOTE(review): "%H%I%S" repeats the hour (24h then 12h) and omits minutes —
    # probably meant "%H%M%S"; confirm whether batch labels are parsed elsewhere.
    batch = time.strftime("%Y%m%d_%H%I%S", time.localtime(time.time()))
    data["batch"] = batch
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    provider_url = ""
    if len(rawid_alt) > 0:
        provider_url = "https://www.engineeringvillage.com/app/doc/?docid=" + rawid_alt
    data["provider_url"] = provider_url
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["rawid_alt"] = rawid_alt
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    # Normalise the ISSN to "####-####": pad unhyphenated values to 8 chars
    # with '0' before inserting the hyphen.
    issn = cleanSemicolon(src_data.get("ISSN", ""))
    if len(issn) > 0:
        if len(issn) == 8:
            issn = "{}-{}".format(issn[0:4], issn[4:8])
        elif "-" not in issn:
            res = "{:8s}".format(issn)
            issn = res.replace(" ", "0")
            sb = list(issn)
            sb.insert(4, '-')
            issn = ''.join(sb)

    # Same normalisation for the electronic ISSN.
    eissn = cleanSemicolon(src_data.get("E_ISSN", ""))
    if len(eissn) > 0:
        if len(eissn) == 8:
            eissn = "{}-{}".format(eissn[0:4], eissn[4:8])
        elif "-" not in eissn:
            res = "{:8s}".format(eissn)
            eissn = res.replace(" ", "0")
            sb = list(eissn)
            sb.insert(4, '-')
            eissn = ''.join(sb)
    journal_name = src_data.get("Source", "").strip()
    country = ""
    language = ""
    journal_raw_id = ""
    # NOTE(review): getQkInfo is an async coroutine but is NOT awaited here, so
    # jinfo is a coroutine object — never None — and .get() below would raise
    # AttributeError. Verify how this function is actually invoked/patched.
    jinfo = getQkInfo(redis_conn, issn, "", journal_name)
    if jinfo is not None:
        country = jinfo.get("country", "")
        language = jinfo.get("language", "")
        journal_raw_id = jinfo.get("journal_raw_id", "")
    data["country"] = country
    data["journal_raw_id"] = journal_raw_id
    # Fall back to the record's own "Language" field when the journal lookup
    # gave no language; map names to 2-letter codes via initlanguageMap().
    if len(language) == 0:
        lgstr = src_data.get("Language", "").strip()
        lgstr = lgstr.replace("and", ",").replace(".", "").strip().replace("Chinese English", "Chinese;English")
        lgstr = lgstr  # no-op; kept byte-identical
        if len(lgstr) > 0:
            lgStrings = lgstr.upper().split(";")
            hashSet = set()
            if len(lgStrings) > 1:
                # Multiple languages: collect the distinct mapped codes.
                for lg in lgStrings:
                    tmp = codelanguageMap.get(lg, "")
                    if tmp and len(tmp) > 0:
                        hashSet.add(tmp)
                language = ";".join(hashSet)
            else:
                language = codelanguageMap.get(lgstr.upper(), "")
            if not language:
                language = ""
        else:
            language = ""
    if len(language) == 0:
        language = "UN"  # unknown
    data["language"] = language
    data["journal_name"] = journal_name
    data["journal_name_alt"] = cleanSemicolon(src_data.get("Abbreviated_source_title", ""))
    doc_no = cleanSemicolon(src_data.get("Article_number", ""))
    if len(doc_no) == 0:
        doc_no = cleanSemicolon(src_data.get("Article_or_Paper_number", ""))
    data["doc_no"] = doc_no
    # Classification codes arrive as "-"-separated "CODE Name" entries;
    # keep entries whose first two chars are digits as "CODE@Name".
    subject = cleanSemicolon(src_data.get("Classification_code", ""))
    tmps = subject.split("-")
    sb1 = []
    for tmp in tmps:
        tmp = tmp.strip()
        if len(tmp) > 6:
            if tmp[:2].isdigit() and ' ' in tmp:
                index_of_space = tmp.find(' ')
                sb1.append(f"{tmp[:index_of_space].strip()}@{tmp[index_of_space + 1:].strip()}")
    if len(sb1) > 0:
        subject = ";".join(sb1)
    else:
        subject = ""
    data["subject"] = subject
    data["keyword"] = cleanSemicolon(src_data.get("Main_Heading", ""))
    # Controlled + uncontrolled terms (both "-"-separated) merge into one
    # ";"-joined subject_word list.
    controlled_terms = cleanSemicolon(src_data.get("Controlled_Subject_terms", ""))
    uncontrolled_terms = cleanSemicolon(src_data.get("Uncontrolled_terms", ""))
    tmps_co = controlled_terms.split("-")
    tmps_un = uncontrolled_terms.split("-")
    subject_word = ""
    sb2 = []
    for item in tmps_co:
        tmp = cleanSemicolon(item)
        if len(tmp) > 0:
            sb2.append(tmp)
    for item in tmps_un:
        tmp = cleanSemicolon(item)
        if len(tmp) > 0:
            sb2.append(tmp)
    if len(sb2) > 0:
        subject_word = ";".join(sb2)
    data["subject_word"] = subject_word
    ref_cnt = src_data.get("Number_of_references", "")
    if len(ref_cnt) == 0:
        ref_cnt = "0"
    data["ref_cnt"] = ref_cnt
    data["title"] = cleanSemicolon(src_data.get("Title", ""))
    data["title_alt"] = cleanSemicolon(src_data.get("Title_of_translation", ""))
    data["abstract"] = cleanSemicolon(src_data.get("Abstract", ""))
    data["abstract_type"] = cleanSemicolon(src_data.get("Abstract_type", ""))
    data["vol"] = cleanSemicolon(src_data.get("Volume", ""))
    data["num"] = cleanSemicolon(src_data.get("Issue", ""))
    data["doi"] = cleanSemicolon(src_data.get("DOI", ""))
    data["issn"] = issn
    data["eissn"] = eissn
    # splitpage returns [begin, end, jump].
    page_info = cleanSemicolon(src_data.get("Pages", ""))
    arraypage = splitpage(page_info)
    begin_page = arraypage[0]
    end_page = arraypage[1]
    jump_page = arraypage[2]
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["jump_page"] = jump_page
    data["page_info"] = page_info
    data["sub_db_class_name"] = cleanSemicolon(src_data.get("CODEN", ""))
    # Authors/affiliations use "(...)" groups in the source; convert to "[...]"
    # so the getShowWriter/getShowOrgan-style "[n]" convention applies.
    author = cleanSemicolon(src_data.get("Author", ""))
    author = author.replace('(', '[').replace(")", "]").strip()
    organ = cleanSemicolon(src_data.get("Author_affiliation", ""))
    organ = organ.replace('(', '[').replace(")", "]").strip()
    author = cleanSemicolon(author)
    organ = cleanSemicolon(organ)
    author_1st = author.split(";")[0].split("[")[0].strip()
    organ_1st = organ.split(";")[0].replace("[1]", "", 1).strip()
    data["author"] = author
    data["author_1st"] = author_1st
    data["organ"] = organ
    data["organ_1st"] = organ_1st
    # Corresponding authors arrive as "Name(email)" items; emit "email:Name".
    corr_info = cleanSemicolon(src_data.get("Corresponding_author_s_", ""))
    addrs = corr_info.split(";")
    email = ""
    if len(addrs) > 0:
        tmp_list = []
        for item in addrs:
            tmps = item.split("(")
            if len(tmps) == 2:
                tmp_list.append("{}:{}".format(cleanSemicolon(tmps[1].replace(")","")),cleanSemicolon(tmps[0])))
        email = ";".join(tmp_list)
    data["corr_author"] = corr_info
    data["author_intro"] = corr_info
    data["email"] = email
    data["fund"] = cleanSemicolon(src_data.get("Funding_text", ""))
    data["fund_alt"] = cleanSemicolon(src_data.get("Funding_details", ""))
    # NOTE(review): duplicate of the "fund" assignment two lines above.
    data["fund"] = cleanSemicolon(src_data.get("Funding_text", ""))
    # pub_year from "Publication_year" (or the start of a "YYYY-..." range);
    # pub_date from "Issue_date", falling back to "YYYY0000".
    pub_date = ""
    pub_year = ""
    py = cleanSemicolon(src_data.get("Publication_year", ""))
    pd = cleanSemicolon(src_data.get("Issue_date", ""))
    pd = pd.replace("Februaryy", "February").replace("Febr", "February")
    pd = format_date(pd)
    if len(py) == 4 and py.isdigit():
        pub_year = py
    if len(pub_year) == 0 and "-" in py:
        tmp = py.split("-")[0].strip()
        if len(tmp) == 4 and tmp.isdigit():
            pub_year = tmp
    if len(pd) == 0:
        if len(pub_year) > 0:
            pub_date = pub_year + "0000"
    else:
        pub_date = pd
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Supplement issues are marked by "SUPPL" in the issue number.
    is_suppl = "0"
    if "SUPPL" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    # Any open-access type string marks the record as OA.
    accesstype = cleanSemicolon(src_data.get("Open_Access_type_s_", ""))
    is_oa = "0"
    if len(accesstype) > 0:
        is_oa = "1"
    data["is_oa"] = is_oa
    data["Open_Access_type_s_latest"] = accesstype
    return data


def parse_eijournal_article_csv_json(src_data,redis_conn):
    """Parse one EI journal-article record (JSON export) into a flat data dict.

    Returns -1 when the record is a conference document, -2 when the
    accession number is missing, otherwise the populated dict.
    """
    src_data = deal_data(src_data)
    data = {}
    rawid = cleanSemicolon(src_data.get("accnum", ""))
    raw_type = cleanSemicolon(src_data.get("doctype", ""))
    if len(raw_type) == 0:
        # Fall back to the first entry of the "dt" list when "doctype" is empty.
        dt = src_data.get("dt",[])
        if isinstance(dt,list) and len(dt) > 0:
            raw_type = dt[0]
    if "conference" in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    # Fixed identifiers for this product / sub-database.
    source_type = "3"
    sub_db_id = "00017"
    product = "EI"
    sub_db = "QK"
    provider = "ELSEVIER"
    down_date = src_data["down_date"]
    rawid_alt = src_data["rawid_alt"]
    if rawid_alt is None:
        rawid_alt = ""
    data["down_date"] = down_date
    data["latest_date"] = down_date
    # NOTE(review): "%H%I%S" repeats the hour (24h then 12h) and omits minutes —
    # probably meant "%H%M%S"; confirm whether batch labels are parsed elsewhere.
    batch = time.strftime("%Y%m%d_%H%I%S", time.localtime(time.time()))
    data["batch"] = batch
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    provider_url = ""
    if len(rawid_alt) > 0:
        provider_url = "https://www.engineeringvillage.com/app/doc/?docid=" + rawid_alt
    data["provider_url"] = provider_url
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["rawid_alt"] = rawid_alt
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    issn = ""
    vol = ""
    num = ""
    cited_info = src_data.get("citedby", {})
    if isinstance(cited_info,dict):
        issn = cleanSemicolon(cited_info.get("issn"))
        vol = cleanSemicolon(cited_info.get("firstvolume"))
        if len(vol) == 0:
            vol = cleanSemicolon(src_data.get("vo",""))
        num = cleanSemicolon(cited_info.get("firstissue"))
    abs_info = src_data.get("abstractrecord", {})
    ref_cnt = ""
    sub_db_class_name = ""
    # fix: eissn was only assigned inside the isinstance() branch below,
    # leaving it unbound (NameError at the normalisation step) whenever
    # "abstractrecord" is absent or not a dict.
    eissn = ""
    if isinstance(abs_info, dict):
        ref_cnt = cleanSemicolon(abs_info.get("patrefcount", ""))
        if len(ref_cnt) == 0:
            ref_cnt = "0"
        sub_db_class_name = cleanSemicolon(abs_info.get("coden", ""))
        eissn = cleanSemicolon(abs_info.get("eissn", ""))
    # Normalise the ISSN to "####-####": pad unhyphenated values to 8 chars
    # with '0' before inserting the hyphen.
    if len(issn) > 0:
        if len(issn) == 8:
            issn = "{}-{}".format(issn[0:4], issn[4:8])
        elif "-" not in issn:
            res = "{:8s}".format(issn)
            issn = res.replace(" ", "0")
            sb = list(issn)
            sb.insert(4, '-')
            issn = ''.join(sb)
    # Same normalisation for the electronic ISSN.
    if len(eissn) > 0:
        if len(eissn) == 8:
            eissn = "{}-{}".format(eissn[0:4], eissn[4:8])
        elif "-" not in eissn:
            res = "{:8s}".format(eissn)
            eissn = res.replace(" ", "0")
            sb = list(eissn)
            sb.insert(4, '-')
            eissn = ''.join(sb)
    journal_name = src_data.get("source", "").strip()
    country = ""
    language = ""
    journal_raw_id = ""
    # NOTE(review): getQkInfo is an async coroutine but is NOT awaited here, so
    # jinfo is a coroutine object — never None — and .get() below would raise
    # AttributeError. Verify how this function is actually invoked/patched.
    jinfo = getQkInfo(redis_conn, issn, "", journal_name)
    if jinfo is not None:
        country = jinfo.get("country", "")
        language = jinfo.get("language", "")
        journal_raw_id = jinfo.get("journal_raw_id", "")
    data["country"] = country
    data["journal_raw_id"] = journal_raw_id
    if len(language) == 0:
        language = "UN"  # unknown
    data["language"] = language
    data["journal_name"] = journal_name
    data["vol"] = vol
    data["num"] = num
    data["doi"] = cleanSemicolon(src_data.get("doi", ""))
    data["issn"] = issn
    data["eissn"] = eissn

    data["ref_cnt"] = ref_cnt
    data["title"] = cleanSemicolon(src_data.get("title", ""))
    data["title_alt"] = cleanSemicolon(src_data.get("tt", ""))

    # splitpage returns [begin, end, jump].
    page_info = cleanSemicolon(src_data.get("pages", ""))
    arraypage = splitpage(page_info)
    begin_page = arraypage[0]
    end_page = arraypage[1]
    jump_page = arraypage[2]
    data["begin_page"] = begin_page
    data["end_page"] = end_page
    data["jump_page"] = jump_page
    data["sub_db_class_name"] = sub_db_class_name

    author = ""
    organ = ""
    author_1st = ""
    organ_1st = ""
    email = ""
    corr_author = ""

    # Authors carry name/email plus affiliation ids; render as
    # "Name[id1,id2]" and collect "email:Name" pairs for the email field.
    authors = src_data.get("authors", [])
    au_email_list = []
    au_list = []
    corr_au_list = []
    if isinstance(authors, list) and len(authors) > 0:
        for item in authors:
            au_name = cleanSemicolon(item.get("name",""))
            au_email = cleanSemicolon(item.get("email", ""))
            au_affils = item.get("affils", [])
            if len(au_name) > 0:
                if len(au_email) > 0:
                    au_email_list.append("{}:{}".format(au_email, au_name))
                    corr_au_list.append("{},{}".format(au_email, au_name))
                if isinstance(au_affils,list) and len(au_affils) > 0:
                    tmp_list = []
                    for tmp in au_affils:
                        au_og_id = cleanSemicolon(tmp.get("id",""))
                        if len(au_og_id) > 0:
                            tmp_list.append(au_og_id)
                    if len(tmp_list) > 0:
                        au_list.append("{}[{}]".format(au_name,",".join(tmp_list)))

        if len(au_email_list) > 0:
            email = ";".join(au_email_list)
        if len(au_list) > 0:
            author = ";".join(au_list)
            author_1st = au_list[0].split("[")[0]
        if len(corr_au_list) > 0:
            corr_author = ";".join(corr_au_list)
    # Affiliations render as "[id]name" joined by ";".
    organs = src_data.get("affils", [])
    if isinstance(organs, list) and len(organs) > 0:
        if len(organs) > 0:
            org_list = []
            for item in organs:
                org_num = cleanSemicolon(str(item.get("id", "")))
                org_name = cleanSemicolon(item.get("name", ""))
                if len(org_name) > 0:
                    if len(org_num) > 0:
                        org_list.append("[{}]{}".format(org_num,org_name))
                    else:
                        org_list.append(org_name)
            if len(org_list) > 0:
                organ_1st = org_list[0].split(";")[0].replace("[1]", "", 1).strip()
                organ = ";".join(org_list)
    data["author"] = author
    data["email"] = email
    data["author_1st"] = author_1st
    data["organ"] = organ
    data["organ_1st"] = organ_1st
    # data["corr_author"] = corr_author
    # data["author_intro"] = corr_author
    # pub_year from "yr" (or the start of a "YYYY-..." range); pub_date from
    # "sd", falling back to "YYYY0000".
    pub_date = ""
    pub_year = ""
    py = cleanSemicolon(src_data.get("yr", ""))
    pd = cleanSemicolon(src_data.get("sd", ""))
    pd = pd.replace("Februaryy", "February").replace("Febr", "February")
    pd = format_date(pd)
    if len(py) == 4 and py.isdigit():
        pub_year = py
    if len(pub_year) == 0 and "-" in py:
        tmp = py.split("-")[0].strip()
        if len(tmp) == 4 and tmp.isdigit():
            pub_year = tmp
    if len(pd) == 0:
        if len(pub_year) > 0:
            pub_date = pub_year + "0000"
    else:
        pub_date = pd
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Supplement issues are marked by "SUPPL" in the issue number.
    is_suppl = "0"
    if "SUPPL" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    oa_info = src_data.get("openAccess")
    is_oa = "0"
    if oa_info and oa_info == True:
        is_oa = "1"
    data["is_oa"] = is_oa
    return data


def parse_eiconference_article_csv(src_data, redis_conn):
    """Parse one EI (Engineering Village) conference record, supplied as a
    CSV-derived dict, into the normalized output schema.

    Args:
        src_data: raw record dict; keys are normalized by deal_data().
        redis_conn: redis connection consumed by getQkInfo() for the
            journal/meeting-record lookup.

    Returns:
        dict of normalized fields on success,
        -1 when the document type is not a conference record,
        -2 when the accession number (rawid) is missing.
    """

    def _hyphenate(code):
        # Normalize an (e)ISSN to "NNNN-NNNN": hyphenate an 8-char code,
        # otherwise zero-pad a hyphen-less code to 8 chars before inserting
        # the dash. Codes that already contain "-" pass through unchanged.
        if len(code) == 8:
            return "{}-{}".format(code[0:4], code[4:8])
        if "-" not in code:
            code = "{:8s}".format(code).replace(" ", "0")
            return code[:4] + "-" + code[4:]
        return code

    src_data = deal_data(src_data)
    codelanguageMap = initlanguageMap()
    data = {}
    rawid = cleanSemicolon(src_data.get("Accession_number", ""))
    raw_type = cleanSemicolon(src_data.get("Document_type", ""))
    if "conference" not in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    source_type = "6"
    sub_db_id = "00020"
    product = "EI"
    sub_db = "HY"
    provider = "ELSEVIER"
    down_date = src_data["down_date"]
    data["down_date"] = down_date
    # BUGFIX: rawid_alt was previously copied from down_date (copy/paste
    # slip; the JSON twin reads "rawid_alt"), which produced provider URLs
    # of the form "...docid=<download date>". .get keeps records without an
    # alternate id working (empty rawid_alt -> empty provider_url).
    rawid_alt = src_data.get("rawid_alt", "")
    if rawid_alt is None:
        rawid_alt = ""
    data["latest_date"] = down_date
    # BUGFIX: %I is the 12-hour clock; the batch stamp wants minutes (%M).
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    provider_url = ""
    if len(rawid_alt) > 0:
        provider_url = "https://www.engineeringvillage.com/app/doc/?docid=" + rawid_alt
    data["provider_url"] = provider_url
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["rawid_alt"] = rawid_alt
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    issn = cleanSemicolon(src_data.get("ISSN", ""))
    if len(issn) > 0:
        issn = _hyphenate(issn)
    eissn = cleanSemicolon(src_data.get("E_ISSN", ""))
    if len(eissn) > 0:
        eissn = _hyphenate(eissn)
    meeting_record_name = src_data.get("Source", "").strip()
    country = ""
    language = ""
    meeting_record_code = ""
    jinfo = getQkInfo(redis_conn, issn, "", meeting_record_name)
    if jinfo is not None:
        country = jinfo.get("country", "")
        language = jinfo.get("language", "")
        meeting_record_code = jinfo.get("journal_raw_id", "")
    data["country"] = country
    if len(language) == 0:
        # Fall back to the record's own Language field, mapped through the
        # code table; multiple languages are ";"-separated.
        lgstr = src_data.get("Language", "").strip()
        lgstr = lgstr.replace("and", ",").replace(".", "").strip().replace("Chinese English", "Chinese;English")
        if len(lgstr) > 0:
            parts = lgstr.upper().split(";")
            if len(parts) > 1:
                codes = set()
                for lg in parts:
                    code = codelanguageMap.get(lg, "")
                    if code:
                        codes.add(code)
                language = ";".join(codes)
            else:
                language = codelanguageMap.get(lgstr.upper(), "")
            if not language:
                language = ""
        else:
            language = ""
    if len(language) == 0:
        language = "UN"  # unknown-language sentinel
    data["language"] = language

    data["meeting_record_code"] = meeting_record_code
    data["meeting_record_name"] = meeting_record_name
    data["meeting_record_alt"] = cleanSemicolon(src_data.get("Abbreviated_source_title", ""))

    data["meeting_name"] = cleanSemicolon(src_data.get("Conference_name", ""))
    meeting_date_raw = cleanSemicolon(src_data.get("Conference_date", ""))
    data["meeting_date_raw"] = meeting_date_raw
    data["meeting_place"] = cleanSemicolon(src_data.get("Conference_location", ""))
    data["meeting_code"] = cleanSemicolon(src_data.get("Conference_code", ""))
    data["publisher"] = cleanSemicolon(src_data.get("Publisher", ""))
    data["sponsor"] = cleanSemicolon(src_data.get("Sponsor", ""))
    # accept_date is approximated by the conference start date.
    data["accept_date"] = format_date(meeting_date_raw.split("-")[0].strip())

    doc_no = cleanSemicolon(src_data.get("Article_number", ""))
    if len(doc_no) == 0:
        doc_no = cleanSemicolon(src_data.get("Article_or_Paper_number", ""))
    data["doc_no"] = doc_no
    # Classification codes arrive as "NNN Name - NNN Name - ..."; rebuild
    # each entry as "code@name".
    subject = cleanSemicolon(src_data.get("Classification_code", ""))
    sb1 = []
    for tmp in subject.split("-"):
        tmp = tmp.strip()
        if len(tmp) > 6 and tmp[:2].isdigit() and ' ' in tmp:
            index_of_space = tmp.find(' ')
            sb1.append(f"{tmp[:index_of_space].strip()}@{tmp[index_of_space + 1:].strip()}")
    data["subject"] = ";".join(sb1) if len(sb1) > 0 else ""
    data["keyword"] = cleanSemicolon(src_data.get("Main_Heading", ""))
    # Controlled + uncontrolled terms are both "-"-separated lists; merge
    # the non-empty entries into one ";"-joined keyword string.
    controlled_terms = cleanSemicolon(src_data.get("Controlled_Subject_terms", ""))
    uncontrolled_terms = cleanSemicolon(src_data.get("Uncontrolled_terms", ""))
    sb2 = []
    for item in controlled_terms.split("-") + uncontrolled_terms.split("-"):
        tmp = cleanSemicolon(item)
        if len(tmp) > 0:
            sb2.append(tmp)
    data["subject_word"] = ";".join(sb2) if len(sb2) > 0 else ""
    ref_cnt = src_data.get("Number_of_references", "")
    if len(ref_cnt) == 0:
        ref_cnt = "0"
    data["ref_cnt"] = ref_cnt
    data["title"] = cleanSemicolon(src_data.get("Title", ""))
    data["title_alt"] = cleanSemicolon(src_data.get("Title_of_translation", ""))
    data["abstract"] = cleanSemicolon(src_data.get("Abstract", ""))
    data["abstract_type"] = cleanSemicolon(src_data.get("Abstract_type", ""))
    data["vol"] = cleanSemicolon(src_data.get("Volume", ""))
    data["num"] = cleanSemicolon(src_data.get("Issue", ""))
    data["doi"] = cleanSemicolon(src_data.get("DOI", ""))
    data["issn"] = issn
    data["eissn"] = eissn
    page_info = cleanSemicolon(src_data.get("Pages", ""))
    arraypage = splitpage(page_info)
    data["begin_page"] = arraypage[0]
    data["end_page"] = arraypage[1]
    data["jump_page"] = arraypage[2]
    data["page_info"] = page_info
    data["sub_db_class_name"] = cleanSemicolon(src_data.get("CODEN", ""))
    # Authors/affiliations: EI wraps affiliation ids in parentheses; the
    # output schema uses square brackets.
    author = cleanSemicolon(src_data.get("Author", ""))
    author = cleanSemicolon(author.replace('(', '[').replace(")", "]").strip())
    organ = cleanSemicolon(src_data.get("Author_affiliation", ""))
    organ = cleanSemicolon(organ.replace('(', '[').replace(")", "]").strip())
    data["author"] = author
    data["author_1st"] = author.split(";")[0].split("[")[0].strip()
    data["organ"] = organ
    data["organ_1st"] = organ.split(";")[0].replace("[1]", "", 1).strip()
    # Corresponding-author entries look like "Name(email)"; rebuild each as
    # "email:Name" and join with ";".
    corr_info = cleanSemicolon(src_data.get("Corresponding_author_s_", ""))
    pairs = []
    for item in corr_info.split(";"):
        tmps = item.split("(")
        if len(tmps) == 2:
            pairs.append("{}:{}".format(cleanSemicolon(tmps[1].replace(")", "")), cleanSemicolon(tmps[0])))
    data["corr_author"] = corr_info
    data["author_intro"] = corr_info
    data["email"] = ";".join(pairs)
    data["fund"] = cleanSemicolon(src_data.get("Funding_text", ""))
    data["fund_alt"] = cleanSemicolon(src_data.get("Funding_details", ""))
    pub_date = ""
    pub_year = ""
    py = cleanSemicolon(src_data.get("Publication_year", ""))
    pd = cleanSemicolon(src_data.get("Issue_date", ""))
    # Fix the "Februaryy" typo, then expand the "Febr" abbreviation.
    # BUGFIX: a plain replace("Febr", "February") corrupted an
    # already-correct "February" into "Februaryuary"; the negative
    # lookahead leaves correct month names untouched.
    pd = pd.replace("Februaryy", "February")
    pd = re.sub(r"Febr(?!uary)", "February", pd)
    pd = format_date(pd)
    if len(py) == 4 and py.isdigit():
        pub_year = py
    if len(pub_year) == 0 and "-" in py:
        tmp = py.split("-")[0].strip()
        if len(tmp) == 4 and tmp.isdigit():
            pub_year = tmp
    if len(pd) == 0:
        if len(pub_year) > 0:
            pub_date = pub_year + "0000"  # year known, month/day unknown
    else:
        pub_date = pd
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["is_suppl"] = "1" if "SUPPL" in data["num"] else "0"
    accesstype = cleanSemicolon(src_data.get("Open_Access_type_s_", ""))
    data["is_oa"] = "1" if len(accesstype) > 0 else "0"
    data["Open_Access_type_s_latest"] = accesstype
    return data


def parse_eiconference_article_csv_json(src_data, redis_conn):
    """Parse one EI (Engineering Village) conference record, supplied as a
    JSON-derived dict, into the normalized output schema.

    Args:
        src_data: raw record dict; keys are normalized by deal_data().
        redis_conn: redis connection consumed by getQkInfo() for the
            journal/meeting-record lookup.

    Returns:
        dict of normalized fields on success,
        -1 when the document type is not a conference record,
        -2 when the accession number (rawid) is missing.
    """

    def _hyphenate(code):
        # Normalize an (e)ISSN to "NNNN-NNNN": hyphenate an 8-char code,
        # otherwise zero-pad a hyphen-less code to 8 chars before inserting
        # the dash. Codes that already contain "-" pass through unchanged.
        if len(code) == 8:
            return "{}-{}".format(code[0:4], code[4:8])
        if "-" not in code:
            code = "{:8s}".format(code).replace(" ", "0")
            return code[:4] + "-" + code[4:]
        return code

    src_data = deal_data(src_data)
    data = {}
    rawid = cleanSemicolon(src_data.get("accnum", ""))
    raw_type = cleanSemicolon(src_data.get("doctype", ""))
    if len(raw_type) == 0:
        # Fall back to the first entry of the "dt" document-type list.
        dt = src_data.get("dt", [])
        if isinstance(dt, list) and len(dt) > 0:
            raw_type = dt[0]
    if "conference" not in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    source_type = "6"
    sub_db_id = "00020"
    product = "EI"
    sub_db = "HY"
    provider = "ELSEVIER"
    down_date = src_data["down_date"]
    rawid_alt = src_data["rawid_alt"]
    if rawid_alt is None:
        rawid_alt = ""
    data["down_date"] = down_date
    data["latest_date"] = down_date
    # BUGFIX: %I is the 12-hour clock; the batch stamp wants minutes (%M).
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    provider_url = ""
    if len(rawid_alt) > 0:
        provider_url = "https://www.engineeringvillage.com/app/doc/?docid=" + rawid_alt
    data["provider_url"] = provider_url
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["rawid_alt"] = rawid_alt
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    issn = ""
    # BUGFIX: eissn was only bound inside the abstractrecord branch, so a
    # non-dict "abstractrecord" raised NameError at the hyphenation step.
    eissn = ""
    vol = ""
    num = ""
    cited_info = src_data.get("citedby", {})
    abs_info = src_data.get("abstractrecord", {})
    ref_cnt = ""
    sub_db_class_name = ""
    if isinstance(abs_info, dict):
        ref_cnt = cleanSemicolon(abs_info.get("patrefcount", ""))
        sub_db_class_name = cleanSemicolon(abs_info.get("coden", ""))
        eissn = cleanSemicolon(abs_info.get("eissn", ""))
    # "0" fallback applied unconditionally, matching the CSV parser.
    if len(ref_cnt) == 0:
        ref_cnt = "0"
    if isinstance(cited_info, dict):
        # "" defaults added: .get() without a default yields None, which
        # cleanSemicolon is not expected to handle.
        issn = cleanSemicolon(cited_info.get("issn", ""))
        vol = cleanSemicolon(cited_info.get("firstvolume", ""))
        if len(vol) == 0:
            vol = cleanSemicolon(src_data.get("vo", ""))
        num = cleanSemicolon(cited_info.get("firstissue", ""))
    if len(issn) > 0:
        issn = _hyphenate(issn)
    if len(eissn) > 0:
        eissn = _hyphenate(eissn)
    meeting_record_name = src_data.get("source", "").strip()
    country = ""
    language = ""
    meeting_record_code = ""
    jinfo = getQkInfo(redis_conn, issn, "", meeting_record_name)
    if jinfo is not None:
        country = jinfo.get("country", "")
        language = jinfo.get("language", "")
        meeting_record_code = jinfo.get("journal_raw_id", "")
    data["country"] = country
    if len(language) == 0:
        language = "UN"  # unknown-language sentinel
    data["language"] = language
    data["vol"] = vol
    data["num"] = num
    data["doi"] = cleanSemicolon(src_data.get("doi", ""))
    data["issn"] = issn
    data["eissn"] = eissn

    data["meeting_record_code"] = meeting_record_code
    data["meeting_record_name"] = meeting_record_name
    data["publisher"] = cleanSemicolon(src_data.get("pn", ""))

    data["ref_cnt"] = ref_cnt
    data["title"] = cleanSemicolon(src_data.get("title", ""))
    data["title_alt"] = cleanSemicolon(src_data.get("tt", ""))

    page_info = cleanSemicolon(src_data.get("pages", ""))
    arraypage = splitpage(page_info)
    data["begin_page"] = arraypage[0]
    data["end_page"] = arraypage[1]
    data["jump_page"] = arraypage[2]
    data["sub_db_class_name"] = sub_db_class_name

    author = ""
    organ = ""
    author_1st = ""
    organ_1st = ""
    email = ""
    corr_author = ""

    # Authors carry optional email and a list of affiliation ids; render
    # "Name[id,id]" entries plus "email:Name" pairs for the email field.
    authors = src_data.get("authors", [])
    au_email_list = []
    au_list = []
    corr_au_list = []
    if isinstance(authors, list) and len(authors) > 0:
        for item in authors:
            au_name = cleanSemicolon(item.get("name", ""))
            au_email = cleanSemicolon(item.get("email", ""))
            au_affils = item.get("affils", [])
            if len(au_name) == 0:
                continue
            if len(au_email) > 0:
                au_email_list.append("{}:{}".format(au_email, au_name))
                corr_au_list.append("{},{}".format(au_email, au_name))
            if isinstance(au_affils, list) and len(au_affils) > 0:
                affil_ids = []
                for affil in au_affils:
                    au_og_id = cleanSemicolon(str(affil.get("id", "")))
                    if len(au_og_id) > 0:
                        affil_ids.append(au_og_id)
                if len(affil_ids) > 0:
                    au_list.append("{}[{}]".format(au_name, ",".join(affil_ids)))

        if len(au_email_list) > 0:
            email = ";".join(au_email_list)
        if len(au_list) > 0:
            author = ";".join(au_list)
            author_1st = au_list[0].split("[")[0]
        if len(corr_au_list) > 0:
            corr_author = ";".join(corr_au_list)
    # Affiliations render as "[id]Name"; the first organ drops a leading
    # "[1]" marker.
    organs = src_data.get("affils", [])
    if isinstance(organs, list) and len(organs) > 0:
        org_list = []
        for item in organs:
            org_num = cleanSemicolon(str(item.get("id", "")))
            org_name = cleanSemicolon(item.get("name", ""))
            if len(org_name) > 0:
                if len(org_num) > 0:
                    org_list.append("[{}]{}".format(org_num, org_name))
                else:
                    org_list.append(org_name)
        if len(org_list) > 0:
            organ_1st = org_list[0].split(";")[0].replace("[1]", "", 1).strip()
            organ = ";".join(org_list)
    data["author"] = author
    data["email"] = email
    data["author_1st"] = author_1st
    data["organ"] = organ
    data["organ_1st"] = organ_1st
    # corr_author is collected but deliberately not emitted (output keys
    # "corr_author"/"author_intro" are intentionally disabled here).
    pub_date = ""
    pub_year = ""
    py = cleanSemicolon(src_data.get("yr", ""))
    pd = cleanSemicolon(src_data.get("sd", ""))
    # Fix the "Februaryy" typo, then expand the "Febr" abbreviation.
    # BUGFIX: a plain replace("Febr", "February") corrupted an
    # already-correct "February" into "Februaryuary"; the negative
    # lookahead leaves correct month names untouched.
    pd = pd.replace("Februaryy", "February")
    pd = re.sub(r"Febr(?!uary)", "February", pd)
    pd = format_date(pd)
    if len(py) == 4 and py.isdigit():
        pub_year = py
    if len(pub_year) == 0 and "-" in py:
        tmp = py.split("-")[0].strip()
        if len(tmp) == 4 and tmp.isdigit():
            pub_year = tmp
    if len(pd) == 0:
        if len(pub_year) > 0:
            pub_date = pub_year + "0000"  # year known, month/day unknown
    else:
        pub_date = pd
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["is_suppl"] = "1" if "SUPPL" in data["num"] else "0"
    oa_info = src_data.get("openAccess")
    # == True kept deliberately: only a boolean True (or 1) flags OA.
    data["is_oa"] = "1" if oa_info == True else "0"
    return data




if __name__ == '__main__':
    # Quick manual check of how str.split("[") treats a leading bracket.
    sample = "[1]sdasdasd"
    print(sample.split("["))