import datetime
import json
import re
import time

from re_common.vip.baseencodeid import BaseLngid

# Map of raw Scopus "Document Type" values to the single-letter reference
# type codes used when formatting citations (see set_scopus_ref_cited):
# J = journal article, C = conference, M = book/monograph, R = report,
# K = other/miscellaneous.
docType = {
    "Review":"K", "Article":"J",
    "Erratum":"K",
    "Report":"R",
    "Letter":"K",
    "Short Survey":"K",
    "Book":"M",
    "Book Chapter":"M",
    "Business Article":"K",
    "Retracted":"K",
    "Abstract Report":"R",
    "Scopus":"K",
    "Conference Paper":"C",
    "Article in Press":"J",
    "Editorial":"K",
    "Note":"K",
    "Conference Review":"C",
    "Data Paper":"K"
}

# Scopus ASJC (All Science Journal Classification) numeric code -> subject name.
# Two-digit keys are top-level subject areas; four-digit keys are their
# sub-categories (first two digits identify the parent area).
subject_dic = {"22": "Engineering", "2210": "Mechanical Engineering", "2207": "Control and Systems Engineering", "2206": "Computational Mechanics", "2205": "Civil and Structural Engineering", "2204": "Biomedical Engineering", "2203": "Automotive Engineering", "2202": "Aerospace Engineering", "2201": "Engineering (miscellaneous)", "2200": "General Engineering", "2209": "Industrial and Manufacturing Engineering", "2208": "Electrical and Electronic Engineering", "2216": "Architecture", "2215": "Building and Construction", "2214": "Media Technology", "2213": "Safety, Risk, Reliability and Quality", "2212": "Ocean Engineering", "2211": "Mechanics of Materials", "23": "Environmental Science", "2306": "Global and Planetary Change", "2305": "Environmental Engineering", "2304": "Environmental Chemistry", "2303": "Ecology", "2302": "Ecological Modeling", "2301": "Environmental Science (miscellaneous)", "2300": "General Environmental Science", "2309": "Nature and Landscape Conservation", "2308": "Management, Monitoring, Policy and Law", "2307": "Health, Toxicology and Mutagenesis", "2312": "Water Science and Technology", "2311": "Waste Management and Disposal", "2310": "Pollution", "24": "Immunology and Microbiology", "2405": "Parasitology", "2404": "Microbiology", "2403": "Immunology", "2402": "Applied Microbiology and Biotechnology", "2401": "Immunology and Microbiology (miscellaneous)", "2400": "General Immunology and Microbiology", "2406": "Virology", "25": "Materials Science", "2504": "Electronic, Optical and Magnetic Materials", "2503": "Ceramics and Composites", "2502": "Biomaterials", "2501": "Materials Science (miscellaneous)", "2500": "General Materials Science", "2508": "Surfaces, Coatings and Films", "2507": "Polymers and Plastics", "2506": "Metals and Alloys", "2505": "Materials Chemistry", "26": "Mathematics", "2603": "Analysis", "2602": "Algebra and Number Theory", "2601": "Mathematics (miscellaneous)", "2600": "General Mathematics", "2609": "Logic", "2608":
"Geometry and Topology", "2607": "Discrete Mathematics and Combinatorics", "2606": "Control and Optimization", "2605": "Computational Mathematics", "2604": "Applied Mathematics", "2614": "Theoretical Computer Science", "2613": "Statistics and Probability", "2612": "Numerical Analysis", "2611": "Modeling and Simulation", "2610": "Mathematical Physics", "27": "Medicine", "2702": "Anatomy", "2701": "Medicine (miscellaneous)", "2700": "General Medicine", "2709": "Drug Guides", "2708": "Dermatology", "2707": "Complementary and Alternative Medicine", "2706": "Critical Care and Intensive Care Medicine", "2705": "Cardiology and Cardiovascular Medicine", "2704": "Biochemistry (medical)", "2703": "Anesthesiology and Pain Medicine", "2713": "Epidemiology", "2712": "Endocrinology, Diabetes and Metabolism", "2711": "Emergency Medicine", "2710": "Embryology", "2719": "Health Policy", "2718": "Health Informatics", "2717": "Geriatrics and Gerontology", "2716": "Genetics (clinical)", "2715": "Gastroenterology", "2714": "Family Practice", "2724": "Internal Medicine", "2723": "Immunology and Allergy", "2722": "Histology", "2721": "Hepatology", "2720": "Hematology", "2729": "Obstetrics and Gynecology", "2728": "Neurology (clinical)", "2727": "Nephrology", "2726": "Microbiology (medical)", "2725": "Infectious Diseases", "2735": "Pediatrics, Perinatology and Child Health", "2734": "Pathology and Forensic Medicine", "2733": "Otorhinolaryngology", "2732": "Orthopedics and Sports Medicine", "2731": "Ophthalmology", "2730": "Oncology", "2739": "Public Health, Environmental and Occupational Health", "2738": "Psychiatry and Mental Health", "2737": "Physiology (medical)", "2736": "Pharmacology (medical)", "2746": "Surgery", "2745": "Rheumatology", "2744": "Reviews and References (medical)", "2743": "Reproductive Medicine", "2742": "Rehabilitation", "2741": "Radiology, Nuclear Medicine and Imaging", "2740": "Pulmonary and Respiratory Medicine", "2748": "Urology", "2747": "Transplantation",
"28": "Neuroscience", "2801": "Neuroscience (miscellaneous)", "2800": "General Neuroscience", "2809": "Sensory Systems", "2808": "Neurology", "2807": "Endocrine and Autonomic Systems", "2806": "Developmental Neuroscience", "2805": "Cognitive Neuroscience", "2804": "Cellular and Molecular Neuroscience", "2803": "Biological Psychiatry", "2802": "Behavioral Neuroscience", "29": "Nursing", "2909": "Gerontology", "2900": "General Nursing", "2908": "Fundamentals and Skills", "2907": "Emergency Nursing", "2906": "Critical Care Nursing", "2905": "Community and Home Care", "2904": "Care Planning", "2903": "Assessment and Diagnosis", "2902": "Advanced and Specialized Nursing", "2901": "Nursing (miscellaneous)", "2911": "Leadership and Management", "2910": "Issues, Ethics and Legal Aspects", "2919": "Pediatrics", "2918": "Pathophysiology", "2917": "Oncology (nursing)", "2916": "Nutrition and Dietetics", "2915": "Nurse Assisting", "2914": "Medical and Surgical Nursing", "2913": "Maternity and Midwifery", "2912": "LPN and LVN", "2922": "Research and Theory", "2921": "Psychiatric Mental Health", "2920": "Pharmacology (nursing)", "2923": "Review and Exam Preparation", "30": "Pharmacology, Toxicology and Pharmaceutics", "3002": "Drug Discovery", "3001": "Pharmacology, Toxicology and Pharmaceutics (miscellaneous)", "3000": "General Pharmacology, Toxicology and Pharmaceutics", "3005": "Toxicology", "3004": "Pharmacology", "3003": "Pharmaceutical Science", "31": "Physics and Astronomy", "3101": "Physics and Astronomy (miscellaneous)", "3100": "General Physics and Astronomy", "3110": "Surfaces and Interfaces", "3109": "Statistical and Nonlinear Physics", "3108": "Radiation", "3107": "Atomic and Molecular Physics, and Optics", "3106": "Nuclear and High Energy Physics", "3105": "Instrumentation", "3104": "Condensed Matter Physics", "3103": "Astronomy and Astrophysics", "3102": "Acoustics and Ultrasonics", "10": "Multidisciplinary", "1000": "Multidisciplinary", "32": "Psychology",
"3200": "General Psychology", "3207": "Social Psychology", "3206": "Neuropsychology and Physiological Psychology", "3205": "Experimental and Cognitive Psychology", "3204": "Developmental and Educational Psychology", "3203": "Clinical Psychology", "3202": "Applied Psychology", "3201": "Psychology (miscellaneous)", "11": "Agricultural and Biological Sciences", "1110": "Plant Science", "1107": "Forestry", "1106": "Food Science", "1105": "Ecology, Evolution, Behavior and Systematics", "1104": "Aquatic Science", "1103": "Animal Science and Zoology", "1102": "Agronomy and Crop Science", "1101": "Agricultural and Biological Sciences (miscellaneous)", "1100": "General Agricultural and Biological Sciences", "1109": "Insect Science", "1108": "Horticulture", "1111": "Soil Science", "33": "Social Sciences", "3310": "Linguistics and Language", "3307": "Human Factors and Ergonomics", "3306": "Health (social science)", "3305": "Geography, Planning and Development", "3304": "Education", "3303": "Development", "3302": "Archeology", "3301": "Social Sciences (miscellaneous)", "3300": "General Social Sciences", "3309": "Library and Information Sciences", "3308": "Law", "3321": "Public Administration", "3320": "Political Science and International Relations", "3318": "Gender Studies", "3317": "Demography", "3316": "Cultural Studies", "3315": "Communication", "3314": "Anthropology", "3313": "Transportation", "3312": "Sociology and Political Science", "3311": "Safety Research", "3319": "Life-span and Life-course Studies", "3322": "Urban Studies", "12": "Arts and Humanities", "1213": "Visual Arts and Performing Arts", "1212": "Religious Studies", "1211": "Philosophy", "1210": "Music", "1206": "Conservation", "1205": "Classics", "1204": "Archeology (arts and humanities)", "1203": "Language and Linguistics", "1202": "History", "1201": "Arts and Humanities (miscellaneous)", "1200": "General Arts and Humanities", "1209": "Museology", "1208": "Literature and Literary Theory", "1207": "History and Philosophy of Science", "34": "Veterinary", "3404": "Small Animals", "3403": "Food Animals", "3402": "Equine", "3401": "Veterinary (miscellaneous)", "3400": "General Veterinary", "13": "Biochemistry, Genetics and Molecular Biology", "1305": "Biotechnology", "1304": "Biophysics", "1303": "Biochemistry", "1302": "Aging", "1301": "Biochemistry, Genetics and Molecular Biology (miscellaneous)", "1300": "General Biochemistry, Genetics and Molecular Biology", "1309": "Developmental Biology", "1308": "Clinical Biochemistry", "1307": "Cell Biology", "1306": "Cancer Research", "1315": "Structural Biology", "1314": "Physiology", "1313": "Molecular Medicine", "1312": "Molecular Biology", "1311": "Genetics", "1310": "Endocrinology", "35": "Dentistry", "3505": "Orthodontics", "3504": "Oral Surgery", "3503": "Dental Hygiene", "3502": "Dental Assisting", "3501": "Dentistry (miscellaneous)", "3500": "General Dentistry", "3506": "Periodontics", "14": "Business, Management and Accounting", "1404": "Management Information Systems", "1403": "Business and International Management", "1402": "Accounting", "1401": "Business, Management and Accounting (miscellaneous)", "1400": "General Business, Management and Accounting", "1409": "Tourism, Leisure and Hospitality Management", "1408": "Strategy and Management", "1407": "Organizational Behavior and Human Resource Management", "1406": "Marketing", "1405": "Management of Technology and Innovation", "1410": "Industrial Relations", "36": "Health Professions", "3604": "Emergency Medical Services", "3603": "Complementary and Manual Therapy", "3602": "Chiropractics", "3601": "Health Professions (miscellaneous)", "3600": "General Health Professions", "3609": "Occupational Therapy", "3608": "Medical Terminology", "3607": "Medical Laboratory Technology", "3606": "Medical Assisting and Transcription", "3605": "Health Information Management", "3615": "Respiratory Care", "3614": "Radiological and Ultrasound Technology", "3613": "Podiatry", "3612":
"Physical Therapy, Sports Therapy and Rehabilitation", "3611": "Pharmacy", "3610": "Optometry", "3616": "Speech and Hearing", "15": "Chemical Engineering", "1503": "Catalysis", "1502": "Bioengineering", "1501": "Chemical Engineering (miscellaneous)", "1500": "General Chemical Engineering", "1508": "Process Chemistry and Technology", "1507": "Fluid Flow and Transfer Processes", "1506": "Filtration and Separation", "1505": "Colloid and Surface Chemistry", "1504": "Chemical Health and Safety", "16": "Chemistry", "1602": "Analytical Chemistry", "1601": "Chemistry (miscellaneous)", "1600": "General Chemistry", "1607": "Spectroscopy", "1606": "Physical and Theoretical Chemistry", "1605": "Organic Chemistry", "1604": "Inorganic Chemistry", "1603": "Electrochemistry", "17": "Computer Science", "1701": "Computer Science (miscellaneous)", "1700": "General Computer Science", "1709": "Human-Computer Interaction", "1708": "Hardware and Architecture", "1707": "Computer Vision and Pattern Recognition", "1706": "Computer Science Applications", "1705": "Computer Networks and Communications", "1704": "Computer Graphics and Computer-Aided Design", "1703": "Computational Theory and Mathematics", "1702": "Artificial Intelligence", "1712": "Software", "1711": "Signal Processing", "1710": "Information Systems", "18": "Decision Sciences", "1800": "General Decision Sciences", "1804": "Statistics, Probability and Uncertainty", "1803": "Management Science and Operations Research", "1802": "Information Systems and Management", "1801": "Decision Sciences (miscellaneous)", "19": "Earth and Planetary Sciences", "1909": "Geotechnical Engineering and Engineering Geology", "1908": "Geophysics", "1907": "Geology", "1906": "Geochemistry and Petrology", "1905": "Economic Geology", "1904": "Earth-Surface Processes", "1903": "Computers in Earth Sciences", "1902": "Atmospheric Science", "1901": "Earth and Planetary Sciences (miscellaneous)", "1900": "General Earth and Planetary Sciences", "1910":
"Oceanography", "1913": "Stratigraphy", "1912": "Space and Planetary Science", "1911": "Paleontology", "20": "Economics, Econometrics and Finance", "2001": "Economics, Econometrics and Finance (miscellaneous)", "2000": "General Economics, Econometrics and Finance", "2003": "Finance", "2002": "Economics and Econometrics", "21": "Energy", "2100": "General Energy", "2105": "Renewable Energy, Sustainability and the Environment", "2104": "Nuclear Energy and Engineering", "2103": "Fuel Technology", "2102": "Energy Engineering and Power Technology", "2101": "Energy (miscellaneous)"}

async def getQkInfo(redis_conn, issn, eissn, journal_name):
    """Fetch journal info from the redis hash "qk_info".

    Lookup order: issn, then eissn, then the empty-string fallback field.
    The stored value is a JSON object; with exactly one entry that entry is
    returned directly, otherwise the entry keyed by the journal name
    (reduced to alphanumeric characters) is returned.
    """
    name_key = re.sub(r'[^a-zA-Z0-9]', '', journal_name)
    raw = None
    if len(issn) > 0:
        raw = await redis_conn.hget("qk_info", issn)
    if raw is None and len(eissn) > 0:
        raw = await redis_conn.hget("qk_info", eissn)
    if raw is None:
        raw = await redis_conn.hget("qk_info", "")
    if not raw:
        # nothing stored (or empty payload) -- hand it back unparsed
        return raw
    info = json.loads(raw)
    if len(info) == 1:
        return next(iter(info.values()))
    if len(info) > 1:
        return info.get(name_key)
    return info


def deal_data(data_dicts):
    """Normalize one raw Scopus CSV row.

    - Renames known column headers to underscore-safe keys (key_dicts).
    - Replaces None values with "".
    - Merges duplicate/legacy columns: the BOM-prefixed '\ufeffAuthors'
      header (as exported by some CSV writers) vs 'Authors', 'Titles' vs
      'Title', and 'Index Keywords' vs 'Indexed Keywords'.

    Returns a new dict; data_dicts is not modified.
    """
    key_dicts = {'EID': 'EID',
                 'Title': 'Title',
                 'Document Type': 'Document_Type',
                 'Abstract': 'Abstract',
                 'Page start': 'Page_start',
                 'Abbreviated Source Title': 'Abbreviated_Source_Title',
                 'Volume': 'Volume',
                 'Issue': 'Issue',
                 'Authors': 'Authors',
                 '\ufeffAuthors': '_Authors',  # BOM-prefixed header variant
                 'Authors with affiliations': 'Authors_with_affiliations',
                 'Affiliations': 'Affiliations',
                 'Author(s) ID': 'Author_s__ID',
                 'Author full names': 'Author_full_names',
                 'Link': 'Link',
                 'DOI': 'DOI',
                 'Language of Original Document': 'Language_of_Original_Document',
                 'Year': 'Year',
                 'Conference date': 'Conference_date',
                 'Author Keywords': 'Author_Keywords',
                 'Indexed Keywords': 'Indexed_Keywords',
                 'Index Keywords': 'Indexed_Keywords',
                 'Source title': 'Source_title',
                 'ISSN': 'ISSN',
                 'Page end': 'Page_end',
                 'Publisher': 'Publisher',
                 'Sponsors': 'Sponsors',
                 'Conference name': 'Conference_name',
                 'Conference location': 'Conference_location',
                 'Conference code': 'Conference_code',
                 'CODEN': 'CODEN',
                 'Correspondence Address': 'Correspondence_Address',
                 'Cited by': 'Cited_by',
                 'Open Access': 'Open_Access',
                 'Art. No.': 'Art__No_',
                 'Page count': 'Page_count',
                 'Funding Details': 'Funding_Details',
                 'Funding Texts': 'Funding_Texts',
                 'Source': 'Source',
                 'Editors': 'Editors',
                 'Publication Stage': 'Publication_Stage',
                 'Chemicals/CAS': 'Chemicals_CAS',
                 'References': 'References',
                 'Manufacturers': 'Manufacturers',
                 'Molecular Sequence Numbers': 'Molecular_Sequence_Numbers',
                 'Tradenames': 'Tradenames',
                 'PubMed ID': 'PubMed_ID',
                 'ISBN': 'ISBN',
                 'Titles': 'Title'}

    new_dicts = {}
    for k, v in data_dicts.items():
        v = "" if v is None else v
        if "Funding Texts" in k:
            # numbered columns like "Funding Texts 1" keep their suffix
            new_dicts[k.replace(" ", "_")] = v
        else:
            # unknown headers pass through unchanged
            new_dicts[key_dicts.get(k, k)] = v

    def _prefer(primary, secondary):
        # First truthy value wins; "" when both are empty/None.
        if primary:
            return primary
        if secondary:
            return secondary
        return ""

    bom_authors = '\ufeffAuthors'
    if bom_authors in data_dicts and "Authors" in data_dicts:
        new_dicts["Authors"] = _prefer(data_dicts["Authors"], data_dicts[bom_authors])
    elif bom_authors in data_dicts:
        v = data_dicts[bom_authors]
        # bug fix: the old code could store None here, breaking the
        # "None becomes empty string" convention used everywhere else
        new_dicts["Authors"] = "" if v is None else v

    if "Title" in data_dicts and "Titles" in data_dicts:
        new_dicts["Title"] = _prefer(data_dicts["Title"], data_dicts["Titles"])

    if "Indexed Keywords" in data_dicts and "Index Keywords" in data_dicts:
        # when both columns are populated, 'Index Keywords' wins (as before)
        new_dicts["Indexed_Keywords"] = _prefer(
            data_dicts["Index Keywords"], data_dicts["Indexed Keywords"])

    return new_dicts


def letter_to_number(letter):
    """Map letters a-z (case-insensitive) to their ordinals 1-26.

    A multi-character input yields the comma-separated ordinals of each
    character ("ab" -> "1,2"); an empty string yields "".
    """
    # The original had a separate single-character branch, but joining a
    # one-element sequence produces the same string, so one path suffices.
    letter = letter.lower()
    return ",".join(str(ord(ch) - ord('a') + 1) for ch in letter)


def initlanguageMap():
    """Build a lookup from language name or ISO-style code (uppercased) to a
    two-letter language code.

    The table below is a ';'-separated list of CODE★NAME entries, where NAME
    may be a code alias (ENG), an English name (English), or a variant such
    as "AKA + 2" carried over from the source data. Malformed or empty
    entries are skipped. Returns dict[str, str].
    """
    codelanguageMap = {}
    # NOTE: was previously bound to a local named `str`, shadowing the builtin.
    mapping_spec = "AA★AA;AB★AB;AE★AE;AF★AF;AK★AK;AM★AM;AN★AN;AR★AR;AS★AS;AV★AV;AY★AY;AZ★AZ;BA★BA;BE★BE;BG★BG;BH★BH;BI★BI;BM★BM;BN★BN;BO★BO;BR★BR;BS★BS;CA★CA;CE★CE;CH★CH;CO★CO;CR★CR;CS★CS;CU★CU;CV★CV;CY★CY;DA★DA;DE★DE;DV★DV;DZ★DZ;EE★EE;EL★EL;EN★EN;EO★EO;ES★ES;ET★ET;EU★EU;FA★FA;FF★FF;FI★FI;FJ★FJ;FO★FO;FR★FR;FY★FY;GA★GA;GD★GD;GL★GL;GN★GN;GU★GU;GV★GV;HA★HA;HE★HE;HI★HI;HO★HO;HR★HR;HT★HT;HU★HU;HY★HY;HZ★HZ;IA★IA;ID★ID;IE★IE;IG★IG;II★II;IK★IK;IO★IO;IS★IS;IT★IT;IU★IU;JA★JA;JV★JV;KA★KA;KG★KG;KI★KI;KJ★KJ;KK★KK;KL★KL;KM★KM;KN★KN;KO★KO;KR★KR;KS★KS;KU★KU;KV★KV;KW★KW;KY★KY;LA★LA;LB★LB;LG★LG;LI★LI;LN★LN;LO★LO;LT★LT;LU★LU;LV★LV;MD★MD;MG★MG;MH★MH;MI★MI;MK★MK;ML★ML;MN★MN;MO★MO;MR★MR;MS★MS;MT★MT;MY★MY;NA★NA;NB★NB;ND★ND;NE★NE;NG★NG;NL★NL;NN★NN;NO★NO;NR★NR;NV★NV;NY★NY;OC★OC;OJ★OJ;OM★OM;OR★OR;OS★OS;PA★PA;PI★PI;PL★PL;PS★PS;PT★PT;QU★QU;RM★RM;RN★RN;RO★RO;RU★RU;RW★RW;SA★SA;SC★SC;SD★SD;SE★SE;SG★SG;SH★SH;SI★SI;SK★SK;SL★SL;SM★SM;SN★SN;SO★SO;SQ★SQ;SR★SR;SS★SS;ST★ST;SU★SU;SV★SV;SW★SW;TA★TA;TE★TE;TG★TG;TH★TH;TI★TI;TK★TK;TL★TL;TN★TN;TO★TO;TR★TR;TS★TS;TT★TT;TW★TW;TY★TY;UG★UG;UK★UK;UR★UR;UZ★UZ;VE★VE;VI★VI;VO★VO;WA★WA;WO★WO;XH★XH;YI★YI;YO★YO;ZA★ZA;ZH★ZH;ZU★ZU;AA★AAR;AA★Afar;AB★ABK;AB★Abkhazian;AE★AVE;AE★Avestan;AF★AFR;AF★Afrikaans;AK★AKA;AK★AKA + 2;AK★Akan;AM★AMH;AM★Amharic;AN★Aragonese;AN★ARG;AR★ARA;AR★ARA + 30;AR★Arabic;AS★ASM;AS★Assamese;AV★AVA;AV★Avaric;AY★AYM;AY★AYM + 2;AY★Aymara;AZ★AZE;AZ★AZE + 2;AZ★Azerbaijani;BA★BAK;BA★Bashkir;BE★BEL;BE★Belarusian;BG★BUL;BG★Bulgarian;BH★BIH;BH★Bihari;BH★None;BI★BIS;BI★Bislama;BM★BAM;BM★Bambara;BN★BEN;BN★Bengali;BO★BOD;BO★TIB;BO★Tibetan;BR★BRE;BR★Breton;BS★BOS;BS★Bosnian;CA★CAT;CA★Catalan;CE★CHE;CE★Chechen;CH★CHA;CH★Chamorro;CO★Corsican;CO★COS;CR★CRE;CR★CRE + 6;CR★Cree;CS★CES;CS★CZE;CS★Czech;CU★CHU;CU★Church Slavic;CV★Chuvash;CV★CHV;CY★CYM;CY★WEL;CY★Welsh;DA★DAN;DA★Danish;DE★DEU;DE★GER;DE★German;DV★DIV;DV★Divehi;DZ★DZO;DZ★Dzongkha;EE★EWE;EL★ELL;EL★GRE;EL★Greek;EN★ENG;EN★English;EO★EPO;EO★Esperanto;ES★Castilian;ES★SPA;ES★Spanish;ET★EST;ET★Estonian;EU★BAQ;EU★Basque;EU★EUS;FA★FAS;FA★FAS + 1;FA★FAS + 2;FA★PER;FA★Persian;FF★FUL;FF★FUL + 9;FF★Fulah;FI★FIN;FI★Finnish;FJ★FIJ;FJ★Fijian;FO★FAO;FO★Faroese;FR★FRA;FR★FRE;FR★French;FY★FRY;FY★FRY + 3;FY★Western Frisian;GA★GLE;GA★Irish;GD★GLA;GD★Scottish Gaelic;GL★Galician;GL★GLG;GN★GRN;GN★GRN + 5;GN★Guaraní;GU★GUJ;GU★Gujarati;GV★GLV;GV★Manx;HA★HAU;HA★Hausa;HE★HEB;HE★Hebrew;HI★HIN;HI★Hindi;HO★Hiri Motu;HO★HMO;HR★Croatian;HR★HRV;HR★SCR;HT★Haitian Creole;HT★HAT;HU★HUN;HU★Hungarian;HY★ARM;HY★Armenian;HY★HYE;HZ★HER;HZ★Herero;IA★INA;IA★Interlingua;IA★International Auxiliary Language Association;ID★IND;ID★Indonesian;IE★ILE;IE★Interlingue;IG★IBO;IG★Igbo;II★III;II★Sichuan Yi;IK★Inupiaq;IK★IPK;IK★IPK + 2;IO★IDO;IS★ICE;IS★Icelandic;IS★ISL;IT★ITA;IT★Italian;IU★IKU;IU★IKU + 2;IU★Inuktitut;JA★Japanese;JA★JPN;JV★JAV;JV★Javanese;KA★GEO;KA★Georgian;KA★KAT;KG★KON;KG★KON + 3;KG★Kongo;KI★KIK;KI★Kikuyu;KJ★KUA;KJ★Kwanyama;KK★KAZ;KK★Kazakh;KL★KAL;KL★Kalaallisut;KM★KHM;KM★Khmer;KN★KAN;KN★Kannada;KO★KOR;KO★Korean;KR★Kanuri;KR★KAU;KR★KAU + 3;KS★KAS;KS★Kashmiri;KU★KUR;KU★KUR + 3;KU★Kurdish;KV★KOM;KV★KOM + 2;KV★Komi;KW★COR;KW★Cornish;KY★KIR;KY★Kirghiz;LA★LAT;LA★Latin;LB★LTZ;LB★Luxembourgish;LG★Ganda;LG★LUG;LI★LIM;LI★Limburgish;LN★LIN;LN★Lingala;LO★LAO;LT★LIT;LT★Lithuanian;LU★LUB;LU★Luba-Katanga;LV★Latvian;LV★LAV;MD★Moldovan;MG★Malagasy;MG★MLG;MG★MLG + 10;MH★MAH;MH★Marshallese;MI★MAO;MI★Māori;MI★MRI;MK★MAC;MK★Macedonian;MK★MKD;ML★MAL;ML★Malayalam;MN★MON;MN★MON + 2;MN★Mongolian;MO★MOL;MO★Moldavian;MR★MAR;MR★Marathi;MS★Malay;MS★MAY;MS★MSA;MS★MSA + 12;MS★MSA + 13;MT★Maltese;MT★MLT;MY★BUR;MY★Burmese;MY★MYA;NA★NAU;NA★Nauru;NB★NOB;NB★Norwegian Bokmål;ND★NDE;ND★North Ndebele;NE★NEP;NE★Nepali;NG★NDO;NG★Ndonga;NL★DUT;NL★Dutch;NL★NLD;NN★NNO;NN★Norwegian Nynorsk;NO★NOR;NO★NOR + 2;NO★Norwegian;NR★NBL;NR★South Ndebele;NV★NAV;NV★Navajo;NY★Chichewa;NY★NYA;OC★Occitan;OC★OCI;OJ★OJI;OJ★OJI + 7;OJ★Ojibwa;OM★ORM;OM★ORM + 4;OM★Oromo;OR★ORI;OR★Oriya;OS★OSS;OS★Ossetian;PA★PAN;PA★Panjabi;PI★Pāli;PI★PLI;PL★POL;PL★Polish;PS★Pashto;PS★PUS;PS★PUS + 3;PT★POR;PT★Portuguese;QU★QUE;QU★QUE + 44;QU★Quechua;RM★Raeto-Romance;RM★ROH;RN★Kirundi;RN★RUN;RO★Romanian;RO★RON;RO★RUM;RU★RUS;RU★Russian;RW★KIN;RW★Kinyarwanda;SA★SAN;SA★Sanskrit;SC★Sardinian;SC★SRD;SC★SRD + 4;SD★Sindhi;SD★SND;SE★Northern Sami;SE★SME;SG★SAG;SG★Sango;SH★HBS ;SH★HBS + 3;SH★Serbo-Croatian;SI★SIN;SI★Sinhalese;SK★SLK;SK★SLO;SK★Slovak;SL★Slovenian;SL★SLV;SM★Samoan;SM★SMO;SN★Shona;SN★SNA;SO★SOM;SO★Somali;SQ★ALB;SQ★Albanian;SQ★SQI;SQ★SQI + 3;SQ★SQI + 4;SR★SCC;SR★Serbian;SR★SRP;SS★SSW;SS★Swati;ST★SOT;ST★Sotho;SU★SUN;SU★Sundanese;SV★SWE;SV★Swedish;SW★SWA;SW★SWA + 2;SW★Swahili;TA★TAM;TA★Tamil;TE★TEL;TE★Telugu;TG★Tajik;TG★TGK;TH★THA;TH★Thai;TI★Tigrinya;TI★TIR;TK★TUK;TK★Turkmen;TL★Tagalog;TL★TGL;TN★TSN;TN★Tswana;TO★TON;TO★Tonga;TR★TUR;TR★Turkish;TS★TSO;TS★Tsonga;TT★TAT;TT★Tatar;TW★TWI;TY★TAH;TY★Tahitian;UG★UIG;UG★Uyghur;UK★UKR;UK★Ukrainian;UR★URD;UR★Urdu;UZ★UZB;UZ★UZB + 2;UZ★Uzbek;VE★VEN;VE★Venda;VI★VIE;VI★Vietnamese;VO★VOL;VO★Volapük;WA★Walloon;WA★WLN;WO★WOL;WO★Wolof;XH★XHO;XH★Xhosa;YI★YID;YI★YID + 2;YI★Yiddish;YO★YOR;YO★Yoruba;ZA★ZHA;ZA★ZHA + 2;ZA★Zhuang;ZH★CHI;ZH★Chinese;ZH★ZHO;ZH★ZHO + 12;ZH★ZHO + 13;ZU★ZUL;ZU★Zulu"
    for entry in mapping_spec.split(";"):
        entry = entry.strip()
        if len(entry) < 3:  # too short to hold CODE★NAME
            continue
        parts = entry.split("★")
        if len(parts) < 2:
            continue
        code = parts[0].strip()
        name = parts[1].upper().strip()
        if name and code:
            codelanguageMap[name] = code
    return codelanguageMap


def cleanSemicolon(text):
    """Normalize a semicolon-delimited string.

    Converts full-width semicolons to ';', removes whitespace around each
    semicolon, collapses runs of semicolons and spaces, strips a leading or
    trailing semicolon, and trims the result. None becomes "".
    """
    if text is None:
        return ""
    normalized = text.replace('；', ';').replace("; ", ";")
    normalized = re.sub(r"\s+;", ";", normalized)  # whitespace before ';'
    normalized = re.sub(r";\s+", ";", normalized)  # whitespace after ';'
    normalized = re.sub(r";+", ";", normalized)    # collapse repeated ';'
    normalized = re.sub(r" +", " ", normalized)    # collapse repeated spaces
    normalized = re.sub(r"^;", "", normalized)     # leading ';'
    normalized = re.sub(r";$", "", normalized)     # trailing ';'
    return normalized.strip()


def get_full_abbr_dic(full_txt, abbr_txt):
    """Pair each ';'-separated abbreviation with its full name.

    The full name is truncated at the first '(' and stripped. Returns {} when
    the two lists have different lengths.
    """
    abbrs = abbr_txt.split(";")
    fulls = full_txt.split(";")
    if len(abbrs) != len(fulls):
        return {}
    return {
        cleanSemicolon(abbr): cleanSemicolon(full).split("(")[0].strip()
        for abbr, full in zip(abbrs, fulls)
    }


def number_by_map(mapcre_ins, AF_AU_dic):
    """Render an author -> institutions mapping as numbered strings.

    Each distinct institution gets a sequential index on first sight; authors
    are emitted as "Name[1,2]" (or bare "Name" when they have no institution)
    and institutions as "[1]Inst". Author names are replaced via AF_AU_dic
    when a non-empty mapping exists. Returns [creator, institution].
    """
    ins_index = {}
    creator_parts = []
    institution_parts = []
    for author, institutions in mapcre_ins.items():
        full_name = AF_AU_dic.get(author, "")
        display = full_name if len(full_name) > 0 else author
        if not institutions:
            creator_parts.append(display)
            continue
        nums = []
        for ins in institutions.split(";"):
            if ins not in ins_index:
                # first occurrence: assign the next sequential number
                ins_index[ins] = str(len(ins_index) + 1)
                institution_parts.append(f"[{ins_index[ins]}]{ins}")
            nums.append(ins_index[ins])
        creator_parts.append(f"{display}[{','.join(nums)}]")
    creator = sort_creater(";".join(creator_parts))
    return [creator, ";".join(institution_parts)]


def sort_creater(creator):
    """Sort the institution-number lists inside each "[...]" of a creator
    string, e.g. "Name[3,1]" -> "Name[1,3]".

    Bug fix: the previous plain (lexicographic) sort ordered "10" before
    "2"; numeric tokens are now compared as integers, with any non-numeric
    tokens sorted lexicographically after them.
    """
    for match in re.finditer(r"\[(.*?)\]", creator):
        inner = match.group(1)
        nums = inner.split(",")
        nums.sort(key=lambda tok: (0, int(tok)) if tok.isdigit() else (1, tok))
        creator = creator.replace(f"[{inner}]", f"[{','.join(nums)}]")
    return creator


def set_scopus_ref_cited(ref_data,src_data):
    """Copy normalized citation fields from a raw Scopus reference record
    (src_data) into ref_data, then build the formatted citation string
    ``refer_text_site`` ("authors.title[type].source,year,vol(num):pages").
    Returns the mutated ref_data.
    """
    old_linked_id = src_data.get("EID", "")
    if old_linked_id is None:  # key may exist with an explicit None value
        old_linked_id = ""
    author_1st = ""
    author = ""
    author_str = cleanSemicolon(src_data.get("Authors", ""))
    if len(author_str) > 0:
        # raw author list is comma-separated; re-join with ';'
        aus_list = author_str.split(",")
        author_1st = aus_list[0]
        author = ";".join(aus_list)
    # strip Scopus placeholder text for missing authors/titles
    ref_data["author_1st"] = author_1st.replace("[No author name available]","")
    ref_data["author"] = author.replace("[No author name available]","")
    ref_data["publisher"] = cleanSemicolon(src_data.get("Publisher", ""))
    title = cleanSemicolon(src_data.get("Title", ""))
    ref_data["title"] = title.replace("[No title available]",'')
    ref_data["doi"] = cleanSemicolon(src_data.get("DOI", ""))
    # ref_data["issn"] = cleanSemicolon(src_data.get("ISSN", ""))
    vol = cleanSemicolon(src_data.get("Volume", ""))
    ref_data["vol"] = vol
    num = cleanSemicolon(src_data.get("Issue", ""))
    ref_data["num"] = num
    begin_page = cleanSemicolon(src_data.get("Page start", ""))
    ref_data["begin_page"] = begin_page
    end_page = cleanSemicolon(src_data.get("Page end", ""))
    ref_data["end_page"] = end_page
    pub_year = cleanSemicolon(src_data.get("Year", ""))
    ref_data["pub_year"] = pub_year
    source_name = cleanSemicolon(src_data.get("Source title", ""))
    ref_data["source_name"] = source_name
    linked_id = ""
    doc_type = cleanSemicolon(src_data.get("Document Type", ""))
    sub_db_id = ""
    # map the document type to a one-letter code; default to "K" (other)
    strtype = docType.get(doc_type,"")
    if len(strtype) == 0:
        strtype = "K"
    ref_data["old_linked_id"] = old_linked_id
    ref_data["linked_id"] = linked_id  # intentionally left empty here
    ref_data["sub_db_id"] = sub_db_id  # intentionally left empty here
    ref_data["strtype"] = strtype

    # Assemble the display citation, skipping empty parts.
    refer_text_site = ""
    if len(author) > 0:
        refer_text_site += author.replace(";", ",") + "."
    if len(title) > 0:
        refer_text_site += title + "[{}].".format(strtype)
    if len(source_name) > 0:
        refer_text_site += source_name + ","
    if len(pub_year) > 0:
        refer_text_site += pub_year
    if len(refer_text_site) > 0 and refer_text_site[-1] == ",":
        # drop the dangling comma left when the year is missing
        refer_text_site = refer_text_site[:-1]
    if strtype == "J":
        # journal articles carry volume(issue)
        if len(vol) > 0:
            refer_text_site += "," + vol
        if len(num) > 0:
            refer_text_site += "({})".format(num)
    if strtype in ("J", "M", "D", "C"):
        # page range only for types that conventionally cite pages
        if len(begin_page) > 0:
            refer_text_site += ":" + begin_page
            if len(end_page) > 0:
                refer_text_site += "-" + end_page
    ref_data["refer_text_site"] = refer_text_site
    return ref_data


def parse_scopusjournal_ref(ref_data, down_dict):
    """Flatten all reference records found in down_dict into ref_data.

    Each record becomes one entry in ref_data["refer_info"]; its lngid is the
    parent lngid plus a 4-digit running sequence. Also sets ref_cnt and, when
    any references exist, the ';'-joined ref_id list. Returns ref_data.
    """
    refs = []
    ref_ids = []
    seq = 0
    for _, src_data in down_dict.items():
        # records live under "records" or, in older payloads, "refs"
        records = src_data["records"] if "records" in src_data else src_data["refs"]
        for _, item in records.items():
            if isinstance(item, str):
                item = json.loads(item)
            raw_text = json.dumps(item, ensure_ascii=False)
            seq += 1
            ref_lngid = "{}{}".format(ref_data["lngid"], str(seq).zfill(4))
            ref = {
                "cited_rawid": ref_data["rawid"],
                "cited_lngid": ref_data["lngid"],
                "cited_pub_year": ref_data.get("pub_year", ""),
                "lngid": ref_lngid,
                "keyid": ref_lngid,
                "refer_text_raw": raw_text.strip(),
            }
            refs.append(set_scopus_ref_cited(ref, item))
            ref_ids.append(ref_lngid)
    ref_data["ref_cnt"] = str(len(refs))
    if refs:
        ref_data["ref_id"] = ";".join(ref_ids)
        ref_data["refer_info"] = refs
    return ref_data


def parse_scopusjournal_article_csv(src_data, redis_conn):
    """Parse one Scopus journal article (CSV export row) into the unified schema.

    :param src_data: raw CSV row as a dict keyed by Scopus export column names;
        must contain ``down_date``.
    :param redis_conn: redis connection used by ``getQkInfo`` to look up
        journal country/language/raw-id metadata.
    :return: populated ``data`` dict, or a negative int sentinel:
        -1 conference document (handled elsewhere), -2 missing EID,
        -3 no usable title.
    """
    src_data = deal_data(src_data)
    codelanguageMap = initlanguageMap()
    data = {}
    rawid = cleanSemicolon(src_data.get("EID", ""))
    raw_type = cleanSemicolon(src_data.get("Document_Type", ""))
    if "conference" in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    source_type = "3"
    sub_db_id = "00164"
    product = "SCOPUS"
    sub_db = "QK"
    provider = "ELSEVIER"
    # BUGFIX: was "%Y%m%d_%H%I%S" — %I is the 12-hour hour, so the stamp
    # repeated the hour and had no minutes. Intended format: YYYYmmdd_HHMMSS.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    down_date = src_data["down_date"]
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = src_data.get("Link", "").strip()
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    rawid_alt = ""
    pm_id = cleanSemicolon(src_data.get("PubMed_ID", ""))
    if len(pm_id) > 0:
        rawid_alt = "pubmed@{}".format(pm_id)
    data["rawid_alt"] = rawid_alt

    # Normalize ISSN to NNNN-NNNN; short undashed values are padded to 8
    # chars with zeros ("{:8s}" left-aligns, so the padding lands on the right).
    issn = src_data.get("ISSN", "").strip()
    if len(issn) > 0:
        if len(issn) == 8:
            issn = "{}-{}".format(issn[0:4], issn[4:8])
        elif "-" not in issn:
            res = "{:8s}".format(issn)
            issn = res.replace(" ", "0")
            sb = list(issn)
            sb.insert(4, '-')
            issn = ''.join(sb)

    journal_name = src_data.get("Source_title", "").strip()
    country = ""
    language = ""
    journal_raw_id = ""
    jinfo = getQkInfo(redis_conn, issn, "", journal_name)
    if jinfo is not None:
        country = jinfo.get("country", "")
        language = jinfo.get("language", "")
        journal_raw_id = jinfo.get("journal_raw_id", "")
    data["country"] = country
    data["journal_raw_id"] = journal_raw_id
    if len(language) == 0:
        # BUGFIX: was data.get(...) — 'data' never holds the raw CSV column
        # "Language_of_Original_Document", so this fallback was dead code.
        lgstr = src_data.get("Language_of_Original_Document", "").strip()
        if len(lgstr) > 0:
            lgStrings = lgstr.upper().split(";")
            hashSet = set()
            if len(lgStrings) > 1:
                for lg in lgStrings:
                    tmp = codelanguageMap.get(lg, "")
                    if tmp and len(tmp) > 0:
                        hashSet.add(tmp)
                language = ";".join(hashSet)
            else:
                language = codelanguageMap.get(lgstr.upper(), "")
            if not language:
                language = ""
        else:
            language = ""
    data["language"] = language
    data["journal_name"] = journal_name
    data["journal_name_alt"] = cleanSemicolon(src_data.get("Abbreviated_Source_Title", ""))
    data["doc_no"] = cleanSemicolon(src_data.get("Art__No_", ""))
    data["keyword"] = cleanSemicolon(src_data.get("Author_Keywords", ""))
    data["keyword_machine"] = cleanSemicolon(src_data.get("Indexed_Keywords", ""))

    title = cleanSemicolon(src_data.get("Title", ""))
    if "[No title available]" in title:
        return -3
    data["title"] = title
    data["title_alt"] = cleanSemicolon(src_data.get("Title_of_translation", ""))
    abstract_ = cleanSemicolon(src_data.get("Abstract", ""))
    if "[No abstract available]" in abstract_:
        abstract_ = ""
    data["abstract"] = abstract_
    data["vol"] = cleanSemicolon(src_data.get("Volume", ""))
    data["num"] = cleanSemicolon(src_data.get("Issue", ""))
    data["doi"] = cleanSemicolon(src_data.get("DOI", ""))
    data["issn"] = issn

    data["begin_page"] = cleanSemicolon(src_data.get("Page_start", ""))
    data["end_page"] = cleanSemicolon(src_data.get("Page_end", ""))
    data["page_cnt"] = cleanSemicolon(src_data.get("Page_count", ""))

    # Citation count is stamped with the download date: "<count>@<down_date>".
    cited_cnt = cleanSemicolon(src_data.get("Cited_by", ""))
    if len(cited_cnt) > 0:
        cited_cnt = cited_cnt + "@" + down_date
    else:
        cited_cnt = "0@" + down_date
    data["cited_cnt"] = cited_cnt

    ref_cnt = "0"
    refs = cleanSemicolon(src_data.get("References", ""))
    if len(refs) > 0:
        ref_cnt = str(len(refs.split(";")))
    data["References_latest"] = refs
    data["ref_cnt"] = ref_cnt

    author_str = cleanSemicolon(src_data.get("Authors", ""))
    # Heuristic: a comma-only list with >2 commas can't be split reliably
    # (commas separate both authors and name parts) — discard it.
    if author_str.count(",") > 2 and ";" not in author_str:
        author_str = ""
    author_full_str = cleanSemicolon(src_data.get("Author_full_names", ""))
    auids_str = cleanSemicolon(src_data.get("Author_s__ID", ""))
    organ_str = cleanSemicolon(src_data.get("Affiliations", ""))
    auor_str = cleanSemicolon(src_data.get("Authors_with_affiliations", ""))
    data["author_raw"] = author_str + "&&" + auor_str + "&&" + author_full_str
    data["preferred_organ"] = organ_str

    aus = []
    auids = []
    organs = []
    auors = []
    author_id = ""
    author = ""
    author_1st = ""
    organ = ""
    organ_1st = ""
    email = ""

    if len(author_str) > 0 and "[No author name available]" not in author_str:
        author_str = author_str.replace("'", "")
        if ";" in author_str:
            aus = author_str.split(";")
        elif "," in author_str:
            aus = author_str.split(",")
        else:
            aus = [author_str]
    # BUGFIX: was testing author_str — both the emptiness check and the
    # "[No author id available]" marker belong to the author-id column.
    if len(auids_str) > 0 and "[No author id available]" not in auids_str:
        auids = auids_str.split(";")
    if len(organ_str) > 0:
        organs = organ_str.split(";")
    if len(auor_str) > 0:  # author <-> affiliation relationship strings
        auors = auor_str.split(";")
    AF_AU_dic = get_full_abbr_dic(author_full_str, author_str)
    if len(aus) > 0 and len(auids) > 0 and len(aus) == len(auids):
        # Pair each author id with the full name when known, else the
        # abbreviated one: "<id>@<name>;...".
        sb_list = []
        for i in range(0, len(aus)):
            tmp_au = AF_AU_dic.get(aus[i], "")
            if len(tmp_au) == 0:
                tmp_au = aus[i]
            sb_list.append("{}@{}".format(auids[i], tmp_au))
        author_id = ";".join(sb_list)
    if len(auors) > 0 and len(aus) > 0:
        # Match each author to its affiliation string by substring comparison
        # after stripping punctuation/spaces; '[' and ']' are escaped to
        # '!' / '#' so they survive the later numbering step.
        authorMap = {}
        for i in range(0, len(aus)):
            for j in range(0, len(auors)):
                if auors[j].replace(",", "").replace(".", "").replace(" ", "").find(
                        aus[i].replace(",", "").replace(".", "").replace(" ", "")) != -1:
                    org_list = []
                    if len(organs) > 0:
                        for k in range(0, len(organs)):
                            if auors[j].replace(",", "").replace(".", "").replace(" ", "").find(
                                    organs[k].replace(",", "").replace(".", "").replace(" ", "")) != -1:
                                org_list.append(organs[k].strip())
                    if len(org_list) > 0:
                        authorMap[aus[i].strip().replace("[", "!").replace("]", "#")] = ";".join(org_list)
                    else:
                        authorMap[aus[i].strip().replace("[", "!").replace("]", "#")] = ""
                    break
        result = number_by_map(authorMap, AF_AU_dic)
        if len(authorMap) > 0:
            author = result[0].replace("!", "[").replace("#", "]")
            author_1st = author.split(";")[0].split("[")[0]
            organ = result[1]
            organ_1st = ""
            if len(organ) > 0:
                organ_1st = organ.split(";")[0].replace("[1]", "", 1)
    # Fall back to the raw affiliation string when matching produced nothing.
    if len(organ) == 0 and len(organ_str) > 0:
        organ = organ_str
        organ_1st = organ.split(";")[0].replace("[1]", "", 1)
    data["corr_author"] = cleanSemicolon(src_data.get("Correspondence_Address", ""))
    data["author"] = author
    data["author_1st"] = author_1st
    data["organ"] = organ
    data["organ_1st"] = organ_1st
    data["email"] = email
    data["author_id"] = author_id
    data["fund"] = cleanSemicolon(src_data.get("Funding_Details", ""))
    data["fund_alt"] = cleanSemicolon(src_data.get("Funding_Text_1", ""))
    data["sub_db_class_name"] = cleanSemicolon(src_data.get("CODEN", ""))

    pub_year = cleanSemicolon(src_data.get("Year", ""))
    pub_date = ""
    if len(pub_year) == 4:
        pub_date = pub_year + "0000"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    isa = cleanSemicolon(src_data.get("Open_Access", ""))
    is_oa = "0"
    if isa == "Open Access":
        is_oa = "1"
    data["is_oa"] = is_oa
    is_suppl = "0"
    if "suppl" in data["vol"] or "suppl" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    return data


def parse_scopusjournal_article_csv_json(src_data, redis_conn):
    """Parse one Scopus journal article (JSON record, CSV-batch variant).

    :param src_data: raw JSON record as a dict; must contain ``down_date``.
    :param redis_conn: redis connection (unused here; kept for interface
        parity with the other parse_* entry points).
    :return: populated ``data`` dict, or a negative int sentinel:
        -1 conference document, -2 missing eid, -3 no usable title.
    """
    src_data = deal_data(src_data)
    data = {}
    rawid = cleanSemicolon(src_data.get("eid", "").strip())
    raw_type = cleanSemicolon(src_data.get("documentType", "").strip())
    if "conference" in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    source_type = "3"
    sub_db_id = "00164"
    product = "SCOPUS"
    sub_db = "QK"
    provider = "ELSEVIER"
    # BUGFIX: was "%Y%m%d_%H%I%S" — %I is the 12-hour hour, so the stamp
    # repeated the hour and had no minutes. Intended format: YYYYmmdd_HHMMSS.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    down_date = src_data["down_date"]
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    # Provider URL = the link entry whose rel is "self".
    links = src_data.get("links")
    provider_url = ""
    if links:
        for item in links:
            if item.get("rel", "") == "self":
                provider_url = item.get("href", "")
                break
    data["provider_url"] = provider_url
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    # NOTE(review): other keys in this JSON variant are lowercase
    # ("eid", "title", ...); confirm "DOI" casing against real payloads.
    data["doi"] = cleanSemicolon(src_data.get("DOI", ""))
    source = src_data.get("source", {})
    # Prefer the generic issn; fall back to the print issn.
    issn = cleanSemicolon(source.get("issn", ""))
    if len(issn) == 0:
        issn = cleanSemicolon(source.get("issnp", ""))
    data["issn"] = issn
    data["eissn"] = cleanSemicolon(source.get("eissn", ""))
    data["publisher"] = cleanSemicolon(source.get("publisher", ""))
    data["journal_raw_id"] = cleanSemicolon(source.get("id", ""))
    data["journal_name"] = cleanSemicolon(source.get("title", ""))
    data["journal_name_alt"] = cleanSemicolon(source.get("sourceTitleAbbreviation", ""))
    data["sub_db_class_name"] = cleanSemicolon(source.get("coden", ""))
    title = cleanSemicolon(src_data.get("title", ""))
    if len(title) == 0:
        # Fall back to the multi-title list, ';'-joined.
        titles = src_data.get("titles", [])
        if len(titles) > 0:
            title = cleanSemicolon(";".join(titles))
    if "[No title available]" in title:
        return -3
    data["title"] = title
    abstract_ = ""
    abs_list = src_data.get("abstractText", [])
    if isinstance(abs_list, list) and len(abs_list) > 0:
        abstract_ = ";".join(cleanSemicolon(item) for item in abs_list)
    if "[No abstract available]" in abstract_:
        abstract_ = ""
    data["abstract"] = cleanSemicolon(abstract_)

    source_rp = src_data.get("sourceRelationship", {})
    data["vol"] = cleanSemicolon(source_rp.get("volume", ""))
    data["num"] = cleanSemicolon(source_rp.get("issue", ""))
    data["page_cnt"] = cleanSemicolon(source_rp.get("pageCount", ""))
    data["doc_no"] = cleanSemicolon(source_rp.get("articleNumber", ""))

    pages = source_rp.get("pages", {})
    data["begin_page"] = cleanSemicolon(pages.get("pageFirst", ""))
    data["end_page"] = cleanSemicolon(pages.get("pageLast", ""))
    data["page_info"] = cleanSemicolon(pages.get("pageInfo", ""))

    # Citation count is stamped with the download date: "<count>@<down_date>";
    # empty when the payload carries no count.
    cited_info = src_data.get("citations", {})
    cited_cnt = str(cited_info.get("count", ""))
    if len(cited_cnt) > 0:
        cited_cnt = cited_cnt + "@" + down_date
    else:
        cited_cnt = ""
    data["cited_cnt"] = cited_cnt

    # Reference count; "" when absent (the original also had a dead
    # "if empty then empty" reset here).
    ref_info = src_data.get("references", {})
    data["ref_cnt"] = str(ref_info.get("count", ""))

    # "<authorId>@<full name>" per author, ';'-joined.
    author_id = ""
    aus_list = src_data.get("authors", [])
    if len(aus_list) > 0:
        tmp_list = []
        for item in aus_list:
            au_id = item.get("authorId", "")
            au_info = item.get("preferredName", {})
            au_name = au_info.get("full", "")
            if len(au_name) > 0:
                tmp_list.append("{}@{}".format(au_id, au_name))
        author_id = ";".join(tmp_list)
    data["author_id"] = author_id
    # "<code>@<displayName>" per subject area, ';'-joined.
    subjectAreas = src_data.get("subjectAreas", [])
    subject = ""
    sub_list = []
    for item in subjectAreas:
        code = str(item.get("code", ""))
        if len(code) > 0:
            sub_list.append("{}@{}".format(code, item.get("displayName", "")))
    if len(sub_list) > 0:
        subject = ";".join(sub_list)
    data["subject"] = subject

    pub_year = cleanSemicolon(str(src_data.get("pubYear", "")))
    if len(pub_year) == 0:
        pub_year = cleanSemicolon(source.get("publicationYear", ""))
    pub_date = ""
    if len(pub_year) == 4:
        pub_date = pub_year + "0000"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    is_suppl = "0"
    if "suppl" in data["vol"] or "suppl" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    data["databaseDocumentIds_latest"] = json.dumps(source.get("databaseDocumentIds", {}), ensure_ascii=False)
    return data


def parse_scopusjournal_article_json(src_data, redis_conn):
    """Parse one Scopus journal article (full JSON record) into the unified schema.

    :param src_data: raw JSON record as a dict; must contain ``down_date``
        and a ``csv_info`` sub-dict carrying the CSV-style columns.
    :param redis_conn: redis connection (unused here; kept for interface
        parity with the other parse_* entry points).
    :return: populated ``data`` dict, or a negative int sentinel:
        -1 conference document, -2 missing eid, -3 no usable title.
    """
    codelanguageMap = initlanguageMap()
    csv_info = deal_data(src_data["csv_info"])
    data = {}
    rawid = cleanSemicolon(src_data.get("eid", "").strip())
    raw_type = cleanSemicolon(csv_info.get("Document_Type", "").strip())
    if "conference" in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    source_type = "3"
    sub_db_id = "00164"
    product = "SCOPUS"
    sub_db = "QK"
    provider = "ELSEVIER"
    # BUGFIX: was "%Y%m%d_%H%I%S" — %I is the 12-hour hour, so the stamp
    # repeated the hour and had no minutes. Intended format: YYYYmmdd_HHMMSS.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    down_date = src_data["down_date"]
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = cleanSemicolon(src_data.get("inwardLink", ""))
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    rawid_alt = ""
    pm_info = src_data.get("pubmedInfo", {})
    if isinstance(pm_info, dict):
        # assumes pubmedInfo.id is a string — TODO confirm against payloads
        pm_id = pm_info.get("id", "")
        if len(pm_id) > 0:
            rawid_alt = "pubmed@{}".format(pm_id)
    data["rawid_alt"] = rawid_alt
    # Map the raw language string(s) through the code->language table;
    # multi-valued entries are ';'-separated and deduplicated.
    language = ""
    lgstr = src_data.get("language", "").strip()
    if len(lgstr) > 0:
        lgStrings = lgstr.upper().split(";")
        hashSet = set()
        if len(lgStrings) > 1:
            for lg in lgStrings:
                tmp = codelanguageMap.get(lg, "")
                if tmp and len(tmp) > 0:
                    hashSet.add(tmp)
            language = ";".join(hashSet)
        else:
            language = codelanguageMap.get(lgstr.upper(), "")
        if not language:
            language = ""
    else:
        language = ""
    data["language"] = language

    data["doi"] = cleanSemicolon(src_data.get("doi", ""))
    source = src_data.get("source", {})
    data["issn"] = cleanSemicolon(source.get("issn", ""))
    data["publisher"] = cleanSemicolon(source.get("publisher", ""))
    data["journal_raw_id"] = cleanSemicolon(source.get("id", ""))
    data["journal_name"] = cleanSemicolon(source.get("title", ""))
    data["journal_name_alt"] = cleanSemicolon(source.get("abbreviatedSourceTitle", ""))

    data["vol"] = cleanSemicolon(source.get("volume", ""))
    data["num"] = cleanSemicolon(source.get("issue", ""))
    data["page_cnt"] = cleanSemicolon(source.get("pageCount", ""))
    data["doc_no"] = cleanSemicolon(source.get("articleNumber", ""))
    data["sub_db_class_name"] = cleanSemicolon(source.get("coden", ""))
    data["begin_page"] = cleanSemicolon(source.get("firstPage", ""))
    data["end_page"] = cleanSemicolon(source.get("lastPage", ""))
    # NOTE(review): original chained .replace("-", "-") here — a no-op,
    # presumably meant to normalize an en dash; dropped since it had no effect.
    data["page_info"] = cleanSemicolon(source.get("pages", ""))
    # "<code>@<name>" per subject area known to subject_dic, ';'-joined.
    subject_area = source.get("subjectAreaCodes", [])
    subject = ""
    sub_list = []
    for code in subject_area:
        if len(code) > 0:
            sub_name = subject_dic.get(code, "")
            if len(sub_name) > 0:
                sub_list.append("{}@{}".format(code, sub_name))
    if len(sub_list) > 0:
        subject = ";".join(sub_list)
    data["subject"] = subject

    title = ""
    titles = src_data.get("titles", [])
    if isinstance(titles, list) and len(titles) > 0:
        title = cleanSemicolon(";".join(titles))
    if "[No title available]" in title:
        return -3
    data["title"] = title
    abstract_ = ""
    abs_list = src_data.get("abstract", [])
    if isinstance(abs_list, list) and len(abs_list) > 0:
        abstract_ = ";".join(cleanSemicolon(item) for item in abs_list)
    if "[No abstract available]" in abstract_:
        abstract_ = ""
    data["abstract"] = cleanSemicolon(abstract_)

    keyword = ""
    kwords = src_data.get("authorKeywords", [])
    if isinstance(kwords, list) and len(kwords) > 0:
        keyword = cleanSemicolon(";".join(kwords))
    data["keyword"] = keyword
    keyword_machine = ""
    km_words = src_data.get("indexedKeywords", [])
    # BUGFIX: the original guarded with isinstance(..., list) and then called
    # .items() on it — an AttributeError whenever the branch was taken, and a
    # silent skip when the payload was a dict. Accept both shapes.
    if isinstance(km_words, dict) and len(km_words) > 0:
        tmp_list = []
        for items in km_words.values():
            for item in items:
                tmp_list.append(item)
        keyword_machine = cleanSemicolon(";".join(tmp_list))
    elif isinstance(km_words, list) and len(km_words) > 0:
        keyword_machine = cleanSemicolon(";".join(km_words))
    data["keyword_machine"] = keyword_machine

    author = ""
    author_1st = ""
    author_id = ""
    organ = ""
    organ_id = ""
    organ_1st = ""
    email = ""

    # Authors: collect "<id>@<name>" pairs, "<email>:<name>" pairs, and the
    # display form "<name>[n,m]" where n,m are affiliation reference numbers.
    authors = src_data.get("authors", [])
    au_ids_list = []
    au_email_list = []
    au_list = []
    if isinstance(authors, list) and len(authors) > 0:
        for item in authors:
            au_id = cleanSemicolon(item.get("id", ""))
            au_name = cleanSemicolon(item.get("name", ""))
            first_name = cleanSemicolon(item.get("firstName", ""))
            last_name = cleanSemicolon(item.get("lastName", ""))
            if len(first_name) > 0 and len(last_name) > 0:
                au_name = first_name + " " + last_name
            au_email = cleanSemicolon(item.get("email", ""))
            au_org_refs = item.get("affiliationReferences", [])
            if len(au_name) > 0:
                if len(au_id) > 0:
                    au_ids_list.append("{}@{}".format(au_id, au_name))
                if len(au_email) > 0:
                    au_email_list.append("{}:{}".format(au_email, au_name))
                if len(au_org_refs) > 0:
                    tmp_list = []
                    for tmp in au_org_refs:
                        tmp_list.append(letter_to_number(tmp))
                    if len(tmp_list) > 0:
                        au_list.append("{}[{}]".format(au_name, ",".join(tmp_list)))
                    else:
                        au_list.append(au_name)
        if len(au_ids_list) > 0:
            author_id = ";".join(au_ids_list)
        if len(au_email_list) > 0:
            email = ";".join(au_email_list)
        if len(au_list) > 0:
            author = ";".join(au_list)
            author_1st = au_list[0].split("[")[0]
    # Affiliations: "<id>@<name>" pairs and the display form "[n]<full name>".
    organs = src_data.get("affiliations", [])
    if isinstance(organs, list) and len(organs) > 0:
        org_ids_list = []
        org_list = []
        for item in organs:
            org_id = cleanSemicolon(item.get("id", ""))
            org_name = cleanSemicolon(item.get("name", ""))
            org_full_name = cleanSemicolon(item.get("fullName", ""))
            if len(org_full_name) == 0:
                org_full_name = org_name
            org_num = cleanSemicolon(item.get("reference", ""))
            if len(org_name) > 0:
                if len(org_id) > 0:
                    org_ids_list.append("{}@{}".format(org_id, org_name))
                if len(org_num) > 0:
                    org_list.append("[{}]{}".format(letter_to_number(org_num), org_full_name))
                else:
                    org_list.append("{}".format(org_full_name))
        if len(org_ids_list) > 0:
            organ_id = ";".join(org_ids_list)
        if len(org_list) > 0:
            # BUGFIX: 'organ' was never assigned, so data["organ"] stayed
            # empty even when affiliations were present.
            organ = ";".join(org_list)
            organ_1st = org_list[0].replace("[1]", "", 1)

    data["author"] = author
    data["author_1st"] = author_1st
    data["author_id"] = author_id
    data["email"] = email
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = organ_1st

    # Funding: free-text goes to fund_alt; structured entries become
    # "<sponsor>(<numbers>)" with "<link-id>@<name>" pairs in fund_id.
    fund = ""
    fund_id = ""
    fund_alt = ""
    fund_infos = src_data.get("fundingDetails", {})
    if isinstance(fund_infos, dict):
        fund_txt = fund_infos.get("fundingTexts", [])
        if len(fund_txt) > 0:
            tmp_list = [cleanSemicolon(item) for item in fund_txt]
            if len(tmp_list) > 0:
                fund_alt = ";".join(tmp_list)
        fund_list = fund_infos.get("fundingList", [])
        if len(fund_list) > 0:
            fund_id_list = []
            fd_list = []
            for item in fund_list:
                fund_num = cleanSemicolon(item.get("numbers", ""))
                fund_name = cleanSemicolon(item.get("sponsor", ""))
                fund_link = cleanSemicolon(item.get("sponsorLink", ""))
                if len(fund_name) > 0:
                    if len(fund_num) > 0:
                        fund_name = "{}({})".format(fund_name, fund_num)
                    if len(fund_link) > 0 and "/" in fund_link:
                        # Sponsor id is the last path segment of the link.
                        tms = fund_link.split("/")
                        fund_id_list.append("{}@{}".format(tms[-1], fund_name))
                    fd_list.append(fund_name)
            if len(fund_id_list) > 0:
                fund_id = ";".join(fund_id_list)
            if len(fd_list) > 0:
                fund = ";".join(fd_list)
    data["fund"] = fund
    data["fund_id"] = fund_id
    data["fund_alt"] = fund_alt

    # Corresponding authors: "person,affiliation,email" per entry.
    corr_aus = src_data.get("correspondences", [])
    correspondences_latest = ""
    corr_author = ""
    if isinstance(corr_aus, list) and len(corr_aus) > 0:
        corr_au_list = []
        for item in corr_aus:
            tmp_corr = ""
            corr_name = cleanSemicolon(item.get("person", ""))
            corr_org = cleanSemicolon(item.get("affiliation", ""))
            corr_email = cleanSemicolon(item.get("eaddress", ""))
            if len(corr_name) > 0:
                tmp_corr = corr_name
            if len(corr_org) > 0:
                tmp_corr = tmp_corr + "," + corr_org
            if len(corr_email) > 0:
                tmp_corr = tmp_corr + "," + corr_email
            if len(tmp_corr) > 0:
                corr_au_list.append(tmp_corr)
        if len(corr_au_list) > 0:
            corr_author = ";".join(corr_au_list)
        correspondences_latest = json.dumps(corr_aus, ensure_ascii=False)
    data["corr_author"] = corr_author
    data["correspondences_latest"] = correspondences_latest
    pub_year = cleanSemicolon(str(src_data.get("publicationYear", "")))
    pub_date = ""
    pd_info = src_data.get("publicationDate", {})
    if isinstance(pd_info, dict):
        if len(pub_year) != 4:
            pub_year = pd_info.get("year", "")
        month = str(pd_info.get("month", "00"))
        if month == "None":
            month = "00"
        day = str(pd_info.get("day", "00"))
        if day == "None":
            day = "00"
        # BUGFIX: zero-pad so pub_date is always YYYYMMDD even when the
        # payload sends single-digit month/day values.
        pub_date = pub_year + month.zfill(2) + day.zfill(2)
    else:
        if len(pub_year) == 4:
            pub_date = pub_year + "0000"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    is_suppl = "0"
    if "suppl" in data["vol"] or "suppl" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    oa_info = src_data.get("openAccess")
    is_oa = "0"
    # Equality (not identity) kept deliberately: matches the original's
    # acceptance of any value comparing equal to True.
    if oa_info == True:
        is_oa = "1"
    data["is_oa"] = is_oa
    tmp_oa = src_data.get("openAccessTypes", [])
    openAccessTypes_latest = ""
    if isinstance(tmp_oa, list) and len(tmp_oa) > 0:
        openAccessTypes_latest = json.dumps(tmp_oa, ensure_ascii=False)
    data["openAccessTypes_latest"] = openAccessTypes_latest
    tmp_dd = src_data.get("databaseDocumentIds", {})
    databaseDocumentIds_latest = ""
    if isinstance(tmp_dd, dict) and len(tmp_dd) > 0:
        databaseDocumentIds_latest = json.dumps(tmp_dd, ensure_ascii=False)
    data["databaseDocumentIds_latest"] = databaseDocumentIds_latest
    tmp_cp = source.get("copyright", {})
    copyright_latest = ""
    if isinstance(tmp_cp, dict) and len(tmp_cp) > 0:
        copyright_latest = json.dumps(tmp_cp, ensure_ascii=False)
    data["copyright_latest"] = copyright_latest
    return data


def parse_scopusconference_article_csv(src_data, redis_conn):
    """Parse one Scopus conference paper (CSV export row) into the unified schema.

    :param src_data: raw CSV row as a dict keyed by Scopus export column names;
        must contain ``down_date``.
    :param redis_conn: redis connection (unused here; kept for interface
        parity with the journal parser).
    :return: populated ``data`` dict, or a negative int sentinel:
        -1 not a conference document, -2 missing EID, -3 no usable title.
    """
    src_data = deal_data(src_data)
    codelanguageMap = initlanguageMap()
    data = {}
    rawid = cleanSemicolon(src_data.get("EID", ""))
    raw_type = cleanSemicolon(src_data.get("Document_Type", ""))
    if "conference" not in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    source_type = "6"
    sub_db_id = "00165"
    product = "SCOPUS"
    sub_db = "HY"
    provider = "ELSEVIER"
    # BUGFIX: was "%Y%m%d_%H%I%S" — %I is the 12-hour hour, so the stamp
    # repeated the hour and had no minutes. Intended format: YYYYmmdd_HHMMSS.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    down_date = src_data["down_date"]
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = src_data.get("Link", "").strip()
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    rawid_alt = ""
    pm_id = cleanSemicolon(src_data.get("PubMed_ID", ""))
    if len(pm_id) > 0:
        rawid_alt = "pubmed@{}".format(pm_id)
    data["rawid_alt"] = rawid_alt

    # Normalize ISSN to NNNN-NNNN; short undashed values are padded to 8
    # chars with zeros ("{:8s}" left-aligns, so the padding lands on the right).
    issn = src_data.get("ISSN", "").strip()
    if len(issn) > 0:
        if len(issn) == 8:
            issn = "{}-{}".format(issn[0:4], issn[4:8])
        elif "-" not in issn:
            res = "{:8s}".format(issn)
            issn = res.replace(" ", "0")
            sb = list(issn)
            sb.insert(4, '-')
            issn = ''.join(sb)

    country = ""
    language = ""
    data["country"] = country
    if len(language) == 0:
        # BUGFIX: was data.get(...) — 'data' never holds the raw CSV column
        # "Language_of_Original_Document", so this fallback was dead code.
        lgstr = src_data.get("Language_of_Original_Document", "").strip()
        if len(lgstr) > 0:
            lgStrings = lgstr.upper().split(";")
            hashSet = set()
            if len(lgStrings) > 1:
                for lg in lgStrings:
                    tmp = codelanguageMap.get(lg, "")
                    if tmp and len(tmp) > 0:
                        hashSet.add(tmp)
                language = ";".join(hashSet)
            else:
                language = codelanguageMap.get(lgstr.upper(), "")
            if not language:
                language = ""
        else:
            language = ""
    data["language"] = language
    data["doc_no"] = cleanSemicolon(src_data.get("Art__No_", ""))
    data["keyword"] = cleanSemicolon(src_data.get("Author_Keywords", ""))
    data["keyword_machine"] = cleanSemicolon(src_data.get("Indexed_Keywords", ""))
    title = cleanSemicolon(src_data.get("Title", ""))
    if "[No title available]" in title:
        return -3
    data["title"] = title
    data["title_alt"] = cleanSemicolon(src_data.get("Title_of_translation", ""))
    abstract_ = cleanSemicolon(src_data.get("Abstract", ""))
    if "[No abstract available]" in abstract_:
        abstract_ = ""
    data["abstract"] = abstract_
    data["vol"] = cleanSemicolon(src_data.get("Volume", ""))
    data["num"] = cleanSemicolon(src_data.get("Issue", ""))
    data["doi"] = cleanSemicolon(src_data.get("DOI", ""))
    data["issn"] = issn

    data["begin_page"] = cleanSemicolon(src_data.get("Page_start", ""))
    data["end_page"] = cleanSemicolon(src_data.get("Page_end", ""))
    data["page_cnt"] = cleanSemicolon(src_data.get("Page_count", ""))

    data["meeting_record_name"] = cleanSemicolon(src_data.get("Source_title", ""))
    data["meeting_name"] = cleanSemicolon(src_data.get("Conference_name", ""))
    meeting_date_raw = cleanSemicolon(src_data.get("Conference_date", ""))
    data["meeting_date_raw"] = meeting_date_raw
    data["meeting_place"] = cleanSemicolon(src_data.get("Conference_location", ""))
    data["meeting_code"] = cleanSemicolon(src_data.get("Conference_code", ""))
    data["publisher"] = cleanSemicolon(src_data.get("Publisher", ""))
    data["sponsor"] = cleanSemicolon(src_data.get("Sponsors", ""))

    # Derive accept_date (YYYYMMDD) from the conference start date, e.g.
    # "5 March 2020 through 7 March 2020" -> 20200305. Best-effort: any
    # parse failure leaves it empty.
    accept_date = ""
    try:
        if "through" in meeting_date_raw:
            dates = meeting_date_raw.split("through")
            accept_date = datetime.datetime.strptime(dates[0].strip(), "%d %B %Y").strftime("%Y%m%d")
        elif len(meeting_date_raw) > 0:
            accept_date = datetime.datetime.strptime(meeting_date_raw, "%d %B %Y").strftime("%Y%m%d")
    except (ValueError, TypeError):
        pass
    data["accept_date"] = accept_date
    # Citation count is stamped with the download date: "<count>@<down_date>".
    cited_cnt = cleanSemicolon(src_data.get("Cited_by", ""))
    if len(cited_cnt) > 0:
        cited_cnt = cited_cnt + "@" + down_date
    else:
        cited_cnt = "0@" + down_date
    data["cited_cnt"] = cited_cnt

    ref_cnt = "0"
    refs = cleanSemicolon(src_data.get("References", ""))
    if len(refs) > 0:
        ref_cnt = str(len(refs.split(";")))
    data["References_latest"] = refs
    data["ref_cnt"] = ref_cnt

    author_str = cleanSemicolon(src_data.get("Authors", ""))
    # Heuristic: a comma-only list with >2 commas can't be split reliably
    # (commas separate both authors and name parts) — discard it.
    if author_str.count(",") > 2 and ";" not in author_str:
        author_str = ""
    author_full_str = cleanSemicolon(src_data.get("Author_full_names", ""))
    auids_str = cleanSemicolon(src_data.get("Author_s__ID", ""))
    organ_str = cleanSemicolon(src_data.get("Affiliations", ""))
    auor_str = cleanSemicolon(src_data.get("Authors_with_affiliations", ""))
    data["author_raw"] = author_str + "&&" + auor_str + "&&" + author_full_str
    data["preferred_organ"] = organ_str

    aus = []
    auids = []
    organs = []
    auors = []
    author_id = ""
    author = ""
    author_1st = ""
    organ = ""
    organ_1st = ""
    email = ""

    if len(author_str) > 0 and "[No author name available]" not in author_str:
        author_str = author_str.replace("'", "")
        if ";" in author_str:
            aus = author_str.split(";")
        elif "," in author_str:
            aus = author_str.split(",")
        else:
            aus = [author_str]
    # BUGFIX: was testing author_str — both the emptiness check and the
    # "[No author id available]" marker belong to the author-id column.
    if len(auids_str) > 0 and "[No author id available]" not in auids_str:
        auids = auids_str.split(";")
    if len(organ_str) > 0:
        organs = organ_str.split(";")
    if len(auor_str) > 0:  # author <-> affiliation relationship strings
        auors = auor_str.split(";")
    AF_AU_dic = get_full_abbr_dic(author_full_str, author_str)
    if len(aus) > 0 and len(auids) > 0 and len(aus) == len(auids):
        # Pair each author id with the full name when known, else the
        # abbreviated one: "<id>@<name>;...".
        sb_list = []
        for i in range(0, len(aus)):
            tmp_au = AF_AU_dic.get(aus[i], "")
            if len(tmp_au) == 0:
                tmp_au = aus[i]
            sb_list.append("{}@{}".format(auids[i], tmp_au))
        author_id = ";".join(sb_list)
    if len(auors) > 0 and len(aus) > 0:
        # Match each author to its affiliation string by substring comparison
        # after stripping punctuation/spaces; '[' and ']' are escaped to
        # '!' / '#' so they survive the later numbering step.
        authorMap = {}
        for i in range(0, len(aus)):
            for j in range(0, len(auors)):
                if auors[j].replace(",", "").replace(".", "").replace(" ", "").find(
                        aus[i].replace(",", "").replace(".", "").replace(" ", "")) != -1:
                    org_list = []
                    if len(organs) > 0:
                        for k in range(0, len(organs)):
                            if auors[j].replace(",", "").replace(".", "").replace(" ", "").find(
                                    organs[k].replace(",", "").replace(".", "").replace(" ", "")) != -1:
                                org_list.append(organs[k].strip())
                    if len(org_list) > 0:
                        authorMap[aus[i].strip().replace("[", "!").replace("]", "#")] = ";".join(org_list)
                    else:
                        authorMap[aus[i].strip().replace("[", "!").replace("]", "#")] = ""
                    break
        result = number_by_map(authorMap, AF_AU_dic)
        if len(authorMap) > 0:
            author = result[0].replace("!", "[").replace("#", "]")
            author_1st = author.split(";")[0].split("[")[0]
            organ = result[1]
            organ_1st = ""
            if len(organ) > 0:
                organ_1st = organ.split(";")[0].replace("[1]", "", 1)
    # Fall back to the raw affiliation string when matching produced nothing.
    if len(organ) == 0 and len(organ_str) > 0:
        organ = organ_str
        organ_1st = organ.split(";")[0].replace("[1]", "", 1)
    data["corr_author"] = cleanSemicolon(src_data.get("Correspondence_Address", ""))
    data["author"] = author
    data["author_1st"] = author_1st
    data["organ"] = organ
    data["organ_1st"] = organ_1st
    data["email"] = email
    data["author_id"] = author_id
    data["fund"] = cleanSemicolon(src_data.get("Funding_Details", ""))
    data["fund_alt"] = cleanSemicolon(src_data.get("Funding_Text_1", ""))
    data["sub_db_class_name"] = cleanSemicolon(src_data.get("CODEN", ""))

    pub_year = cleanSemicolon(src_data.get("Year", ""))
    pub_date = ""
    if len(pub_year) == 4:
        pub_date = pub_year + "0000"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    isa = cleanSemicolon(src_data.get("Open_Access", ""))
    is_oa = "0"
    if isa == "Open Access":
        is_oa = "1"
    data["is_oa"] = is_oa
    is_suppl = "0"
    if "suppl" in data["vol"] or "suppl" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    return data


def parse_scopusconference_article_csv_json(src_data, redis_conn):
    """Parse a CSV-derived Scopus conference record into the normalized field dict.

    Args:
        src_data: raw record dict (JSON-decoded); field names are cleaned via deal_data.
        redis_conn: unused here; kept so all parsers share one signature.

    Returns:
        dict of normalized fields, or a negative int sentinel:
        -1 not a conference document, -2 missing eid, -3 no usable title.
    """
    src_data = deal_data(src_data)
    data = {}
    rawid = cleanSemicolon(src_data.get("eid", "").strip())
    raw_type = cleanSemicolon(src_data.get("documentType", "").strip())
    if "conference" not in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    # Fixed constants identifying this sub-database / provider.
    source_type = "6"
    sub_db_id = "00165"
    product = "SCOPUS"
    sub_db = "HY"
    provider = "ELSEVIER"
    # BUG FIX: format was "%Y%m%d_%H%I%S" -- %I is the 12-hour clock hour,
    # so the stamp carried the hour twice and no minutes. %M is minutes.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    down_date = src_data["down_date"]
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    # The canonical record URL is the link entry whose rel is "self".
    provider_url = ""
    links = src_data.get("links")
    if links:
        for item in links:
            if item.get("rel", "") == "self":
                provider_url = item.get("href", "")
                break
    data["provider_url"] = provider_url
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    data["doi"] = cleanSemicolon(src_data.get("DOI", ""))
    source = src_data.get("source", {})
    # Prefer "issn"; fall back to the print ISSN field.
    issn = cleanSemicolon(source.get("issn", ""))
    if len(issn) == 0:
        issn = cleanSemicolon(source.get("issnp", ""))
    data["issn"] = issn
    data["eissn"] = cleanSemicolon(source.get("eissn", ""))
    data["publisher"] = cleanSemicolon(source.get("publisher", ""))
    data["meeting_record_code"] = cleanSemicolon(source.get("id", ""))
    data["meeting_record_name"] = cleanSemicolon(source.get("title", ""))
    data["meeting_record_alt"] = cleanSemicolon(source.get("sourceTitleAbbreviation", ""))
    data["sub_db_class_name"] = cleanSemicolon(source.get("coden", ""))
    title = cleanSemicolon(src_data.get("title", ""))
    if len(title) == 0:
        # Fall back to the "titles" list, joined on ";".
        titles = src_data.get("titles", [])
        if len(titles) > 0:
            title = cleanSemicolon(";".join(titles))
    if "[No title available]" in title:
        return -3
    data["title"] = title
    abstract_ = ""
    abs_list = src_data.get("abstractText", [])
    if isinstance(abs_list, list) and len(abs_list) > 0:
        abstract_ = ";".join(cleanSemicolon(item) for item in abs_list)
    if "[No abstract available]" in abstract_:
        abstract_ = ""
    data["abstract"] = cleanSemicolon(abstract_)

    source_rp = src_data.get("sourceRelationship", {})
    data["vol"] = cleanSemicolon(source_rp.get("volume", ""))
    data["num"] = cleanSemicolon(source_rp.get("issue", ""))
    data["page_cnt"] = cleanSemicolon(source_rp.get("pageCount", ""))
    data["doc_no"] = cleanSemicolon(source_rp.get("articleNumber", ""))

    pages = source_rp.get("pages", {})
    data["begin_page"] = cleanSemicolon(pages.get("pageFirst", ""))
    data["end_page"] = cleanSemicolon(pages.get("pageLast", ""))
    data["page_info"] = cleanSemicolon(pages.get("pageInfo", ""))

    # Citation count is tagged with the harvest date ("<count>@<down_date>")
    # so counts from different harvests can be told apart.
    cited_cnt = str(src_data.get("citations", {}).get("count", ""))
    if len(cited_cnt) > 0:
        cited_cnt = cited_cnt + "@" + down_date
    data["cited_cnt"] = cited_cnt

    # Reference count is stored as-is (empty string when absent).
    data["ref_cnt"] = str(src_data.get("references", {}).get("count", ""))

    # Authors: "<authorId>@<full name>" pairs joined on ";".
    author_id = ""
    aus_list = src_data.get("authors", [])
    if len(aus_list) > 0:
        tmp_list = []
        for item in aus_list:
            au_id = item.get("authorId", "")
            au_name = item.get("preferredName", {}).get("full", "")
            if len(au_name) > 0:
                tmp_list.append("{}@{}".format(au_id, au_name))
        author_id = ";".join(tmp_list)
    data["author_id"] = author_id

    # Subject areas: "<code>@<displayName>" pairs joined on ";".
    sub_list = []
    for item in src_data.get("subjectAreas", []):
        code = str(item.get("code", ""))
        if len(code) > 0:
            sub_list.append("{}@{}".format(code, item.get("displayName", "")))
    data["subject"] = ";".join(sub_list) if sub_list else ""

    pub_year = cleanSemicolon(str(src_data.get("pubYear", "")))
    if len(pub_year) == 0:
        pub_year = cleanSemicolon(source.get("publicationYear", ""))
    # Only year precision is available here; pad to YYYY0000.
    pub_date = pub_year + "0000" if len(pub_year) == 4 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    is_suppl = "0"
    if "suppl" in data["vol"] or "suppl" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    data["databaseDocumentIds_latest"] = json.dumps(source.get("databaseDocumentIds", {}), ensure_ascii=False)
    return data


def parse_scopusconference_article_json(src_data, redis_conn):
    """Parse a full-detail Scopus conference JSON record into the normalized field dict.

    Args:
        src_data: raw record dict; must contain "csv_info" and "down_date".
        redis_conn: unused here; kept so all parsers share one signature.

    Returns:
        dict of normalized fields, or a negative int sentinel:
        -1 not a conference document, -2 missing eid, -3 no usable title.
    """
    codelanguageMap = initlanguageMap()
    csv_info = deal_data(src_data["csv_info"])
    data = {}
    rawid = cleanSemicolon(src_data.get("eid", "").strip())
    raw_type = cleanSemicolon(csv_info.get("Document_Type", "").strip())
    if "conference" not in raw_type.lower():
        return -1
    if len(rawid) == 0:
        return -2
    # Fixed constants identifying this sub-database / provider.
    source_type = "6"
    sub_db_id = "00165"
    product = "SCOPUS"
    sub_db = "HY"
    provider = "ELSEVIER"
    # BUG FIX: format was "%Y%m%d_%H%I%S" -- %I is the 12-hour clock hour,
    # so the stamp carried the hour twice and no minutes. %M is minutes.
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    down_date = src_data["down_date"]
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["sub_db"] = sub_db
    data["sub_db_id"] = sub_db_id
    data["product"] = product
    data["provider"] = provider
    data["source_type"] = source_type
    data["provider_url"] = cleanSemicolon(src_data.get("inwardLink", ""))
    data["raw_type"] = raw_type
    data["rawid"] = rawid
    data["lngid"] = BaseLngid().GetLngid(sub_db_id, rawid, False)
    # Alternate id: PubMed id when present, tagged "pubmed@<id>".
    rawid_alt = ""
    pm_info = src_data.get("pubmedInfo", {})
    if isinstance(pm_info, dict):
        pm_id = pm_info.get("id", "")
        if len(pm_id) > 0:
            rawid_alt = "pubmed@{}".format(pm_id)
    data["rawid_alt"] = rawid_alt
    # Map the raw language string(s) through codelanguageMap; multiple
    # languages are ";"-separated and de-duplicated.
    language = ""
    lgstr = src_data.get("language", "").strip()
    if len(lgstr) > 0:
        lgStrings = lgstr.upper().split(";")
        if len(lgStrings) > 1:
            hashSet = set()
            for lg in lgStrings:
                tmp = codelanguageMap.get(lg, "")
                if tmp and len(tmp) > 0:
                    hashSet.add(tmp)
            language = ";".join(hashSet)
        else:
            language = codelanguageMap.get(lgstr.upper(), "")
    if not language:
        language = ""
    data["language"] = language
    data["doi"] = cleanSemicolon(src_data.get("doi", ""))
    source = src_data.get("source", {})
    data["issn"] = cleanSemicolon(source.get("issn", ""))
    data["publisher"] = cleanSemicolon(source.get("publisher", ""))
    data["meeting_record_code"] = cleanSemicolon(source.get("id", ""))
    data["meeting_record_name"] = cleanSemicolon(source.get("title", ""))
    data["meeting_record_alt"] = cleanSemicolon(source.get("abbreviatedSourceTitle", ""))
    # Conference metadata lives under source["additionalInfoDetails"].
    meeting_name = ""
    meeting_date_raw = ""
    meeting_place = ""
    meeting_counts = ""
    accept_date = ""
    sponsor = ""
    cf_info = source.get("additionalInfoDetails", {})
    if isinstance(cf_info, dict):
        meeting_name = cleanSemicolon(cf_info.get("name", ""))
        if len(meeting_name) == 0:
            meeting_name = cleanSemicolon(cf_info.get("title", ""))
        meeting_counts = cleanSemicolon(cf_info.get("number", ""))
        startDate = cleanSemicolon(cf_info.get("startDate", ""))
        endDate = cleanSemicolon(cf_info.get("endDate", ""))
        if len(startDate) > 0:
            meeting_date_raw = startDate
            if len(endDate) > 0:
                # BUG FIX: the joining word was misspelled "throgh".
                meeting_date_raw += " through " + endDate
            try:
                # Dates look like "12 May 2002"; anything unparsable is skipped.
                accept_date = datetime.datetime.strptime(startDate, "%d %B %Y").strftime("%Y%m%d")
            except Exception:
                pass
        # Most specific location wins: cityGroup, then city, then country.
        meeting_place = cleanSemicolon(cf_info.get("cityGroup", ""))
        if len(meeting_place) == 0:
            meeting_place = cleanSemicolon(cf_info.get("city", ""))
        if len(meeting_place) == 0:
            meeting_place = cleanSemicolon(cf_info.get("country", ""))
        cf_sponsor = source.get("conferenceSponsors", [])
        if isinstance(cf_sponsor, list):
            sponsor = ";".join(cleanSemicolon(item) for item in cf_sponsor)
    data["meeting_name"] = meeting_name
    data["meeting_date_raw"] = meeting_date_raw
    data["meeting_place"] = meeting_place
    data["meeting_counts"] = meeting_counts
    data["accept_date"] = accept_date
    data["sponsor"] = sponsor

    data["vol"] = cleanSemicolon(source.get("volume", ""))
    data["num"] = cleanSemicolon(source.get("issue", ""))
    data["page_cnt"] = cleanSemicolon(source.get("pageCount", ""))
    data["doc_no"] = cleanSemicolon(source.get("articleNumber", ""))
    data["sub_db_class_name"] = cleanSemicolon(source.get("coden", ""))
    data["begin_page"] = cleanSemicolon(source.get("firstPage", ""))
    data["end_page"] = cleanSemicolon(source.get("lastPage", ""))
    # NOTE(review): the original chained .replace("-", "-") here -- a no-op,
    # probably a mangled dash normalization; dropped as it changed nothing.
    data["page_info"] = cleanSemicolon(source.get("pages", ""))
    # Subject areas: map numeric codes through subject_dic, "<code>@<name>".
    sub_list = []
    for code in source.get("subjectAreaCodes", []):
        if len(code) > 0:
            sub_name = subject_dic.get(code, "")
            if len(sub_name) > 0:
                sub_list.append("{}@{}".format(code, sub_name))
    data["subject"] = ";".join(sub_list) if sub_list else ""

    title = ""
    titles = src_data.get("titles", [])
    if isinstance(titles, list) and len(titles) > 0:
        title = cleanSemicolon(";".join(titles))
    if "[No title available]" in title:
        return -3
    data["title"] = title
    abstract_ = ""
    abs_list = src_data.get("abstract", [])
    if isinstance(abs_list, list) and len(abs_list) > 0:
        abstract_ = ";".join(cleanSemicolon(item) for item in abs_list)
    if "[No abstract available]" in abstract_:
        abstract_ = ""
    data["abstract"] = cleanSemicolon(abstract_)

    keyword = ""
    kwords = src_data.get("authorKeywords", [])
    if isinstance(kwords, list) and len(kwords) > 0:
        keyword = cleanSemicolon(";".join(kwords))
    data["keyword"] = keyword
    # BUG FIX: indexedKeywords is a mapping of scheme -> list of terms, but
    # the original guarded with isinstance(..., list) and then called
    # .items() on it, so the branch could never run without raising and
    # keyword_machine was always empty.
    keyword_machine = ""
    km_words = src_data.get("indexedKeywords", {})
    if isinstance(km_words, dict) and len(km_words) > 0:
        tmp_list = []
        for items in km_words.values():
            for item in items:
                tmp_list.append(item)
        keyword_machine = cleanSemicolon(";".join(tmp_list))
    data["keyword_machine"] = keyword_machine

    author = ""
    author_1st = ""
    author_id = ""
    organ = ""
    organ_id = ""
    organ_1st = ""
    email = ""

    authors = src_data.get("authors", [])
    au_ids_list = []
    au_email_list = []
    au_list = []
    if isinstance(authors, list) and len(authors) > 0:
        for item in authors:
            au_id = cleanSemicolon(item.get("id", ""))
            au_name = cleanSemicolon(item.get("name", ""))
            fisrt_name = cleanSemicolon(item.get("firstName", ""))
            last_name = cleanSemicolon(item.get("lastName", ""))
            if len(fisrt_name) > 0 and len(last_name) > 0:
                au_name = fisrt_name + " " + last_name
            au_email = cleanSemicolon(item.get("email", ""))
            au_org_refs = item.get("affiliationReferences", [])
            if len(au_name) > 0:
                if len(au_id) > 0:
                    au_ids_list.append("{}@{}".format(au_id, au_name))
                if len(au_email) > 0:
                    au_email_list.append("{}:{}".format(au_email, au_name))
                if len(au_org_refs) > 0:
                    # Affiliation refs are letters; letter_to_number converts
                    # them to the numeric markers used in "name[1,2]" style.
                    tmp_list = [letter_to_number(tmp) for tmp in au_org_refs]
                    au_list.append("{}[{}]".format(au_name, ",".join(tmp_list)))
                else:
                    # BUG FIX: authors with no affiliation references were
                    # silently dropped from the author string (the original
                    # "else" hung off an unreachable inner if).
                    au_list.append(au_name)
        if len(au_ids_list) > 0:
            author_id = ";".join(au_ids_list)
        if len(au_email_list) > 0:
            email = ";".join(au_email_list)
        if len(au_list) > 0:
            author = ";".join(au_list)
            author_1st = au_list[0].split("[")[0]
    organs = src_data.get("affiliations", [])
    if isinstance(organs, list) and len(organs) > 0:
        org_ids_list = []
        org_list = []
        for item in organs:
            org_id = cleanSemicolon(item.get("id", ""))
            org_name = cleanSemicolon(item.get("name", ""))
            org_full_name = cleanSemicolon(item.get("fullName", ""))
            if len(org_full_name) == 0:
                org_full_name = org_name
            org_num = cleanSemicolon(item.get("reference", ""))
            if len(org_name) > 0:
                if len(org_id) > 0:
                    org_ids_list.append("{}@{}".format(org_id, org_name))
                if len(org_num) > 0:
                    org_list.append("[{}]{}".format(letter_to_number(org_num), org_full_name))
                else:
                    org_list.append("{}".format(org_full_name))
        if len(org_ids_list) > 0:
            organ_id = ";".join(org_ids_list)
        if len(org_list) > 0:
            # BUG FIX: the original derived organ_1st from the still-empty
            # `organ` and never joined org_list, leaving both fields blank.
            organ = ";".join(org_list)
            organ_1st = organ.split(";")[0].replace("[1]", "", 1)

    data["author"] = author
    data["author_1st"] = author_1st
    data["author_id"] = author_id
    data["email"] = email
    data["organ"] = organ
    data["organ_id"] = organ_id
    data["organ_1st"] = organ_1st

    # Funding: fund = "name(number)" list; fund_id = "<id>@<name>" where the
    # funder id is the last path segment of sponsorLink.
    fund = ""
    fund_id = ""
    fund_alt = ""
    fund_infos = src_data.get("fundingDetails", {})
    if isinstance(fund_infos, dict):
        fund_txt = fund_infos.get("fundingTexts", [])
        if len(fund_txt) > 0:
            fund_alt = ";".join(cleanSemicolon(item) for item in fund_txt)
        fund_list = fund_infos.get("fundingList", [])
        if len(fund_list) > 0:
            fund_id_list = []
            fd_list = []
            for item in fund_list:
                fund_num = cleanSemicolon(item.get("numbers", ""))
                fund_name = cleanSemicolon(item.get("sponsor", ""))
                fund_link = cleanSemicolon(item.get("sponsorLink", ""))
                if len(fund_name) > 0:
                    if len(fund_num) > 0:
                        fund_name = "{}({})".format(fund_name, fund_num)
                    if len(fund_link) > 0 and "/" in fund_link:
                        tms = fund_link.split("/")
                        fund_id_list.append("{}@{}".format(tms[-1], fund_name))
                    fd_list.append(fund_name)
            if len(fund_id_list) > 0:
                fund_id = ";".join(fund_id_list)
            if len(fd_list) > 0:
                fund = ";".join(fd_list)
    data["fund"] = fund
    data["fund_id"] = fund_id
    data["fund_alt"] = fund_alt

    # Correspondence authors: "person,affiliation,eaddress" per entry (absent
    # leading pieces keep the original comma placement), joined on ";"; the
    # raw list is also preserved verbatim as JSON.
    corr_aus = src_data.get("correspondences", [])
    correspondences_latest = ""
    corr_author = ""
    if isinstance(corr_aus, list) and len(corr_aus) > 0:
        corr_au_list = []
        for item in corr_aus:
            tmp_corr = ""
            corr_name = cleanSemicolon(item.get("person", ""))
            corr_org = cleanSemicolon(item.get("affiliation", ""))
            corr_email = cleanSemicolon(item.get("eaddress", ""))
            if len(corr_name) > 0:
                tmp_corr = corr_name
            if len(corr_org) > 0:
                tmp_corr = tmp_corr + "," + corr_org
            if len(corr_email) > 0:
                tmp_corr = tmp_corr + "," + corr_email
            if len(tmp_corr) > 0:
                corr_au_list.append(tmp_corr)
        if len(corr_au_list) > 0:
            corr_author = ";".join(corr_au_list)
        correspondences_latest = json.dumps(corr_aus, ensure_ascii=False)
    data["corr_author"] = corr_author
    data["correspondences_latest"] = correspondences_latest
    pub_year = cleanSemicolon(str(src_data.get("publicationYear", "")))
    pub_date = ""
    pd_info = src_data.get("publicationDate", {})
    if isinstance(pd_info, dict):
        if len(pub_year) != 4:
            # ROBUSTNESS: "year" may arrive as an int; force str before concat.
            pub_year = str(pd_info.get("year", ""))
        month = str(pd_info.get("month", "00"))
        if month == "None":
            month = "00"
        day = str(pd_info.get("day", "00"))
        if day == "None":
            day = "00"
        # BUG FIX: single-digit month/day produced e.g. "200234"; zero-pad
        # so pub_date is always YYYYMMDD-shaped.
        pub_date = pub_year + month.zfill(2) + day.zfill(2)
    else:
        if len(pub_year) == 4:
            pub_date = pub_year + "0000"
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    is_suppl = "0"
    if "suppl" in data["vol"] or "suppl" in data["num"]:
        is_suppl = "1"
    data["is_suppl"] = is_suppl
    # openAccess is a JSON boolean; comparison kept as == True to preserve
    # the original acceptance of truthy values equal to True.
    is_oa = "1" if src_data.get("openAccess") == True else "0"
    data["is_oa"] = is_oa
    tmp_oa = src_data.get("openAccessTypes", [])
    openAccessTypes_latest = ""
    if isinstance(tmp_oa, list) and len(tmp_oa) > 0:
        openAccessTypes_latest = json.dumps(tmp_oa, ensure_ascii=False)
    data["openAccessTypes_latest"] = openAccessTypes_latest
    tmp_dd = src_data.get("databaseDocumentIds", {})
    databaseDocumentIds_latest = ""
    if isinstance(tmp_dd, dict) and len(tmp_dd) > 0:
        databaseDocumentIds_latest = json.dumps(tmp_dd, ensure_ascii=False)
    data["databaseDocumentIds_latest"] = databaseDocumentIds_latest
    tmp_cp = source.get("copyright", {})
    copyright_latest = ""
    if isinstance(tmp_cp, dict) and len(tmp_cp) > 0:
        copyright_latest = json.dumps(tmp_cp, ensure_ascii=False)
    data["copyright_latest"] = copyright_latest
    return data


if __name__ == '__main__':
    # Ad-hoc smoke test: push one CSV-shaped record through the journal
    # parser and dump the normalized result. The fixture keeps every field
    # the parser reads but trims the free-text payloads; the original inline
    # record embedded multi-thousand-character Abstract/References strings
    # that were split across physical lines, making the literals invalid.
    csv_data = {
        "down_date": "11",
        "Conference_name": "",
        "Issue": "1",
        "Cited_by": "2",
        "Authors": "Joris C.",
        "Open_Access": "",
        "Document_Type": "Article",
        "Source": "Scopus",
        "Sponsors": "",
        "Page_end": "134",
        "Funding_Details": "",
        "CODEN": "ATRPA",
        "ISBN": "",
        "Authors_with_affiliations": "Joris C.",
        "Publication_Stage": "Final",
        "DOI": "10.1016/S0003-5521(02)01084-1",
        "Art._No.": "",
        "EID": "2-s2.0-18744398995",
        "Page_start": "99",
        "Abstract": "Ardèche, a department of Rhone Alpes region, is rich in prehistoric sites belonging to a very large chronological period dated back to 350 000 years ago. © 2002 Éditions scientifiques et médicales Elsevier SAS. All rights reserved.",
        "ISSN": "00035521",
        "Manufacturers": "",
        "Chemicals/CAS": "",
        "Title": "The Magdalenian assemblages from Ardèche (France) in the Mediterranean Basin context",
        "Publisher": "",
        "Page_count": "35",
        "Tradenames": "",
        "Abbreviated_Source_Title": "Anthropologie",
        "Authors_ID": "6603419689",
        "Author_full_names": "Joris, Cinzia (6603419689)",
        "Index_Keywords": "France; archaeology; Paleolithic; tool use",
        "Source_title": "Anthropologie",
        "Year": "2002",
        "Molecular_Sequence_Numbers": "",
        "Affiliations": "Aoste 11010, Fraz. Cognein 50, Italy",
        "Volume": "106",
        "Conference_date": "",
        "Editors": "",
        "References": "Combier J., Le Paléolithique De L'Ardèche, 4, (1967); Sacchi D., Le Paléolithique Supérieur Du Languedoc Occidental Et Du Roussillon, 284, 21 SUPPL., (1986)",
        "Conference_code": "",
        "Conference_location": "",
        "Author_Keywords": "Ardèche; Epipalaeolithic; Evolution; Magdalenian; Technology; The stone tools",
        "PubMed_ID": "",
        "Language_of_Original_Document": "English",
        "Funding_Texts": "",
        "Link": "https://www.scopus.com/inward/record.uri?eid=2-s2.0-18744398995&doi=10.1016%2fS0003-5521%2802%2901084-1&partnerID=40&md5=a91da85a6784b7fba9d9fd991054199d",
        "Correspondence_Address": "C. Joris; Aoste 11010, Fraz. Cognein 50, Italy; email: bibliostpierre@netvallee.it"
    }
    data = parse_scopusjournal_article_csv(csv_data, None)
    print(json.dumps(data, ensure_ascii=False))