import argparse
import os
import sys
from urllib.parse import quote
from xml.etree import ElementTree as ET

import pandas as pd
import urllib3
import xlrd


def init_par():
    """Parse command-line options for the PubMed query tool.

    Returns:
        argparse.Namespace: parsed options (db, output, key_word,
        time_range, field, retmax, datatype, cas).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--db', help='Entrez database name', type=str, default='pubmed')
    parser.add_argument('-o', '--output', help='output results', default='stdout')
    parser.add_argument('-k', '--key-word', help='query key word', required=True)
    # BUG FIX: the original default was the unquoted expression 2019-2020,
    # which is integer subtraction (-1), not the string '2019-2020' that
    # create_url's time_range.split('-') expects.
    parser.add_argument('-t', '--time-range', help='time range. eg:2015-2019', default='2019-2020')
    parser.add_argument('-f', '--field', help='search field. eg:title', default='AllFields')
    parser.add_argument('-r', '--retmax', help='max number of querying results', default=20, type=int)
    parser.add_argument('-d', '--datatype', help='data type. eg:pdat', default='pdat')
    parser.add_argument('--cas', help='Chinese academy of sciences journal classfication')
    return parser.parse_args()


def create_url(key_word, db, time_range, field, data_type, retmax):
    """Build an NCBI E-utilities esearch URL for the given query.

    Args:
        key_word (str): search term; percent-encoded for the URL.
        db (str): Entrez database name (e.g. 'pubmed').
        time_range (str): publication date range 'MINYEAR-MAXYEAR'.
        field (str): search field (e.g. 'title', 'AllFields').
        data_type (str): date type for the range (e.g. 'pdat').
        retmax (int): maximum number of IDs to return.

    Returns:
        str: fully assembled esearch URL.
    """
    # NOTE(review): the API key is hard-coded in the source; consider
    # loading it from an environment variable before distributing.
    API_KEY = 'api_key=c851472abb78ea7c6af492f5935ba9172c08'
    eutils_prefix = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?'
    # quote() percent-encodes spaces AND other reserved characters, which
    # the original replace(' ', '%20') missed.
    date = str(time_range).split('-')
    parameters = '&'.join([
        'db={}'.format(db),
        'term={}'.format(quote(key_word)),
        'field={}'.format(field),
        'datatype={}'.format(data_type),
        'mindate={}'.format(date[0]),
        'maxdate={}'.format(date[1]),
        'retmax={}'.format(retmax),
        API_KEY,
    ])
    return eutils_prefix + parameters


def request_url(url):
    """Fetch an esearch result page and return the matching PubMed IDs.

    Args:
        url (str): fully built esearch URL (see create_url).

    Returns:
        list[str]: IDs found under the <IdList> element; empty when the
        query matched nothing.
    """
    http = urllib3.PoolManager(timeout=5)
    res = http.request('GET', url, retries=5)
    root = ET.XML(res.data.decode())
    # esearch XML nests one <Id> element per hit inside <IdList>; findall
    # replaces the original manual tag-matching loop (which also shadowed
    # the builtin `id`).
    return [elem.text for elem in root.findall('./IdList/Id')]


def get_pubmed_summary(pubmed_id):
    """Fetch esummary records for the given PubMed IDs.

    Args:
        pubmed_id (list[str]): PubMed IDs, as returned by request_url.

    Returns:
        dict: {pubmed_id: {item_name: item_text}} — one inner dict of
        <Item Name=...> values per <DocSum> in the response.
    """
    url_prefix = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&'
    parameters = '&'.join([
        'id={}'.format(','.join(pubmed_id)),
        'api_key=c851472abb78ea7c6af492f5935ba9172c08',
    ])
    http = urllib3.PoolManager(timeout=10)
    summary = http.request('GET', url_prefix + parameters, retries=5)
    root = ET.XML(summary.data.decode())
    pubmed_dict = {}
    for doc_sum in root.iter('DocSum'):
        id_elem = doc_sum.find('Id')
        # BUG FIX: the original left doc_id unbound (or stale from the
        # previous record) when a DocSum carried no <Id> child.
        if id_elem is None:
            continue
        pubmed_dict[id_elem.text] = {
            item.attrib['Name']: item.text for item in doc_sum.iter('Item')
        }
    return pubmed_dict


def parse_cas(cas_excel):
    """Load the CAS journal-classification spreadsheet.

    Args:
        cas_excel (str): path to the classification Excel workbook.

    Returns:
        dict: column-oriented mapping {column_name: {row_index: value}}
        (pandas orient='dict') of 'Sheet1'.
    """
    # read_excel opens AND closes the workbook itself; the original
    # pd.ExcelFile handle was never closed (and `table = table` was a no-op).
    table = pd.read_excel(cas_excel, sheet_name='Sheet1')
    return table.to_dict(orient='dict')


def output_res(cas_table, pubmed_dict, output):
    """Write one tab-separated row per PubMed record, optionally joined
    with CAS journal-classification columns.

    Args:
        cas_table: column-oriented dict from parse_cas(), or any falsy
            value to omit the CAS columns.
        pubmed_dict (dict): {pubmed_id: {field: value}} from
            get_pubmed_summary().
        output (str): 'stdout' to print to standard output, otherwise a
            file path to (over)write.
    """
    base_header = 'PubID\tTitle\tPubType\tPubDate\tDOI\tJournalName\tISSN\tESSN'
    cas_header = '\tSubject\tIF\tCASClassfication'
    header = base_header + (cas_header if cas_table else '')
    # BUG FIX: the original left out_file unbound when output == 'stdout'
    # and then called out_file.close() unconditionally (NameError); it also
    # wrote stdout headers/rows without '\n', running them together.
    out_file = sys.stdout if output == 'stdout' else open(output, 'w')
    try:
        out_file.write(header + '\n')
        for doc_id, record in pubmed_dict.items():
            fields = [_field(record, name) for name in
                      ('Title', 'PubType', 'PubDate', 'DOI',
                       'FullJournalName', 'ISSN', 'ESSN')]
            row = [doc_id] + fields
            if cas_table:
                # fields[5]=ISSN, fields[6]=ESSN, fields[4]=journal name
                row += _cas_lookup(cas_table, fields[5], fields[6], fields[4])
            out_file.write('\t'.join(row) + '\n')
    finally:
        if out_file is not sys.stdout:
            out_file.close()


def _field(record, name):
    """Return record[name], or 'NA' when the key is missing or its value
    is empty/None (replaces seven copy-pasted try/except blocks)."""
    value = record.get(name)
    return value if value else 'NA'


def _cas_lookup(table, issn, essn, journal_name):
    """Return [subject, impact_factor, classification] for a journal,
    matching by ISSN, then ESSN, then full journal name; all 'NA' when
    the journal is not in the CAS table."""
    issn_values = list(table['ISSN'].values())
    if issn in issn_values:
        index = issn_values.index(issn)
    elif essn in issn_values:
        index = issn_values.index(essn)
    else:
        journal_names = list(table['期刊名称'].values())
        if journal_name not in journal_names:
            return ['NA', 'NA', 'NA']
        index = journal_names.index(journal_name)
    return [table['学科'][index], str(table['影响因子'][index]), table['分区'][index]]


if __name__ == "__main__":
    args = init_par()
    query_url = create_url(args.key_word, args.db, args.time_range,
                           args.field, args.datatype, args.retmax)
    pubmed_id = request_url(query_url)
    pubmed_dict = get_pubmed_summary(pubmed_id)
    # CAS classification only applies to the pubmed database. BUG FIX: the
    # original never assigned cas_table when --cas was given with a
    # non-pubmed --db, then referenced it (NameError); fall back to no CAS
    # columns instead.
    if args.db == 'pubmed' and args.cas:
        cas_table = parse_cas(args.cas)
    else:
        cas_table = ''
    output_res(cas_table, pubmed_dict, args.output)