import sys
import argparse
import urllib3
import subprocess
import multiprocessing as mp
from lxml import etree


def init_par(argv=None):
    """Parse the command-line options for the PDF fetcher.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (added, backward-compatibly, so the parser can be driven
            programmatically and in tests).

    Returns:
        argparse.Namespace with ``input`` (str, required), ``out_dir`` (str),
        ``threads`` (int) and ``cas_class`` (str).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', help='input query results', required=True)
    parser.add_argument('-o', '--out-dir', help='output dir', default='pdf_out')
    # type=int: without it a command-line "-t 4" arrives as the string "4",
    # while the default stays an int — inconsistent types downstream.
    parser.add_argument('-t', '--threads', help='download threads', type=int, default=1)
    parser.add_argument('-c', '--cas-class', help='download class [default = Q1]', default='Q1')
    return parser.parse_args(argv)


def parse_input_file(input_file):
    """Read a tab-separated query-result file and index its rows by PubID.

    The first line must be a header row containing the columns 'PubID',
    'Title', 'DOI' and 'CASClassfication' (sic — spelling matches the
    upstream export).  Rows whose DOI column is the literal string 'NA'
    are skipped with a warning on stderr.

    Args:
        input_file: path to the tab-separated results file.

    Returns:
        dict mapping PubID -> {'title': ..., 'doi': ..., 'cas': ...}.

    Exits:
        With status 1 if the file cannot be opened.
    """
    try:
        # Context manager closes the handle on every path (the original
        # leaked it).  OSError is what open() actually raises; the original
        # caught FileExistsError, which never occurs on a read.
        with open(input_file, 'r') as handle:
            lines = handle.readlines()
    except OSError:
        # Fix: the original never called .format(), printing a literal '{}'.
        sys.stderr.write('Cannot open {} . Please check your input file.'.format(input_file))
        sys.exit(1)
    header = lines[0].strip().split('\t')
    # Map each required column name to its position in one pass instead of
    # repeated list.index() scans.
    columns = {name: idx for idx, name in enumerate(header)}
    pubid_idx = columns['PubID']
    title_idx = columns['Title']
    doi_idx = columns['DOI']
    cas_idx = columns['CASClassfication']
    pdf_dict = {}
    for line in lines[1:]:
        con = line.strip().split('\t')
        pubid = con[pubid_idx]
        doi = con[doi_idx]
        if doi != 'NA':
            pdf_dict[pubid] = {'title': con[title_idx], 'doi': doi, 'cas': con[cas_idx]}
        else:
            # Fix: same missing-.format() bug as above.
            sys.stderr.write("Cannot get {}'s doi. This paper may be not available on the internet.".format(pubid))
    return pdf_dict


def fetch_pdf(pdf_dict, output, threads, down_class):
    """Resolve the sci-hub URL of every paper in the requested CAS classes.

    For each selected paper, fetches the sci-hub page for its DOI, extracts
    the download link from the page's onclick handler, and writes that URL
    (one per line) to stdout.

    Args:
        pdf_dict: mapping PubID -> {'title', 'doi', 'cas'}
            (as produced by parse_input_file).
        output: output directory — currently unused; kept for interface
            compatibility with existing callers.
        threads: worker count — currently unused; the original created a
            multiprocessing.Pool here but never used or closed it (leak),
            so the dead pool has been removed.
        down_class: '-'-separated CAS classes to download, e.g. 'Q1-Q2'.

    Exits:
        Cleanly (status 0) when no paper matches ``down_class``.
    """
    # Be explicit about the scheme: a scheme-less URL makes urllib3 fall
    # back to plain http.
    base_url = 'https://www.sci-hub.ren/'
    class_list = down_class.split('-')
    down_list = [pid for pid in pdf_dict if pdf_dict[pid]['cas'] in class_list]
    if not down_list:
        sys.stderr.write('No paper will be download under {} class\n'.format(down_class))
        sys.exit()
    sys.stderr.write('{} of {} papers will be download.\n'.format(len(down_list), len(pdf_dict)))
    # One pooled HTTP client reused for every request — the original built a
    # fresh PoolManager per paper.
    http = urllib3.PoolManager(timeout=10)
    for pid in down_list:
        res = http.request('GET', base_url + pdf_dict[pid]['doi'], retries=5)
        page = etree.HTML(res.data.decode())
        onclick = page.xpath('/html/body/div[1]/div[1]/ul/li[2]/a/@onclick')
        # onclick looks like: location.href='//host\/path' — strip the JS
        # wrapper and unescape the slashes.  '\\/' is the same two bytes the
        # original spelled as the deprecated escape '\/'.
        fetch_url = onclick[0].replace("location.href='", '').replace("'", '').replace('\\/', '/')
        # Newline added: the original concatenated all URLs on one line.
        sys.stdout.write(fetch_url + '\n')


if __name__ == "__main__":
    # CLI entry point: parse the options, index the query results by PubID,
    # then resolve the download URL for each selected paper.
    cli_args = init_par()
    papers = parse_input_file(cli_args.input)
    fetch_pdf(papers, cli_args.out_dir, cli_args.threads, cli_args.cas_class)
