# -*- encoding:utf-8 -*-
import requests
from bs4 import BeautifulSoup


def doi_download(url, filename):
    """Resolve a DOI URL and dispatch to the matching publisher downloader.

    requests follows the DOI redirect, so the final publisher host is in
    ``res.url``; the first matching host wins.

    Returns the delegate's (truthy) result on success; prints a failure
    message and returns False otherwise.
    """
    res = requests.get(url)
    res.encoding = 'utf-8'
    final_url = res.url
    # Ordered host -> handler table. The original used a broken mix of
    # if/elif chains in which a later `else` clobbered an earlier result
    # (e.g. a successful crossref download was overwritten with False).
    handlers = (
        ("crossref.org", crossref_download),
        ("ijert.org", ijert_download),
        ("springer.com", springer_download),
        ("sciencedirect.com", sciencedirect_download),
        ("spiedigitallibrary.org", spiedigitallibrary_download),
        ("tandfonline.com", tandfonline_download),
    )
    result = False
    for host, handler in handlers:
        if final_url.find(host) > 0:
            result = handler(url, filename)
            break
    if result:
        return result
    print("download failure , {} : {} , {}".format("doi", url, filename))
    return False


def springer_download(url, filename):
    """Download a PDF from a Springer article or book landing page.

    Tries the article layout selector first, then the book layout, and
    saves the PDF as ``filename``.pdf.

    Returns True on success, False when the page could not be fetched or
    no PDF link was found (the original crashed with IndexError there).
    """
    springer_website = "http://link.springer.com"
    res = requests.get(url)
    res.encoding = 'utf-8'
    if res.status_code != 200:
        return False
    soup = BeautifulSoup(res.text, 'html.parser')
    nodes = soup.select(".c-pdf-download a")
    if nodes:
        # Article-layout hrefs are used as-is (absolute in the original).
        pdf_url = nodes[0]['href']
    else:
        nodes = soup.select(".test-bookpdf-link")
        if not nodes:
            # No recognizable PDF link: report failure instead of raising.
            return False
        # Book-layout hrefs are site-relative.
        pdf_url = springer_website + nodes[0]['href']
    _pdf_download(pdf_url, filename)
    return True


def sciencedirect_download(url, filename):
    """Download a PDF from a ScienceDirect article page.

    NOTE(review): the selector ".c-pdf-download a" is the same one used
    for Springer — verify it still matches ScienceDirect markup.

    Returns True on success, False when the page could not be fetched or
    no PDF link was found (the original crashed with IndexError there).
    """
    sciencedirect_website = "https://www.sciencedirect.com/"
    res = requests.get(url)
    res.encoding = 'utf-8'
    if res.status_code != 200:
        return False
    soup = BeautifulSoup(res.text, 'html.parser')
    nodes = soup.select(".c-pdf-download a")
    if not nodes:
        return False
    pdf_url = sciencedirect_website + nodes[0]['href']
    _pdf_download(pdf_url, filename)
    return True


def crossref_download(url, filename):
    """Follow a crossref "multiple resolution" chooser page.

    Scans the chooser table for an IEEE link and delegates to
    ieee_download; other publishers on the page are not supported.

    Returns the delegate's result, or False when the page could not be
    fetched or no IEEE link was found.
    """
    # (Removed unused local `crossref_website` from the original.)
    res = requests.get(url)
    res.encoding = 'utf-8'
    if res.status_code == 200:
        soup = BeautifulSoup(res.text, 'html.parser')
        for node in soup.select("td a"):
            if node["href"].find("ieee") > 0:
                return ieee_download(node["href"], filename)
    return False


def ijert_download(url, filename):
    """Download a PDF from an IJERT article page via its fancybox link.

    Returns True on success, False when the page could not be fetched or
    no PDF link was found (the original crashed with IndexError there).
    """
    # NOTE(review): `ijert_website` is unused here because the scraped
    # href appears to be absolute — confirmed only by the original code.
    ijert_website = "http://www.ijert.org/"
    res = requests.get(url)
    res.encoding = 'utf-8'
    if res.status_code != 200:
        return False
    soup = BeautifulSoup(res.text, 'html.parser')
    nodes = soup.select(".fancybox-pdf")
    if not nodes:
        return False
    _pdf_download(nodes[0]['href'], filename)
    return True


def spiedigitallibrary_download(url, filename):
    """Download a PDF from an SPIE Digital Library article page.

    Returns True on success, False when the page could not be fetched or
    no PDF link was found (the original crashed with IndexError there).
    """
    spiedigitallibrary_website = "http://www.spiedigitallibrary.org/"
    res = requests.get(url)
    res.encoding = 'utf-8'
    if res.status_code != 200:
        return False
    soup = BeautifulSoup(res.text, 'html.parser')
    nodes = soup.select(".DownloadSaveButton1")
    if not nodes:
        return False
    pdf_url = spiedigitallibrary_website + nodes[0]['href']
    _pdf_download(pdf_url, filename)
    return True


def tandfonline_download(url, filename):
    """Download a PDF from a Taylor & Francis Online article page.

    Returns True on success, False when the page could not be fetched or
    no PDF link was found (the original crashed with IndexError there).
    """
    tandfonline_website = "https://www.tandfonline.com"
    res = requests.get(url)
    res.encoding = 'utf-8'
    if res.status_code != 200:
        return False
    soup = BeautifulSoup(res.text, 'html.parser')
    nodes = soup.select(".show-pdf")
    if not nodes:
        return False
    pdf_url = tandfonline_website + nodes[0]['href']
    _pdf_download(pdf_url, filename)
    return True


def s2_download(url, filename):
    """Download a Semantic Scholar (S2) PDF; ``url`` must end in ".pdf".

    Raises AssertionError for non-.pdf URLs. The original message
    template "'{}'" was never formatted with the offending URL.
    """
    assert str(url).endswith(".pdf"), \
        "s2 url:'{}' need to end with '.pdf'".format(url)
    _pdf_download(url, filename)
    return True


def acm_download(url: str, filename):
    """Download a PDF from the ACM Digital Library.

    DOI-style URLs are rewritten in place to the direct PDF form
    (".../doi/..." -> ".../doi/pdf/..."); other URLs have their landing
    page scraped for the first link in the "#pill-formats" widget.
    """
    acm_website = "https://dl.acm.org"
    if "doi" in url:
        # Insert "pdf/" immediately after "acm.org/doi/";
        # 12 == len("acm.org/doi/").
        cut = url.index("acm.org") + 12
        pdf_url = "{}pdf/{}".format(url[:cut], url[cut:])
    else:
        page = requests.get(url)
        page.encoding = 'utf-8'
        markup = BeautifulSoup(page.text, 'html.parser')
        links = markup.select("#pill-formats a")
        pdf_url = acm_website + links[0]['href']
    _pdf_download(pdf_url, filename)
    return True


def dblp_download(url, filename):
    """Fetch the CVF open-access PDF linked from a DBLP entry page."""
    dblp_website = "https://openaccess.thecvf.com/"
    page = requests.get(url)
    page.encoding = 'utf-8'
    doc = BeautifulSoup(page.text, 'html.parser')
    anchors = doc.select('dd a')
    # Drop the first 9 characters of the href before joining with the
    # CVF base — presumably a relative-path prefix; TODO confirm.
    pdf_url = dblp_website + anchors[0]['href'][9:]
    _pdf_download(pdf_url, filename)
    return True


def mdpi_download(url, filename):
    """Download an MDPI article PDF by scraping its landing page."""
    mdpi_website = "https://www.mdpi.com/"
    page = requests.get(url)
    page.encoding = 'utf-8'
    doc = BeautifulSoup(page.text, 'html.parser')
    links = doc.select('.download .UD_ArticlePDF')
    _pdf_download(mdpi_website + links[0]['href'], filename)
    return True


def arxiv_download(url, filename):
    """Download an arXiv PDF; ``url`` must end in ".pdf".

    Raises AssertionError for non-.pdf URLs. The original message
    template "'{}'" was never formatted with the offending URL.
    """
    assert str(url).endswith(".pdf"), \
        "arxiv url:'{}' need to end with '.pdf'".format(url)
    _pdf_download(url, filename)
    return True


def anansi_download(url, filename):
    """Download an ANANSI PDF; ``url`` must end in ".pdf".

    Raises AssertionError for non-.pdf URLs. The original message said
    "arxiv" (copy-paste) and never formatted the URL into the template.
    """
    assert str(url).endswith(".pdf"), \
        "anansi url:'{}' need to end with '.pdf'".format(url)
    _pdf_download(url, filename)
    return True


# todo
def medline_download(url, filename):
    """Resolve a PubMed/MEDLINE record to its first full-text link and fetch it.

    NOTE(review): the resolved link may be an HTML page rather than a
    PDF — the author flagged this with "todo"; behavior kept as-is.
    """
    page = requests.get(url)
    page.encoding = 'utf-8'
    doc = BeautifulSoup(page.text, 'html.parser')
    links = doc.select('.full-text-links .full-view .full-text-links-list a')
    _pdf_download(links[0]['href'], filename)
    return True


def usenix_download(url, filename):
    """Download a USENIX paper PDF via the file link on its page."""
    page = requests.get(url)
    page.encoding = 'utf-8'
    doc = BeautifulSoup(page.text, 'html.parser')
    file_links = doc.select('.filed .filed-items .file a')
    _pdf_download(file_links[0]['href'], filename)
    return True


def ieee_download(url: str, filename):
    """Download an IEEE Xplore PDF.

    Document URLs are first rewritten to the stamp.jsp viewer form; the
    actual PDF URL is then taken from the viewer page's iframe src.
    """
    doc_prefix = "https://ieeexplore.ieee.org/document/"
    if url.startswith(doc_prefix):
        arnumber = url[url.rindex('/') + 1:]
        url = "https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=" + arnumber
    page = requests.get(url)
    page.encoding = 'utf-8'
    doc = BeautifulSoup(page.text, 'html.parser')
    frames = doc.select('iframe')
    _pdf_download(frames[0]['src'], filename)
    return True


def _pdf_download(pdf_url, filename):
    """Stream the resource at ``pdf_url`` to ``filename``.pdf on disk.

    Always returns True (no HTTP status check — callers treat failures
    upstream). The response is used as a context manager so the streamed
    connection is released; the original leaked it with stream=True.
    """
    out_filename = filename + '.pdf'
    with requests.get(pdf_url, stream=True) as r:
        with open(out_filename, "wb") as file:
            for chunk in r.iter_content(chunk_size=512):
                file.write(chunk)
    return True


# todo
def type(url: str):
    """Classify a paper URL into a downloader tag.

    Returns one of 'doi', 'ieee', 'medline', 'arxiv.org', 'mdpi',
    'usenix', or None when no pattern matches.

    NOTE: this shadows the builtin ``type``; the name is kept for
    backward compatibility with existing callers.
    """
    # Bug fix: str.find returns -1 (truthy!) on a miss, so the original
    # `if url.find("doi"):` matched almost every URL and the function
    # nearly always returned 'doi'. Use substring membership instead.
    if "doi" in url:
        return 'doi'
    elif "ieee" in url:
        return 'ieee'
    elif "ncbi" in url:
        return 'medline'
    elif "arxiv" in url:
        return 'arxiv.org'
    elif "mdpi.com" in url:
        return 'mdpi'
    elif "www.usenix.org" in url:
        return 'usenix'
    return None


# {'doi', 's2', 'publisher', 'acm', 'dblp', 'arxiv', 'ieee'}
def downloadPaper(url: str, url_type, filename):
    """Download a paper from ``url`` to ``filename``.pdf.

    Direct .pdf links are fetched as-is; doi.org links are resolved via
    doi_download; everything else dispatches through the url_types table
    keyed by ``url_type``.

    Returns True on success, False on failure.
    """
    if url.endswith(".pdf"):
        _pdf_download(url, filename)
        return True
    if url.find("doi.org") > 0:
        # Bug fix: the original discarded this result and fell through,
        # implicitly returning None even on success.
        return doi_download(url, filename)
    method = url_types.get(url_type)
    if method:
        return method(url, filename)
    print("download failure , {} : {} , {}".format("other", url, filename))
    return False


# Dispatch table mapping a url_type tag to its downloader.
# NOTE(review): type() returns 'arxiv.org' while this key is 'arxiv';
# callers appear to pass tags directly (see __main__) — verify before
# unifying the tag names.
url_types = {'s2': s2_download,
             'acm': acm_download,
             'dblp': dblp_download,
             'arxiv': arxiv_download,
             'ieee': ieee_download,
             'anansi': anansi_download,
             'medline': medline_download,
             'mdpi': mdpi_download,
             # Was missing even though usenix_download exists above.
             'usenix': usenix_download}

if __name__ == '__main__':
    # Manual smoke test: fetch one IEEE paper by its stamp.jsp viewer URL
    # and save it as 9156497.pdf (performs a real network request).
    downloadPaper(
        "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9156497",
        "ieee", "9156497")
