# -*- coding: utf-8 -*-

"""
Sci-API Unofficial API
[Search|Download] research papers from [scholar.google.com|sci-hub.io].

@author zaytoun
@updated by Xuele
"""

import argparse
import csv
import hashlib
import logging
import os
import re

import requests
import urllib3
from bs4 import BeautifulSoup
from retrying import retry

import allcodes

# Logging setup: dedicated 'Sci-Hub' logger, DEBUG level; TLS warnings from
# the verify=False requests below are silenced on purpose.
logging.basicConfig()
logger = logging.getLogger('Sci-Hub')
logger.setLevel(logging.DEBUG)
urllib3.disable_warnings()

# constants
# Search endpoints, selected by integer index in SciHub.search1:
#   0 -> PubMed, 1 -> ScienceDirect
SCHOLARS_BASE_URL = \
    [
        'https://pubmed.ncbi.nlm.nih.gov/',
        'https://www.sciencedirect.com/search'
    ]
# Browser-like User-Agent so the scholarly sites do not reject the requests.
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0'}
# HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
#                         'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'}


class SciHub(object):
    """
    Searches for papers on several scholarly databases (PubMed,
    ScienceDirect, ...) and fetches/downloads the pdfs via sci-hub mirrors.
    """

    def __init__(self):
        # One shared session so headers/cookies persist across requests.
        self.sess = requests.Session()
        self.sess.headers = HEADERS
        self.available_base_url_list = self._get_available_scihub_urls()
        # Current sci-hub mirror used for downloads, always ending in '/'.
        self.base_url = self.available_base_url_list[0] + '/'

    def _get_available_scihub_urls(self):
        '''
        Return the list of sci-hub mirrors to try.

        The dynamic lookup via http://tool.yovisun.com/scihub/ is disabled;
        a single known-good mirror is hard-coded instead.
        '''
        urls = ['https://sci-hub.se']
        return urls

    def set_proxy(self, proxy):
        '''
        Set an HTTP/HTTPS proxy on the session.
        :param proxy: proxy url, e.g. socks5://user:pass@host:port
        :return: None
        '''
        if proxy:
            self.sess.proxies = {
                "http": proxy,
                "https": proxy, }

    def _change_base_url(self):
        """
        Discard the current (failing) sci-hub mirror and switch to the next.

        :raises Exception: when no mirrors remain to fall back to.
        """
        if not self.available_base_url_list:
            raise Exception('Ran out of valid sci-hub urls')
        del self.available_base_url_list[0]  # drop the dead mirror
        # Re-check AFTER deleting: the original only checked before, which
        # raised IndexError when exactly one mirror was left.
        if not self.available_base_url_list:
            raise Exception('Ran out of valid sci-hub urls')
        self.base_url = self.available_base_url_list[0] + '/'
        logger.info("I'm changing to {}".format(self.available_base_url_list[0]))

    def search1(self, search_url, query, savePath, limit=10, beg_year=1000, ed_year=3000, download=False):
        """
        Query a scholarly database and append one CSV row per paper
        (DOI, title, journal, date, abstract, url) to savePath.

        :param search_url: integer selecting the database:
                           0 -> PubMed, 1 -> ScienceDirect,
                           2/3 -> not implemented, 4 -> chinapubmed.net
        :param query: search terms
        :param savePath: csv file the rows are appended to
        :param limit: stop after this many papers
        :param beg_year: earliest publication year accepted
        :param ed_year: latest publication year accepted
        :param download: unused; kept for interface compatibility
        :return: dict of the form {'papers': [...]} with an added 'err'
                 key on failure
        """
        results = {'papers': []}  # accumulated search results
        SearchNums = 0            # papers written so far

        # Each database needs its own request parameters and html parsing.
        if search_url == 0:
            print("数据库：{}".format(SCHOLARS_BASE_URL[0]))
            start = 1  # PubMed pages are 1-based
            while True:
                try:
                    datetemp = 'dates.' + str(beg_year) + '-' + str(ed_year)
                    res = self.sess.get(SCHOLARS_BASE_URL[search_url],
                                        params={'term': query, 'filter': datetemp, 'page': start})
                    print("url:{}".format(res.url))
                    sta = "Status_code:" + str(res.status_code) + ", " + allcodes.get_status(res.status_code)
                    print(sta)
                    if not res.status_code == requests.codes.ok:
                        results['err'] = sta
                        return results

                except requests.exceptions.RequestException:
                    results['err'] = 'Failed to complete search with query %s (connection error)' % query
                    return results

                s = self._get_soup(res.content)
                # Each result on a PubMed listing page lives in a
                # div.docsum-content element.
                papers = s.find_all('div', class_="docsum-content")
                print("论文列表数量：" + str(len(papers)))

                # No results: either we ran off the end or hit a captcha.
                if not papers:
                    if 'CAPTCHA' in str(res.content):
                        results['err'] = 'Failed to complete search with query %s (captcha)' % query
                    return results

                # Visit each paper's own page for the full metadata.
                for paper in papers:
                    pmid = 0
                    if paper.find('span', class_="docsum-pmid"):  # PubMed id
                        pmid = paper.find('span', class_="docsum-pmid").text
                    print(pmid)

                    PaperPage = self.sess.get(f"https://pubmed.ncbi.nlm.nih.gov/{pmid}/", verify=False)
                    PageInfo = self._get_soup(PaperPage.content)

                    # Skip entries without a citation block (e.g. books).
                    if not PageInfo.find('div', class_="article-citation"):
                        continue

                    # Title, whitespace-normalised.
                    paperName = PageInfo.find('h1', class_="heading-title").text
                    paperName = self._replace_word(paperName)
                    paperName = ' '.join(paperName.split())

                    # Abstract (optional).
                    if PageInfo.find('div', class_="abstract-content selected"):
                        abstract = PageInfo.find('div', class_="abstract-content selected").text
                        abstract = self._replace_word(abstract)
                        abstract = ' '.join(abstract.split())
                    else:
                        abstract = "No abstract available"

                    # Journal name.
                    ArticleInfo = PageInfo.find('div', class_="article-citation")
                    journal = ArticleInfo.find('button', class_="journal-actions-trigger trigger").text
                    journal = self._replace_word(journal)
                    journal = ' '.join(journal.split())

                    # DOI; fall back to the PMID when the page has none.
                    doi = pmid
                    if ArticleInfo.find('span', class_="citation-doi"):
                        doi = ArticleInfo.find('span', class_="citation-doi").text
                        doi = self._replace_word(doi)
                        # strip the leading 'doi ' prefix and trailing dot
                        doi = ' '.join(doi.split())[4:-1]

                    # Publication date: the text before ';' in span.cit.
                    if ArticleInfo.find('span', class_="cit"):
                        date_info = ArticleInfo.find('span', class_="cit").text
                        date_info = date_info[0:date_info.find(';')]
                    else:
                        date_info = 0

                    # Append this paper as one csv row.
                    row = [doi, paperName, journal, date_info, abstract,
                           f"https://pubmed.ncbi.nlm.nih.gov/{pmid}/"]
                    with open(savePath, 'a', newline='', encoding='utf-8') as f:
                        csv.writer(f).writerow(row)

                    SearchNums += 1
                    print("已搜索{}篇，还剩{}篇……".format(SearchNums, limit - SearchNums))
                    if SearchNums >= limit:
                        return results

                start += 1
        elif search_url == 1:
            print("数据库：{}".format(SCHOLARS_BASE_URL[1]))
            start = 0  # ScienceDirect paginates by result offset
            while True:
                try:
                    res = self.sess.get(SCHOLARS_BASE_URL[search_url], params={
                        'qs': query, 'start': start, 'date': str(beg_year) + '-' + str(ed_year)})
                    print("res:", res)

                except requests.exceptions.RequestException:
                    results['err'] = 'Failed to complete search with query %s (connection error)' % query
                    return results

                s = self._get_soup(res.content)
                papers = s.find_all('div', class_="result-item-content")

                if not papers:
                    if 'CAPTCHA' in str(res.content):
                        results['err'] = 'Failed to complete search with query %s (captcha)' % query
                    return results

                for paper in papers:
                    # Entries containing a <table> are not articles; skip them.
                    if not paper.find('table'):
                        source = None
                        link = paper.find('h2')
                        if link.find('a'):
                            source = link.find('a')['href']  # article path
                        else:
                            continue

                        # Title, journal and date from the listing entry.
                        paperName = self._replace_word(link.text)
                        paperName = ' '.join(paperName.split())
                        info = paper.find_all('span', class_="preceding-comma")
                        journal_info = info[0].text
                        date_info = info[1].text
                        # NOTE(review): assumes the date string ends with a
                        # 4-digit year — confirm against live markup.
                        date_year = int(date_info[-4:])
                        if date_year < beg_year:
                            continue

                        # The listing hides abstracts behind a toggle, so
                        # fetch the article page itself.
                        PaperPage = self.sess.get(f"https://www.sciencedirect.com{source}", verify=False)
                        PageCt = self._get_soup(PaperPage.content)
                        doiLink = PageCt.find('a', class_="doi")
                        doi = doiLink.text[16:]  # strip 'https://doi.org/' prefix

                        abs_info = PageCt.find('div', class_="abstract author")
                        abstract = ""
                        if abs_info:
                            abs_h2 = abs_info.find_all('h2')  # 'Abstract' heading
                            abs_h3 = abs_info.find_all('h3')  # section headings
                            abs_h3_p = abs_info.find_all('p')  # section bodies
                            if (abs_h2 and abs_h3) or abs_h3:
                                # Research article: Objectives / Methods /
                                # Conclusions sections.
                                for i in range(0, len(abs_h3)):
                                    abstract = abstract + abs_h3[i].text.replace('\n', ' ').replace('\r', ' ') + \
                                               "\n" + abs_h3_p[i].text.replace('\n', ' ').replace('\r', ' ') + "\n"
                            elif abs_h2:
                                # Plain article: a single abstract paragraph.
                                abstract = abs_info.find('p').text.replace('\n', ' ').replace('\r', ' ')
                            else:
                                continue
                        else:
                            abstract = "No Abstract!"

                        # Append this paper as one csv row.
                        row = [doi, paperName, journal_info, date_info, abstract,
                               f"https://www.sciencedirect.com{source}"]
                        print([row])
                        with open(savePath, 'a', newline='', encoding='utf-8') as f:
                            csv.writer(f).writerow(row)

                        SearchNums += 1
                        print("已搜索{}篇，还剩{}篇……".format(SearchNums, limit - SearchNums))
                        if SearchNums >= limit:
                            return results
                start += 10
        elif search_url == 2:
            results['err'] = "数据库还未添加，请重新选择!"
            return results
        elif search_url == 3:
            results['err'] = "数据库还未添加，请重新选择!"
            return results
        elif search_url == 4:
            print("数据库：{}".format(SCHOLARS_BASE_URL[1]))
            start = 0
            while True:
                try:
                    year = str(beg_year) + '0101'
                    # NOTE(review): SCHOLARS_BASE_URL has only 2 entries, so
                    # SCHOLARS_BASE_URL[4] raises IndexError — this branch
                    # needs the chinapubmed search url added to the constant
                    # list before it can work. Left as-is pending that url.
                    res = self.sess.get(SCHOLARS_BASE_URL[search_url],
                                        params={'q': query, 'f_year': year, 'current_page': start})
                    print("res:", res)

                except requests.exceptions.RequestException:
                    results['err'] = 'Failed to complete search with query %s (connection error)' % query
                    return results

                s = self._get_soup(res.content)
                papers = s.find_all('div', class_="paper-list-title")

                if not papers:
                    if 'CAPTCHA' in str(res.content):
                        results['err'] = 'Failed to complete search with query %s (captcha)' % query
                    return results

                for paper in papers:
                    pmid = 0
                    if paper.find('a'):
                        pmid = paper.find('a')['href'][1:]  # paper id from the link

                    PaperPage = self.sess.get(f"http://www.chinapubmed.net/{pmid}", verify=False)
                    PageInfo = self._get_soup(PaperPage.content)

                    # Title, abstract, journal.
                    paperName = PageInfo.find('div', class_="entitle").text
                    paperName = self._replace_word(paperName)
                    paperName = ' '.join(paperName.split())
                    abstract = PageInfo.find('div', class_="enabstract").text
                    enInfo = PageInfo.find('div', class_="en_attr")
                    journalInfo = enInfo.find_all('a')
                    journal = journalInfo[1].text
                    otherInfo = enInfo.find_all('span')  # date / doi spans
                    date_info = 0
                    date_year = 0
                    doi = 0

                    for info in otherInfo:
                        if info.text[0:4] == "发表日期":
                            date_info = info.text[6:]
                            date_year = int(date_info[0:4])

                        elif info.text == "官网" or info.text == "免费下载":
                            # The doi is the path after the third '/' in the
                            # outbound link.
                            link = info.find('a')['href']
                            index1 = link.find('/')
                            index2 = link.find('/', index1 + 1)
                            index3 = link.find('/', index2 + 1)
                            doi = link[index3 + 1:]
                    if doi == 0:
                        doi = pmid  # fall back to the paper id

                    if date_year < beg_year:
                        continue

                    results['papers'].append({
                        'name': paperName,
                        'url': f"http://www.chinapubmed.net/{pmid}",
                        'journal': journal,
                        'date': date_info,
                        'abstract': abstract,
                        'DOI': doi,
                    })

                    print("已搜索{}篇，还剩{}篇……".format(len(results['papers']), limit - len(results['papers'])))
                    if len(results['papers']) >= limit:
                        return results

                start += 1
        else:
            results['err'] = "数据库传入参数有误！"
            return results

    def search(self, search_url, searchDic):
        """
        Perform a raw GET against search_url with searchDic as query params.

        :param search_url: full url of the search endpoint
        :param searchDic: dict of query parameters
        :return: the requests.Response object (not parsed)
        """
        res = self.sess.get(search_url, params=searchDic)
        print("res->url:", res.url)
        print("get访问结束")
        return res

    @retry(wait_random_min=100, wait_random_max=5000, stop_max_attempt_number=10)
    def download(self, identifier, destination='', path=None):
        """
        Downloads a paper from sci-hub given an identifier (DOI, PMID, URL)
        and saves it to destination/path (or a generated name when path is
        omitted). Can be blocked by a captcha after a certain limit.

        :param identifier: DOI, PMID or url of the paper
        :param destination: directory to store the pdf in
        :param path: explicit filename; auto-generated when None
        :return: dict with 'pdf'/'url'/'name' on success, 'err' on failure
        """
        data = self.fetch(identifier)

        if 'err' not in data:
            print("not err in data!")
            # The original passed the bare `path` here, which crashed with
            # open(None) whenever no explicit path was given (as main() does).
            self._save(data['pdf'],
                       os.path.join(destination, path if path else data['name']))

        return data

    def fetch(self, identifier):
        """
        Fetches the paper by first retrieving the direct link to the pdf.
        If the identifier is a DOI, PMID, or URL pay-wall, then use Sci-Hub
        to access and download paper. Otherwise, just download paper directly.

        :return: dict with 'pdf' (bytes), 'url' and 'name' on success,
                 or a dict with only 'err' on failure
        """
        url = self._get_direct_url(identifier)
        print("fetch()->url:", url)

        if url is None:
            return {
                'err': 'Failed to fetch pdf with identifier %s (resolved url %s) due to captcha' % (identifier, url)}

        # verify=False is dangerous but sci-hub.io
        # requires intermediate certificates to verify
        # and requests doesn't know how to download them.
        # as a hacky fix, you can add them to your store
        # and verifying would work. will fix this later.
        res = self.sess.get(url, verify=False)
        print("fetch()->res:", res.headers)

        # .get() instead of [] — a missing Content-Type header must not
        # raise KeyError, it just means "not a pdf".
        if res.headers.get('Content-Type') != 'application/pdf':
            print("No Content-Type: application/pdf")
            # Most likely a captcha page; rotate to the next mirror so the
            # @retry wrapper's next attempt uses a fresh base url.
            self._change_base_url()
            return {
                'err': 'Failed to fetch pdf with identifier %s (resolved url %s) due to captcha'
                       % (identifier, url)
            }
        else:
            return {
                'pdf': res.content,
                'url': url,
                'name': self._generate_name(res)
            }

    def _get_direct_url(self, identifier):
        """
        Finds the direct source url for a given identifier: the identifier
        itself when it already points at a pdf, otherwise the link resolved
        through sci-hub.
        """
        id_type = self._classify(identifier)

        return identifier if id_type == 'url-direct' \
            else self._search_direct_url(identifier)

    def _search_direct_url(self, identifier):
        """
        Sci-Hub embeds papers in an iframe. Return the iframe's source url,
        which looks like https://moscow.sci-hub.io/.../....pdf, or None when
        the page has no iframe (e.g. a captcha page).
        """
        print("identifier:{}".format(identifier))
        print("_search_direct_url()->get:", self.base_url + identifier)
        res = self.sess.get(self.base_url + identifier, verify=False)
        print("_search_direct_url()->status_code:", res.status_code)
        print("_search_direct_url()->res:", res)
        s = self._get_soup(res.content)
        iframe = s.find('iframe')
        if iframe:
            src = iframe.get('src')
            # protocol-relative urls ('//host/...') need a scheme prepended
            return src if not src.startswith('//') else 'http:' + src

    def _classify(self, identifier):
        """
        Classify the type of identifier:
        url-direct - openly accessible paper
        url-non-direct - pay-walled paper
        pmid - PubMed ID
        doi - digital object identifier
        """
        # startswith('http') already covers 'https' urls.
        if identifier.startswith('http'):
            if identifier.endswith('pdf'):
                return 'url-direct'
            else:
                return 'url-non-direct'
        elif identifier.isdigit():
            return 'pmid'
        else:
            return 'doi'

    def _save(self, data, path):
        """
        Write binary data to path.
        """
        with open(path, 'wb') as f:
            f.write(data)

    def _get_soup(self, html):
        """
        Return html parsed into a BeautifulSoup tree (lxml backend).
        """
        return BeautifulSoup(html, 'lxml')

    def _generate_name(self, res):
        """
        Generate unique filename for paper. Returns a name by calculating
        md5 hash of file contents, then appending the last 20 characters
        of the url which typically provides a good paper identifier.
        """
        name = res.url.split('/')[-1]
        name = re.sub('#view=(.+)', '', name)  # drop pdf-viewer fragment
        pdf_hash = hashlib.md5(res.content).hexdigest()
        return '%s-%s' % (pdf_hash, name[-20:])

    def _replace_word(self, text):
        """
        Replace every character outside [0-9A-Za-z], space, '-', '.' and '/'
        with a single space (one space per replaced character, matching the
        original hand-rolled loop).
        """
        return re.sub(r'[^0-9A-Za-z .\-/]', ' ', text)


class CaptchaNeedException(Exception):
    """Raised when sci-hub answers with a captcha instead of a pdf.

    Currently unreferenced: the raise site in fetch() is commented out.
    """
    pass


def main():
    """Command-line entry point: parse arguments and dispatch to SciHub.

    NOTE(review): the -s/-sd paths call sh.search(query, limit), but
    SciHub.search() now takes (search_url, searchDic) and returns a raw
    requests.Response — these branches appear stale relative to the current
    class API (search1 looks like the intended replacement). Confirm before
    re-enabling the commented-out __main__ guard below.
    """
    sh = SciHub()

    parser = argparse.ArgumentParser(description='SciHub - To remove all barriers in the way of science.')
    parser.add_argument('-d', '--download', metavar='(DOI|PMID|URL)', help='tries to find and download the paper',
                        type=str)
    parser.add_argument('-f', '--file', metavar='path', help='pass file with list of identifiers and download each',
                        type=str)
    parser.add_argument('-s', '--search', metavar='query', help='search Google Scholars', type=str)
    parser.add_argument('-sd', '--search_download', metavar='query',
                        help='search Google Scholars and download if possible', type=str)
    parser.add_argument('-l', '--limit', metavar='N', help='the number of search results to limit to', default=10,
                        type=int)
    parser.add_argument('-o', '--output', metavar='path', help='directory to store papers', default='', type=str)
    parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')
    parser.add_argument('-p', '--proxy', help='via proxy format like socks5://user:pass@host:port', action='store',
                        type=str)

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    if args.proxy:
        sh.set_proxy(args.proxy)

    if args.download:
        # Single identifier download; result dict carries 'err' on failure.
        result = sh.download(args.download, args.output)
        if 'err' in result:
            logger.debug('%s', result['err'])
        else:
            logger.debug('Successfully downloaded file with identifier %s', args.download)
    elif args.search:
        # NOTE(review): stale call signature — see function docstring.
        results = sh.search(args.search, args.limit)
        if 'err' in results:
            logger.debug('%s', results['err'])
        else:
            logger.debug('Successfully completed search with query %s', args.search)
        print(results)
    elif args.search_download:
        # NOTE(review): stale call signature — see function docstring.
        results = sh.search(args.search_download, args.limit)
        if 'err' in results:
            logger.debug('%s', results['err'])
        else:
            logger.debug('Successfully completed search with query %s', args.search_download)
            for paper in results['papers']:
                result = sh.download(paper['url'], args.output)
                if 'err' in result:
                    logger.debug('%s', result['err'])
                else:
                    logger.debug('Successfully downloaded file with identifier %s', paper['url'])
    elif args.file:
        # Batch mode: one identifier per line in the given file.
        with open(args.file, 'r') as f:
            identifiers = f.read().splitlines()
            for identifier in identifiers:
                result = sh.download(identifier, args.output)
                if 'err' in result:
                    logger.debug('%s', result['err'])
                else:
                    logger.debug('Successfully downloaded file with identifier %s', identifier)

# if __name__ == '__main__':
#     main()
