import lxml.html as lh
from urllib import parse
import requests
import time
import json
import os

# File-name template for downloaded PDFs: "(year-venue) title" (".pdf" is appended later).
PAPER_FILE_NAME_FORMAT = "({year}-{venue}) {title}"
# DBLP publication-search endpoint; returns JSON when called with format=json.
DBLP_API_URL = "https://dblp.uni-trier.de/search/publ/api"
# Keyword arguments merged into every requests.get() call:
# a desktop-browser User-Agent (some hosts reject the default one) and a 20 s timeout.
DEFAULT_REQUEST_KWARGS = {
    "headers": {"user-agent":
                    """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"""
                },
    "timeout": 20
}


class Paper:
    """One DBLP search hit, holding just the fields needed to name its PDF."""

    def __init__(self):
        self.author = None     # '&'-joined author names
        self.title = None      # title, cleaned for file-system use
        self.year = None
        self.venue = None
        self.json_info = None  # raw DBLP 'info' dict for this hit

    def to_file_name(self):
        """Return the target PDF file name, e.g. '(2020-ICML) Some Title.pdf'."""
        stem = PAPER_FILE_NAME_FORMAT.format_map(vars(self))
        return stem + ".pdf"

    def __str__(self):
        return self.to_file_name()

    # repr intentionally mirrors str: both show the would-be file name.
    __repr__ = __str__


class SearchResult:
    """Result of one DBLP query: total match count plus the Paper objects returned."""

    def __init__(self):
        self.matched_num = 0   # '@total' reported by the DBLP API
        self.paper_list = []   # Paper objects actually parsed from the hits

    def add_paper(self, paper):
        """Append *paper* to the result list."""
        self.paper_list.append(paper)

    def show(self):
        """Print the numbered result list, or a no-match notice when empty."""
        if not self.paper_list:
            print("\n没有匹配到文献，请更换搜索关键词")
            return
        print(f"\n匹配文献数量 : {self.matched_num}")
        print("文献列表 : ")
        for idx, paper in enumerate(self.paper_list, start=1):
            print(f"  <{idx}> {paper}")


class DBLPDownloader:
    """Interactive tool: search DBLP for papers and download their PDFs.

    PDFs are resolved from each hit's ``ee`` URL: open-access DOI pages,
    arxiv, AAAI, direct ``.pdf`` links, or — for closed-access DOIs — a
    user-supplied sci-hub mirror.
    """

    def __init__(self):
        self.output_dir = None    # asked interactively right before downloading
        self.sci_hub_url = None   # asked lazily on the first closed-access DOI
        self.api_url = DBLP_API_URL
        self.default_request_kwargs = DEFAULT_REQUEST_KWARGS

    @staticmethod
    def welcome():
        """Print the startup banner."""
        print("########################################")
        print("#########  坤哥制造，放心良药  #########")
        print("########################################")

    def run(self):
        """One search/confirm/download round.

        Prompts for a keyword, queries DBLP, shows the matches, and — after
        the user confirms — downloads every matched paper and prints a report.
        """
        print("\n$--------------------------------------$")
        search_keyword = self.input_search_keyword()
        json_result = self.get_search_json_result(search_keyword)
        search_result = self.parse_search_result(json_result)
        if not self.confirm_download(search_result):
            return
        succeed_paper_list, failed_paper_list = [], []
        n = len(search_result.paper_list)
        for i, paper in enumerate(search_result.paper_list):
            print(f"\n<{i + 1} / {n}> ", end='')
            if self.download_paper(paper):
                succeed_paper_list.append(paper)
            else:
                failed_paper_list.append(paper)
        self.report_download_result(succeed_paper_list, failed_paper_list)

    @staticmethod
    def input_search_keyword():
        """Prompt the user for a search keyword; return it whitespace-stripped."""
        search_keyword = input("\n请输入搜索关键词：")
        return search_keyword.strip()

    def get_search_json_result(self, search_keyword):
        """Query the DBLP publication API (up to 1000 hits); return the parsed 'result' object."""
        json_text = self._request_text(url=self.api_url,
                                       params={"q": search_keyword, "h": "1000", "format": "json"})
        return json.loads(json_text)['result']

    @staticmethod
    def parse_search_result(json_result):
        """Convert a DBLP API 'result' payload into a SearchResult of Paper objects."""
        hits_result = json_result['hits']
        sr = SearchResult()
        sr.matched_num = int(hits_result['@total'])
        # The 'hit' key is absent when the query matched nothing.
        for hit in hits_result.get('hit', []):
            hit_info = hit['info']
            paper = Paper()
            paper.author = DBLPDownloader._join_author_name(hit_info['authors']['author'])
            paper.title = DBLPDownloader._clean_title(hit_info['title'])
            paper.venue = DBLPDownloader._clean_venue(hit_info.get('venue'))
            paper.year = hit_info['year']
            paper.json_info = hit_info
            sr.add_paper(paper)
        return sr

    def confirm_download(self, search_result):
        """Show the matches and ask for confirmation; returns True to proceed.

        On confirmation also asks for the output directory.
        """
        confirm = False
        search_result.show()
        if len(search_result.paper_list):
            confirm = input("确认下载请输入y : ").lower() == "y"
        if confirm:
            self._input_output_dir()
            print("\n开始下载文献...\n")
        else:
            print("\n文献下载取消...\n")
        return confirm

    def download_paper(self, paper):
        """Download one paper's PDF, dispatching on its 'ee' URL; return True on success.

        Already-downloaded papers (file exists in output_dir) count as success.
        """
        paper_info = paper.json_info
        output_filename = paper.to_file_name()
        if self._is_paper_exist(paper):
            print(f"文献已存在 : {paper}")
            return True
        print(f"开始下载文献 : {paper}")
        is_success = False
        # Broad catch is deliberate: one failed paper must not abort the batch;
        # the failure is recorded and reported at the end.
        try:
            ee_url = paper_info.get('ee', 'undefined')
            is_open = paper_info.get('access', 'closed') == 'open'
            if "doi" in ee_url:
                if is_open:
                    is_success = self._download_from_doi(ee_url, output_filename)
                else:
                    # Strip everything up to and including "doi.org/" so only the
                    # bare DOI is passed on (robust to http/https, unlike the old
                    # fixed-length slice ee_url[16:]).
                    doi = ee_url.split("doi.org/", 1)[-1]
                    is_success = self._download_by_sci_hub(doi, output_filename)
            elif "arxiv" in ee_url:
                is_success = self._download_from_arxiv(ee_url, output_filename)
            elif "aaai" in ee_url:
                is_success = self._download_from_aaai(ee_url, output_filename)
            elif is_open and ee_url.lower().endswith(".pdf"):
                self._download_pdf(ee_url, output_filename)
                is_success = True
            else:
                print(f"    暂不支持该ee url : {ee_url}")
        except Exception as e:
            print(f"    文献下载异常 : {e}")
        if is_success:
            print(f"    下载成功！")
        return is_success

    def report_download_result(self, succeed_paper_list, failed_paper_list):
        """Print a download summary; persist failed papers' raw JSON for later retry."""
        n1, n2 = len(succeed_paper_list), len(failed_paper_list)
        n = n1 + n2
        if not n:
            # Nothing was attempted; avoid division by zero below.
            return
        print(f"\n文献下载报告：")
        print(f"    文献总数 : {n}")
        # Fixed: the ratio must be rendered as a percentage ('%' format spec
        # multiplies by 100); the old code printed e.g. "0.85%" for 85 %.
        print(f"    成功数量 : {n1} ({n1 / n:.2%})")
        print(f"    失败数量 : {n2} ({n2 / n:.2%})")
        if n2:
            print(f"    失败文献 : ")
            for i, paper in enumerate(failed_paper_list):
                print(f"        <{i + 1}/{n2}> {paper}")
            failed_paper_json_info_list = [p.json_info for p in failed_paper_list]
            failed_report_filename = f"failed_report_{time.strftime('%Y%m%d-%H-%M-%S')}.json"
            output_filepath = os.path.join(self.output_dir, failed_report_filename)
            # UTF-8 + ensure_ascii=False keeps non-ASCII metadata readable and
            # independent of the platform's default encoding.
            with open(output_filepath, "wt", encoding="utf-8") as _w:
                json.dump(failed_paper_json_info_list, _w, ensure_ascii=False)
            print(f"    失败文献报告已写入 : {output_filepath}")

    def _input_output_dir(self):
        """Ask for the output directory; empty input falls back to the cwd."""
        self.output_dir = input("\n设置文件写出目录(默认当前目录) : ") or "."

    def _input_sci_hub_url(self):
        """Ask for the sci-hub mirror base URL (kept for the whole session)."""
        self.sci_hub_url = input("\n输入sci-hub网址 : ")
        print()

    @staticmethod
    def _join_author_name(json_author):
        """Join DBLP's author field ('&'-separated); it is a dict for a single
        author and a list of dicts for several."""
        if isinstance(json_author, list):
            return "&".join(item['text'] for item in json_author)
        elif isinstance(json_author, dict):
            return json_author['text']
        else:
            raise ValueError(f"can't parse such json_author : {json_author}")

    @staticmethod
    def _clean_title(title_str):
        """Strip the trailing dot and characters unsafe in file names (':', '?')."""
        return title_str.rstrip('.').replace(":", "").replace("?", "").replace(" - ", "-")

    @staticmethod
    def _clean_venue(venue):
        """Normalise the venue field: None -> 'Unknown', list -> '-'-joined string."""
        if venue is None:
            return "Unknown"
        elif isinstance(venue, list):
            return "-".join(venue)
        else:
            return venue

    def _is_paper_exist(self, paper):
        """Return True when the paper's PDF already exists in output_dir."""
        output_filepath = os.path.join(self.output_dir, paper.to_file_name())
        return os.path.exists(output_filepath)

    def _request_response(self, url, **kwargs):
        """GET *url* with the default request kwargs merged in.

        Raises on any non-200 status (closing the response first — the old
        code leaked it on error). The caller must close the returned response.
        """
        kwargs = dict(self.default_request_kwargs, **kwargs)
        respond = requests.get(url=url, **kwargs)
        if respond.status_code != 200:
            status = respond.status_code
            failed_url = respond.url
            respond.close()
            raise Exception(f"request error for '{failed_url}' : code={status}")
        return respond

    def _request_text(self, url, **kwargs):
        """GET *url* and return the decoded body text."""
        respond = self._request_response(url, **kwargs)
        try:
            return respond.text
        finally:
            respond.close()

    def _request_bytes(self, url, **kwargs):
        """GET *url* and return the raw body bytes."""
        respond = self._request_response(url, **kwargs)
        try:
            return respond.content
        finally:
            respond.close()

    def _download_pdf(self, pdf_url, output_filename):
        """Fetch *pdf_url* and write it into output_dir (created on demand)."""
        pdf_bytes = self._request_bytes(pdf_url)
        os.makedirs(self.output_dir, exist_ok=True)
        output_filepath = os.path.join(self.output_dir, output_filename)
        with open(output_filepath, "wb") as _w:
            _w.write(pdf_bytes)

    def _download_by_sci_hub(self, doi, output_filename):
        """Resolve *doi* through the sci-hub mirror and download the embedded PDF.

        Returns False when the page contains no <embed id='pdf'> element.
        """
        if self.sci_hub_url is None:
            self._input_sci_hub_url()
        request_url = parse.urljoin(self.sci_hub_url, doi)
        html_text = self._request_text(request_url)
        html_obj = lh.document_fromstring(html_text)
        lookup_list = html_obj.xpath("//embed[@id='pdf']")
        if not len(lookup_list):
            print(f"    文献url解析失败 : {request_url}")
            return False
        pdf_url = lookup_list[0].get("src")
        if not pdf_url.startswith("http"):
            # Handles protocol-relative '//host/...' links; NOTE(review): a
            # host-relative '/path' would not resolve this way — assumed rare.
            pdf_url = parse.urljoin("https://", pdf_url)
        self._download_pdf(pdf_url, output_filename)
        return True

    def _download_from_arxiv(self, arxiv_url, output_filename):
        """Download from arxiv by rewriting the abstract URL to its /pdf/ form."""
        pdf_url = arxiv_url.replace("/abs/", "/pdf/")
        self._download_pdf(pdf_url, output_filename)
        return True

    def _download_from_doi(self, ee_url, output_filename):
        """Scrape an open-access DOI landing page for its PDF link and download it."""
        html_text = self._request_text(ee_url)
        html_obj = lh.document_fromstring(html_text)
        lookup_list = html_obj.xpath("//a[@id='toolbar__dcNavSmall']")
        if not len(lookup_list):
            print(f"    文献url解析失败 : {ee_url}")
            return False
        pdf_url = lookup_list[0].get("href")
        self._download_pdf(pdf_url, output_filename)
        return True

    def _download_from_aaai(self, ee_url, output_filename):
        """Scrape an AAAI article page for its PDF galley link and download it."""
        html_text = self._request_text(ee_url)
        html_obj = lh.document_fromstring(html_text)
        lookup_list = html_obj.xpath("//a[@class='obj_galley_link pdf']")
        if not len(lookup_list):
            print(f"    文献url解析失败 : {ee_url}")
            return False
        pdf_url = lookup_list[0].get("href")
        self._download_pdf(pdf_url, output_filename)
        return True

if __name__ == '__main__':
    downloader = DBLPDownloader()
    downloader.welcome()
    while True:
        # Fixed: reuse the single downloader so settings entered in one round
        # (output_dir, sci_hub_url) carry over to the next. The original built
        # a fresh DBLPDownloader() every iteration, discarding them.
        downloader.run()
