# -*- encoding:utf-8 -*-
import requests
import json
import translate
import time
from tqdm import tqdm
import download_pdf
import os
import threading
import re


def objects_to_json_file(objects, file_path):
    """Serialize *objects* to a JSON file at *file_path*.

    Written as UTF-8 with ``ensure_ascii=False`` so the Chinese titles and
    abstracts this project stores stay human-readable on disk (the original
    relied on the locale default encoding and ASCII-escaped everything).

    :param objects: any JSON-serializable object (here: list[dict])
    :param file_path: destination path; overwritten if it exists
    """
    with open(file_path, 'w', encoding='utf-8') as file:
        json.dump(objects, file, ensure_ascii=False)


def json_file_to_objects(file_path):
    """Load and return the JSON content of *file_path*.

    Reads as UTF-8 explicitly — the files written by
    ``objects_to_json_file`` contain Chinese text, and relying on the
    platform locale encoding would break on Windows.

    :param file_path: path to a JSON file
    :return: the deserialized object (here: list[dict])
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)


def checkNameValid(name=None):
    """Sanitize *name* into a valid Windows file name.

    Every run of characters illegal in Windows file names
    (``\\ / : * ? " < > |`` and CR/LF) is replaced by a single ``_``.
    The original implementation used ``findall`` + repeated
    ``str.replace``, which double-substituted overlapping runs
    (e.g. ``':a::'`` became ``'_a__'``); a single ``re.sub`` pass
    replaces each run exactly once.

    :param name: proposed file name, or None
    :return: sanitized name, or None when *name* is None
    """
    if name is None:
        print("name is None!")
        return None
    return re.sub(r'[\\/:*?"<>|\r\n]+', "_", name)


class SemanticScholar:
    """Client for the (unofficial) Semantic Scholar web search endpoint.

    Pipeline: :meth:`catch_title` runs a keyword search and dumps the raw
    JSON results, :meth:`translate_results` translates each title/abstract
    to Chinese via Baidu translate, and :meth:`download` fetches the PDFs
    in background threads.
    """

    def __init__(self):
        # Headers captured verbatim from a browser session.
        # NOTE(review): the Cookie, sid and Content-Length values are
        # session-specific and will eventually go stale — refresh from a
        # live browser session if requests start failing.
        self.headers = {
            "Host": "www.semanticscholar.org",
            "Connection": "keep-alive",
            "Content-Length": "387",
            "Cache-Control": "no-cache,no-store,must-revalidate,max-age=-1",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
            "X-S2-UI-Version": "c0398ea30a8a8a5f4c31e7e1c2a65433a7170355",
            "Content-Type": "application/json",
            "Accept": "*/*",
            "Origin": "https://www.semanticscholar.org",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Dest": "empty",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cookie": "tid=rBIAB154euMjsgAICN/wAg==; _ga=GA1.2.976079114.1584954093; compact_serp_results=false; hubspotutk=dbbb5906ecf73095e819af2396102f11; s2_feed_link_ad_dismissed=%7B%22pdp%22%3Atrue%7D; _gcl_au=1.1.1778260446.1600749954; pv2more=1600750141; pv2more10sec=1600750141; s2Exp=new_ab_framework_aa%3Dcontrol%26abstract_highlighter_v2%3D-highlighted_abstract_default_toggle_off%26pdp_top_citation_scorecard_v2%3D-top_citing_with_counts%26search_suggestions%3D-control%26perf_split_by_route%3D-control%26serp_result_tldr_2%3D-tldr%26pdp_promotion_banner_v2%3D-test%26alerts_two_types_relevance_v2%3Drelevance_author%26citation_ranking_v2_3%3Dlambda_0_01; _gid=GA1.2.1388832563.1606736805; __hssrc=1; s2Hist=2020-12-02T00%3A00%3A00.000Z%7C111000010000000000000000100000000000000000000000000010000000000000000001; sid=48fb43c8-fbd3-4d0d-a2ab-d6bbca694130; _gat_gtag_UA_67668211_2=1; _hp2_ses_props.2424575119=%7B%22ts%22%3A1606903745368%2C%22d%22%3A%22www.semanticscholar.org%22%2C%22h%22%3A%22%2F%22%7D; __hstc=132950225.dbbb5906ecf73095e819af2396102f11.1584954100531.1606817813651.1606903748449.39; __hssc=132950225.1.1606903748449; _hp2_id.2424575119=%7B%22userId%22%3A%221966527932255788%22%2C%22pageviewId%22%3A%223800849545711277%22%2C%22sessionId%22%3A%223632169792492966%22%2C%22identity%22%3Anull%2C%22trackerVersion%22%3A%224.0%22%7D; _hp2_props.2424575119=%7B%22feature%3Apdp_entity_relations%22%3Afalse%2C%22experiment%3Anew_ab_framework_aa%22%3A%22control%22%2C%22experiment%3Aautocomplete%22%3Anull%2C%22feature%3Asatisfaction_survey%22%3Afalse%2C%22feature%3Acitations_swap%22%3Afalse%2C%22feature%3Acorona_sample_searches%22%3Afalse%2C%22experiment%3Aaugmented_reader_pdp%22%3Anull%2C%22feature%3Acorona_homepage_link%22%3Afalse%2C%22experiment%3Anew_ab_framework_mock_ab%22%3Anull%2C%22feature%3Aauthor_influence_graph%22%3Atrue%2C%22feature%3Aaugmented_reader%22%3Afalse%2C%22feature%3Alogin_demographics_modal%22%3Atrue%2C%22feature%3Asimilar_papers_pdp%22%3Atrue%2C%22feature%3Aemergency_banner%22%3Afalse%2C%22feature%3Ahubspot_newsletter_form%22%3Atrue%2C%22tid%22%3A%22rBIAB154euMjsgAICN%2FwAg%3D%3D%22%2C%22Is%20Signed%20In%22%3Afalse%2C%22feature%3Alog_heap_landmarks%22%3Afalse%2C%22feature%3Acorona_redirect_to_cord19%22%3Atrue%2C%22feature%3Aminify_js%22%3Atrue%2C%22feature%3Arelated_papers_search_cluster%22%3Atrue%2C%22feature%3Ahomepage_ads%22%3Atrue%2C%22feature%3Ause_fallback_search_cluster%22%3Afalse%2C%22experiment%3Aalerts_aa_test%22%3Anull%2C%22experiment%3Aalerts_two_types_relevance_v2%22%3A%22relevance_author%22%2C%22feature%3Apdp_use_dynamo%22%3Atrue%2C%22feature%3Aresearch_homepage%22%3Afalse%2C%22feature%3Atldr_survey%22%3Atrue%2C%22feature%3Asearch_citations_perf%22%3Afalse%2C%22feature%3Aauthor_claim_on_pdp%22%3Atrue%2C%22feature%3Afeed_satisfaction_survey%22%3Afalse%2C%22feature%3Aserp_swap_at_ten%22%3Afalse%2C%22experiment%3Apdp_promotion_banner_v2%22%3Anull%2C%22experiment%3Acitation_ranking_v2_3%22%3A%22lambda_0_01%22%2C%22feature%3Ahydrate_search_from_ddb%22%3Atrue%2C%22feature%3Ahomepage_ad_tldr%22%3Atrue%2C%22feature%3Ahighly_influential_citations_scorecard%22%3Atrue%2C%22feature%3Ause_fallback_search_reranker_service%22%3Afalse%2C%22feature%3Awith_entitlements%22%3Afalse%2C%22feature%3Ahomepage_ad_browser_ext%22%3Afalse%2C%22feature%3Aresized_figures%22%3Atrue%2C%22feature%3Asuggested_searches_from_campaign%22%3Atrue%2C%22feature%3Apdp_citation_suggestions%22%3Atrue%2C%22experiment%3Aabstract_highlighter_v2%22%3Anull%2C%22experiment%3Apdp_top_citation_scorecard_v2%22%3Anull%2C%22experiment%3Asearch_suggestions%22%3Anull%7D"
        }
        # Template payload for the search endpoint; copied per request before
        # customizing. NOTE(review): booleans are sent as the *strings*
        # "true"/"false" exactly as captured from the browser — the endpoint
        # apparently accepts them; do not "fix" without re-testing.
        self.request_json = {"queryString": "", "page": 1, "pageSize": 1, "sort": "relevance",
                             "authors": [], "coAuthors": [], "venues": [], "yearFilter": {"min": 2018, "max": 2021},
                             "requireViewablePdf": "false", "publicationTypes": [], "externalContentTypes": [],
                             "fieldsOfStudy": ["computer-science"], "useFallbackRankerService": "false",
                             "useFallbackSearchCluster": "true", "hydrateWithDdb": "true", "includeTldrs": "true",
                             "performTitleMatch": "true", "includeBadges": "true"}

        self.url = 'https://www.semanticscholar.org/api/1/search'
        self.translated_tool = translate.TranslateTool()

    def catch_title(self, keywords="traffic surveillance anomaly", page_count=30):
        """Search Semantic Scholar and save the raw result JSON to a file.

        :param keywords: search query string
        :param page_count: maximum number of result pages to fetch
            (10 results per page); capped by the API's totalPages
        :return: (results_filename, results) — path of the written file
            and the accumulated raw result dicts
        """
        request_json = self.request_json.copy()
        request_json["queryString"] = keywords
        # First request only discovers how many pages actually exist.
        # timeout added so a dead connection cannot hang the script forever.
        r = requests.post(self.url,
                          json=request_json,
                          headers=self.headers,
                          timeout=30)
        result = r.json()
        total_pages = result['totalPages']
        page_count = min(page_count, total_pages)
        results_filename = "generation/{}-{}.json".format("-".join(keywords.split(" ")), page_count)
        results = []
        with tqdm(total=page_count) as t:
            for i in range(page_count):
                t.update(1)
                time.sleep(1)  # throttle: be polite to the endpoint
                request_json = self.request_json.copy()
                request_json["queryString"] = keywords
                request_json["page"] = i + 1
                request_json["pageSize"] = 10
                try:
                    r = requests.post(self.url,
                                      json=request_json,
                                      headers=self.headers,
                                      timeout=30)
                    result = r.json()
                    if "results" in result:
                        results.extend(result['results'])
                except Exception:
                    # Best-effort: a failed page is reported and skipped.
                    print("{} exception".format(i + 1))
        objects_to_json_file(results, results_filename)
        return results_filename, results

    def translate_results(self, object_list, name: str):
        """Translate each raw result's title and abstract to Chinese (Baidu).

        :param object_list: raw result dicts from the search API
        :param name: path of the JSON file the translated list is saved to
        :return: (name, translated_results)
        """
        assert name is not None, "name must not be None"
        translated_results = []
        with tqdm(desc="object_list", total=len(object_list)) as t:
            for ind, article in enumerate(object_list):
                try:
                    # Bug fix: reset per article. Previously a link-less
                    # article either raised NameError or silently inherited
                    # the *previous* article's url/url_type.
                    url = None
                    url_type = None
                    title = article['title']['text']
                    _, ch_title = self.translated_tool.baidu_translate(title)
                    _, abstract = self.translated_tool.baidu_translate(article['paperAbstract']['text'])
                    t.update(1)
                    if len(article['links']) > 0:
                        url = article['links'][0]['url']
                        url_type = article['links'][0]['linkType']
                    elif article['alternatePaperLinks'] is not None and len(article['alternatePaperLinks']) > 0:
                        url = article['alternatePaperLinks'][0]['url']
                        url_type = article['alternatePaperLinks'][0]['linkType']
                    translated_results.append({
                        "title": title,
                        "ch_title": ch_title,
                        "abstract": abstract,
                        "url": url,
                        "url_type": url_type,
                        "year": article['year']['text'],
                    })
                except Exception as e:
                    # Best-effort: a malformed article is reported and skipped.
                    print("index: {}, exception: {}".format(ind, e))
        objects_to_json_file(translated_results, name)
        return name, translated_results

    @staticmethod
    def getParametersFromTranslated(object_list: list, selected: list = None, parameters: list = None):
        """Filter articles by index and project each to the requested keys.

        :param object_list: list of article dicts (must be non-empty)
        :param selected: indices of articles to keep; all when None/empty.
            (Defaults changed from mutable ``[]`` to None — same behavior.)
        :param parameters: dict keys to keep; all when None/empty. Every key
            must exist in the first article.
        :return: the filtered/projected list
        """
        length = len(object_list)
        assert length > 0, "object_list is null"
        selected = selected or []
        parameters = parameters or []
        all_parameters = dict(object_list[0]).keys()
        assert set(parameters).issubset(set(all_parameters)), "unknown parameter"
        if selected:
            # Out-of-range indices are silently dropped, as before.
            tep_list = [object_list[i] for i in selected if i < length]
        else:
            tep_list = object_list
        if not parameters:
            return tep_list
        return [{parameter: obj[parameter] for parameter in parameters} for obj in tep_list]

    @staticmethod
    def getAllUrlTypeSet(object_list):
        """Return the set of distinct ``url_type`` values across articles."""
        return {article["url_type"] for article in object_list}

    @staticmethod
    def download(object_list, directory: str = "paper/"):
        """Download every article's PDF into *directory*, one thread each.

        File names are built from the English and Chinese titles and
        sanitized for Windows. Already-downloaded files are skipped.

        :param object_list: translated article dicts (need "title",
            "ch_title", "url", "url_type")
        :param directory: target directory, created if missing
        """

        def thread_download(obj, path):
            """Worker: fetch obj's PDF to <path>.pdf unless it exists."""
            if not os.path.exists(path + ".pdf"):
                if download_pdf.downloadPaper(obj["url"], obj["url_type"], path):
                    print("{} has downloaded".format(path))

        # exist_ok avoids the check-then-mkdir race of the original.
        os.makedirs(directory, exist_ok=True)
        for article in object_list:
            filename = article["title"] + "-" + article["ch_title"]
            # Bug fix: the original discarded the replace() result
            # (str.replace returns a new string; it was a no-op statement).
            filename = filename.replace(':', '--')
            filename = checkNameValid(filename)
            filename = os.path.join(directory, filename)
            threading.Thread(target=thread_download, args=(article, filename)).start()


if __name__ == '__main__':
    # Demo pipeline: search -> save raw JSON -> translate -> download PDFs.
    scholar = SemanticScholar()
    # Fetch up to 2 result pages (10 results each) for the query; the raw
    # results are written to generation/traffic-anomaly-dataset-<pages>.json.
    filename, _ = scholar.catch_title(keywords="traffic anomaly dataset", page_count=2)
    results = json_file_to_objects(filename)
    # Translate titles/abstracts to Chinese; writes "<raw-name>_translate.json".
    translated_filename, translated_results = scholar.translate_results(results,
                                                                        filename.split(".")[0] + "_translate.json")

    # NOTE(review): the commented-out block below was a one-off curation step:
    # it merged hand-picked indices from three earlier translated search runs
    # (deduplicating by title) into generation/attention_paper.json.
    # selected_ind = [
    #     [0, 1, 2, 3, 6, 8, 9, 11, 14, 19, 22, 31, 42, 43, 44, 68, 133, 159, 180],
    #     [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 17, 18, 19, 26, 36, 43, 68, 82, 87, 95, 110, 116, 137],
    #     [2, 3, 6, 12, 13, 14, 19, 64, 65, 68, 85, 88, 113, 127, 186, 203, 206, 240, 274, 293, 299]
    # ]
    # results = {}
    # a = scholar.getParametersFromTranslated(json_file_to_objects(
    #     "generation/surveillance-anomaly-detect-recognition-20_translate.json"),
    #     selected=selected_ind[0])
    #
    # for obj in a:
    #     results[obj["title"]] = obj
    # a = scholar.getParametersFromTranslated(json_file_to_objects(
    #     "generation/surveillance-anomaly-driver-traffic-20_translate.json"),
    #     selected=selected_ind[1])
    # for obj in a:
    #     results[obj["title"]] = obj
    # a = scholar.getParametersFromTranslated(
    #     json_file_to_objects("generation/traffic-anomaly-video-detection_translate.json"),
    #     selected=selected_ind[2])
    # for obj in a:
    #     results[obj["title"]] = obj
    # objects_to_json_file(list(results.values()), "generation/attention_paper.json")
    # print(json.dumps(list(results.values())))

    # Download the PDFs of the previously curated paper list.
    # NOTE(review): this assumes generation/attention_paper.json already exists
    # (it is produced only by the commented-out merge step above, not by this
    # run) — confirm before running, otherwise this raises FileNotFoundError.
    object_list = json_file_to_objects("generation/attention_paper.json")
    scholar.download(object_list)
