from langserve import RemoteRunnable
import re, requests, os

DOWN_PATH="./downloads/"
PROXIES = {
        #          [protocol]://  [address]  :[port]
        "http":  "http://127.0.0.1:7890",  # e.g.  "http":  "http://127.0.0.1:7890",
        "https": "http://127.0.0.1:7890",  # e.g.  "https": "http://127.0.0.1:7890",
    }

def chat_free(human_input, history):
    """Send one chat turn to the remote arxiv agent and return its reply text.

    `history` is a sequence of (human, ai) pairs from previous turns.
    """
    agent = RemoteRunnable("http://localhost:8001/arxiv_agent")
    # Normalize each history entry into the (human, ai) tuple the agent expects.
    past_turns = [(turn[0], turn[1]) for turn in history]
    result = agent.invoke({"input": human_input, "chat_history": past_turns})
    return result["output"]

def pdf_summary(query):
    """Download the arXiv paper matching `query`, summarize it in Chinese,
    then have the chat agent polish the wording.

    Returns (polished_summary, pdf_path).
    """
    pdf_path, info = download_arxiv_(query)
    chain = RemoteRunnable("http://localhost:8001/arxiv_chain")
    # Ask the document chain to extract and translate the abstract.
    question = "请你阅读以下学术论文相关的材料，提取摘要，翻译为中文"
    summary = chain.invoke({"question": question, "url": pdf_path})
    # Append a rewrite instruction and route the whole text through the
    # free-chat agent for a more academic phrasing.
    rewrite_request = summary + "帮我改写，语句更加专业化学术化，只返回摘要;返回格式为：摘要：xxxxxx"
    polished = chat_free(rewrite_request, [])
    return polished, pdf_path


def chat_pdf(query, path):
    """Answer `query` about the document at `path` via the remote arxiv chain."""
    chain = RemoteRunnable("http://localhost:8001/arxiv_chain")
    return chain.invoke({"question": query, "url": path})


def download_arxiv_(url_pdf):
    """Download an arXiv paper's PDF into DOWN_PATH.

    Accepts a full arxiv.org URL (abs or pdf form) or a bare paper id such
    as "1712.00559".  Returns (file_path, other_info) where other_info is
    the metadata dict produced by get_name(), or None when the input cannot
    be recognized as an arXiv reference.
    """
    if 'arxiv.org' not in url_pdf:
        # A bare id like "1712.00559" contains a dot but no slash;
        # turn it into an abs-page URL and retry.
        if ('.' in url_pdf) and ('/' not in url_pdf):
            new_url = 'https://arxiv.org/abs/' + url_pdf
            print('下载编号：', url_pdf, '自动定位：', new_url)
            return download_arxiv_(new_url)
        print('不能识别的URL！')
        return None

    # Normalize an /abs/ URL into its /pdf/ download counterpart.
    if 'abs' in url_pdf:
        url_pdf = url_pdf.replace('abs', 'pdf')
        url_pdf = url_pdf + '.pdf'

    # Derive the abs page URL again so the title/metadata can be scraped.
    url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs')
    title, other_info = get_name(_url_=url_abs)

    # Prefix the title with the publication year when one was found
    # ('2' is a cheap non-empty/plausible-year check for 20xx).
    if '2' in other_info['year']:
        title = other_info['year'] + ' ' + title

    # Prefix well-known venues mentioned in the Comments field.
    known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI']
    for k in known_conf:
        if k in other_info['comment']:
            title = k + ' ' + title

    download_dir = DOWN_PATH
    os.makedirs(download_dir, exist_ok=True)

    # Replace characters that are illegal or awkward in file names with
    # full-width equivalents, and collapse whitespace.
    title_str = title.replace('?', '？')\
        .replace(':', '：')\
        .replace('\"', '“')\
        .replace('\n', '')\
        .replace('  ', ' ')\
        .replace('  ', ' ')

    file_path = os.path.join(download_dir, title_str)

    print('下载中')
    # timeout so a stalled proxy cannot hang forever; raise_for_status so an
    # HTTP error page is not silently saved to disk as a broken "PDF".
    r = requests.get(url_pdf, proxies=PROXIES, timeout=60)
    r.raise_for_status()
    with open(file_path, 'wb') as f:
        f.write(r.content)
    print('下载完成')

    # NOTE(review): the original also built an unused BibTeX-style file name
    # string here; that dead code has been removed.
    return file_path, other_info


def get_name(_url_):
    """Scrape an arXiv abs page for the paper's title and metadata.

    Returns (title + '.pdf', other_details) where other_details always has
    the keys 'year', 'authors' and 'comment' (each '' on failure) and, when
    the year parse succeeds, also 'abstract'.
    """
    from bs4 import BeautifulSoup  # third-party; imported lazily like the rest of the file
    print('正在获取文献名！')
    print(_url_)

    # timeout so a dead proxy does not hang the scrape forever
    res = requests.get(_url_, proxies=PROXIES, timeout=60)

    bs = BeautifulSoup(res.text, 'html.parser')
    other_details = {}

    # Year (and abstract — only extracted when the year parse succeeds,
    # preserving the original control flow).
    try:
        year = bs.find_all(class_='dateline')[0].text
        year = re.search(r'(\d{4})', year, re.M | re.I).group(1)
        other_details['year'] = year
        abstract = bs.find_all(class_='abstract mathjax')[0].text
        other_details['abstract'] = abstract
    except Exception:
        other_details['year'] = ''
        print('年份获取失败')

    # Authors: everything after the "Authors:" label.
    try:
        authors = bs.find_all(class_='authors')[0].text
        authors = authors.split('Authors:')[1]
        other_details['authors'] = authors
    except Exception:
        other_details['authors'] = ''
        print('authors获取失败')

    # Comments field (may name the publication venue).
    try:
        comment = bs.find_all(class_='metatable')[0].text
        real_comment = None
        for item in comment.replace('\n', ' ').split('   '):
            if 'Comments' in item:
                real_comment = item
        if real_comment is not None:
            other_details['comment'] = real_comment
        else:
            other_details['comment'] = ''
    except Exception:
        other_details['comment'] = ''
        # fixed: the original printed 年份获取失败 ("year fetch failed") here
        print('comment获取失败')

    # Reuse the already-parsed soup instead of re-parsing the whole page.
    title_str = bs.find('title').contents[0]
    print('获取成功：', title_str)

    return title_str + '.pdf', other_details


