from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from toolbox import CatchException, report_execption, write_results_to_file


def get_meta_information(url, chatbot, history):
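    """
    Scrape every article shown on a Google Scholar results page (`url`) and
    return a list of dicts with keys 'title', 'author', 'citation',
    'abstract' and 'is_paper_in_arxiv'. When a scraped title closely matches
    an arxiv record, the truncated Scholar snippet is replaced by the full
    arxiv summary. Yields (chatbot, [], msg) tuples so the UI stays
    responsive while scraping.
    """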
    import requests
    import arxiv
    import difflib
    from bs4 import BeautifulSoup
    from toolbox import get_conf
    proxies, = get_conf('proxies')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
    }
    # fetch the Google Scholar results page through the configured proxy, if any
    response = requests.get(url, proxies=proxies, headers=headers)
    soup = BeautifulSoup(response.text, "html.parser")

    def string_similar(s1, s2):
        # quick_ratio() gives a fast upper bound on the similarity ratio,
        # which is sufficient for matching paper titles
        return difflib.SequenceMatcher(None, s1, s2).quick_ratio()

    profile = []
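    # each ".gs_ri" node is one search result on the page; within it, ".gs_a"
    # holds the author/venue line, ".gs_fl" the footer links (including the
    # citation count), and ".gs_rs" the abstract snippet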
    for result in soup.select(".gs_ri"):
        title = result.a.text.replace('\n', ' ').replace('  ', ' ')
        author = result.select_one(".gs_a").text
        try:
            citation = result.select_one(".gs_fl > a[href*='cites']").text
        except AttributeError:
            # no "cited by" link means the paper has no recorded citations
            citation = 'cited by 0'
        abstract = result.select_one(".gs_rs").text.strip()
        search = arxiv.Search(
            query=title,
            max_results=1,
            sort_by=arxiv.SortCriterion.Relevance,
        )
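        # take the top arxiv hit and accept it only if its title is nearly
        # identical to the scraped one, so a different paper is not mistaken
        # for this result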
        try:
            paper = next(search.results())
        except StopIteration:
            paper = None
        if paper is not None and string_similar(title, paper.title) > 0.90:
            abstract = paper.summary.replace('\n', ' ')
            is_paper_in_arxiv = True
        else:
            # keep the (possibly truncated) Google Scholar snippet
            is_paper_in_arxiv = False
        print(title)
        print(author)
        print(citation)
        profile.append({
            'title': title,
            'author': author,
            'citation': citation,
            'abstract': abstract,
            'is_paper_in_arxiv': is_paper_in_arxiv,
        })
        # stream each entry to the UI as soon as it has been scraped
        chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract]
        msg = "正常"
        yield chatbot, [], msg
    return profile


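# CatchException turns uncaught errors inside the plugin into chat messages
# instead of crashing the UI (behavior inferred from the decorator's name)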
@CatchException
def 谷歌检索小助手(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
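    """
    Plugin entry point. `txt` is expected to be the URL of a Google Scholar
    results page, e.g. https://scholar.google.com/scholar?hl=en&q=transformer
    (an illustrative URL, not one from the project docs). The articles found
    on that page are scraped, enriched from arxiv where possible, and
    summarized in batches by the GPT model.
    """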
    # announce what the plugin does and credit its contributor
    chatbot.append([
        "函数插件功能?",
        "分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."])
    yield chatbot, history, '正常'

    # import the optional dependencies; advise the user on installation if missing
    try:
        import arxiv
        from bs4 import BeautifulSoup
    except ImportError:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
        yield chatbot, history, '正常'
        return

    # clear the history so earlier turns cannot overflow the model's input
    history = []
    meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)

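    # process the papers in batches of 10 per GPT request, assuming each batch
    # must stay small enough to fit within the model's context window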
    batch = 1
    while len(meta_paper_info_list) > 0:
        i_say = "下面是一些学术文献的数据,请从中提取出以下内容。" + \
                "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);5、引用数量(cite);6、中文摘要翻译。" + \
                f"以下是信息源:{str(meta_paper_info_list[:10])}"
        inputs_show_user = f"请分析此页面中出现的所有文章:{txt}"
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say, inputs_show_user=inputs_show_user,
            top_p=top_p, temperature=temperature, chatbot=chatbot, history=[],
            sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown格式。你必须逐个文献进行处理。"
        )
        history.extend([f"第{batch}批", gpt_say])
        meta_paper_info_list = meta_paper_info_list[10:]
        batch += 1

    chatbot.append(["状态?", "已经全部完成"])
    msg = '正常'
    yield chatbot, history, msg
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res))
    yield chatbot, history, msg