|
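"""
批量总结PDF文档 (pdfminer edition): a function plugin that walks a project folder,
extracts text from .tex and .pdf files with pdfminer, asks the language model for a
per-file summary in Chinese, and finally for an overall Chinese and English abstract.
Plugin contributor: Euclid-Jie (per the plugin's self-introduction message below).
"""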
from toolbox import update_ui
from toolbox import CatchException, report_execption
from toolbox import write_history_to_file, promote_file_to_downloadzone
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

fast_debug = False

def readPdf(pdfPath):
    """
    Read a PDF file and return its text content as a list of strings,
    one entry per horizontal text box.
    """
    import pdfminer
    from pdfminer.pdfparser import PDFParser
    from pdfminer.pdfdocument import PDFDocument
    from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
    from pdfminer.layout import LAParams
    from pdfminer.converter import PDFPageAggregator

    outTextList = []
    with open(pdfPath, 'rb') as fp:
        # Parse the document structure and make sure text extraction is permitted
        parser = PDFParser(fp)
        document = PDFDocument(parser)
        if not document.is_extractable:
            raise PDFTextExtractionNotAllowed

        # Layout analysis parameters: a wide char_margin merges characters into lines,
        # boxes_flow controls the reading order of text boxes on the page
        rsrcmgr = PDFResourceManager()
        laparams = LAParams(
            char_margin=10.0,
            line_margin=0.2,
            boxes_flow=0.2,
            all_texts=False,
        )
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        interpreter = PDFPageInterpreter(rsrcmgr, device)

        # Walk every page and collect the text of each horizontal text box
        for page in PDFPage.create_pages(document):
            interpreter.process_page(page)
            layout = device.get_result()
            for obj in layout._objs:
                if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
                    outTextList.append(obj.get_text())

    return outTextList

|
def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, os
    from bs4 import BeautifulSoup
    print('begin analysis on:', file_manifest)

    # First pass: summarize each file individually
    for index, fp in enumerate(file_manifest):
        if fp.lower().endswith('.tex'):
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
        elif fp.lower().endswith('.pdf'):
            file_content = readPdf(fp)
            # Strip markup with BeautifulSoup, then drop characters outside GBK to reduce token usage
            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index == 0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history)

        if not fast_debug:
            msg = '正常'
            # Request a per-file summary; the helper keeps the UI alive while waiting
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            )
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg)
            time.sleep(2)  # brief pause between files

    # Second pass: ask for an overall summary across all files
    all_file = ', '.join([os.path.relpath(fp, project_folder) for fp in file_manifest])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)

    if not fast_debug:
        msg = '正常'
        # Request the overall summary, this time passing the accumulated history
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        )
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)
        # Write the conversation to a file and offer it in the download zone
        res = write_history_to_file(history)
        promote_file_to_downloadzone(res, chatbot=chatbot)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)

|
@CatchException
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []  # clear the history so earlier context is not resent
    import glob, os

    # Report what this plugin does
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
    yield from update_ui(chatbot=chatbot, history=history)

    # Check the extra dependencies this plugin needs
    try:
        import pdfminer, bs4
    except ImportError:
        report_execption(chatbot, history,
            a=f"解析项目: {txt}",
            b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield from update_ui(chatbot=chatbot, history=history)
        return

    # Resolve the project folder from the user input
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)
        return

    # Collect every .tex and .pdf file under the project folder
    file_manifest = glob.glob(f'{project_folder}/**/*.tex', recursive=True) + \
                    glob.glob(f'{project_folder}/**/*.pdf', recursive=True)
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)
        return

    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
|
|
|