from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False

def readPdf(pdfPath):
    """
    Read a PDF file and return its text content as a list of text-box strings.
    """
    import pdfminer
    from pdfminer.pdfparser import PDFParser
    from pdfminer.pdfdocument import PDFDocument
    from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
    from pdfminer.pdfdevice import PDFDevice
    from pdfminer.layout import LAParams
    from pdfminer.converter import PDFPageAggregator

    fp = open(pdfPath, 'rb')

    # Create a PDF parser object associated with the file object
    parser = PDFParser(fp)

    # Create a PDF document object that stores the document structure.
    # Password for initialization as 2nd parameter
    document = PDFDocument(parser)
    # Check if the document allows text extraction. If not, abort.
    if not document.is_extractable:
        raise PDFTextExtractionNotAllowed

    # Create a PDF resource manager object that stores shared resources.
    rsrcmgr = PDFResourceManager()

    # Create a PDF device object.
    # device = PDFDevice(rsrcmgr)

    # BEGIN LAYOUT ANALYSIS.
    # Set parameters for analysis.
    laparams = LAParams(
        char_margin=10.0,
        line_margin=0.2,
        boxes_flow=0.2,
        all_texts=False,
    )
    # Create a PDF page aggregator object.
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    # Create a PDF interpreter object.
    interpreter = PDFPageInterpreter(rsrcmgr, device)

    # loop over all pages in the document
    outTextList = []
    for page in PDFPage.create_pages(document):
        # read the page into a layout object
        interpreter.process_page(page)
        layout = device.get_result()
        for obj in layout._objs:
            if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
                # print(obj.get_text())
                outTextList.append(obj.get_text())

    return outTextList


def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    """
    Summarize each file in file_manifest one by one, then request an overall
    Chinese and English abstract and write the results to a file.
    """
    import time, glob, os
    from bs4 import BeautifulSoup
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        if ".tex" in fp:
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
        if ".pdf" in fp.lower():
            file_content = readPdf(fp)
            # Round-trip through GBK to drop characters it cannot encode (rough token reduction)
            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index == 0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            )  # with timeout countdown
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        )  # with timeout countdown
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI


@CatchException
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    Plugin entry point: batch-summarize the .tex/.pdf files found under the
    folder given in txt, using pdfminer for PDF text extraction.
    """
    history = []    # clear history to avoid overflowing the input
    import glob, os

    # Basic info: feature description and contributor
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import the extra dependencies; if they are missing, suggest how to install them
    try:
        import pdfminer, bs4
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
    # + [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
    # + [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"找不到任何.tex或.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)