Spaces:
Running
Running
更新
Browse files- .gitignore +3 -1
- Dockerfile +4 -4
- app.py +73 -48
- check_proxy.py +118 -3
- colorful.py +91 -0
- config.py +12 -5
- functional.py → core_functional.py +5 -4
- functional_crazy.py → crazy_functional.py +85 -26
- crazy_functions/Latex全文润色.py +176 -0
- crazy_functions/Latex全文翻译.py +176 -0
- crazy_functions/crazy_utils.py +362 -0
- crazy_functions/下载arxiv论文翻译摘要.py +19 -11
- crazy_functions/代码重写为全英文_多线程.py +92 -28
- crazy_functions/总结word文档.py +33 -21
- crazy_functions/批量总结PDF文档.py +32 -20
- crazy_functions/批量总结PDF文档pdfminer.py +31 -22
- crazy_functions/批量翻译PDF文档_多线程.py +296 -0
- crazy_functions/理解PDF文档内容.py +186 -0
- crazy_functions/生成函数注释.py +14 -17
- crazy_functions/解析项目源代码.py +124 -104
- crazy_functions/读文章写摘要.py +15 -18
- crazy_functions/谷歌检索小助手.py +106 -0
- crazy_functions/高级功能函数模板.py +20 -16
- main.py +174 -0
- request_llm/README.md +36 -0
- predict.py → request_llm/bridge_chatgpt.py +68 -63
- request_llm/bridge_tgui.py +167 -0
- requirements.txt +0 -11
- self_analysis.md +0 -175
- show_math.py +0 -80
- theme.py +91 -12
- toolbox.py +253 -68
- version +5 -0
- 步骤 +0 -27
.gitignore
CHANGED
@@ -139,4 +139,6 @@ config_private.py
|
|
139 |
gpt_log
|
140 |
private.md
|
141 |
private_upload
|
142 |
-
other_llms
|
|
|
|
|
|
139 |
gpt_log
|
140 |
private.md
|
141 |
private_upload
|
142 |
+
other_llms
|
143 |
+
cradle*
|
144 |
+
debug*
|
Dockerfile
CHANGED
@@ -1,13 +1,13 @@
|
|
1 |
FROM python:3.11
|
2 |
-
|
3 |
RUN echo '[global]' > /etc/pip.conf && \
|
4 |
echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
|
5 |
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
|
6 |
|
7 |
-
RUN pip3 install gradio requests[socks] mdtex2html
|
8 |
|
9 |
-
COPY . /gpt
|
10 |
WORKDIR /gpt
|
|
|
|
|
11 |
|
|
|
12 |
|
13 |
-
CMD ["python3", "main.py"]
|
|
|
1 |
FROM python:3.11
|
|
|
2 |
RUN echo '[global]' > /etc/pip.conf && \
|
3 |
echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
|
4 |
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
|
5 |
|
|
|
6 |
|
|
|
7 |
WORKDIR /gpt
|
8 |
+
COPY requirements.txt .
|
9 |
+
RUN pip3 install -r requirements.txt
|
10 |
|
11 |
+
COPY . .
|
12 |
|
13 |
+
CMD ["python3", "-u", "main.py"]
|
app.py
CHANGED
@@ -1,18 +1,19 @@
|
|
1 |
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
2 |
import gradio as gr
|
3 |
-
from
|
4 |
-
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf
|
5 |
|
6 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
7 |
-
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT = \
|
8 |
-
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT')
|
9 |
|
10 |
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
11 |
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
12 |
if not AUTHENTICATION: AUTHENTICATION = None
|
13 |
|
|
|
14 |
initial_prompt = "Serve me as a writing and programming assistant."
|
15 |
-
title_html = "<h1 align=\"center\">ChatGPT
|
16 |
description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
|
17 |
|
18 |
# 问询记录, python 版本建议3.9+(越新越好)
|
@@ -23,12 +24,12 @@ except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.IN
|
|
23 |
print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
|
24 |
|
25 |
# 一些普通功能模块
|
26 |
-
from
|
27 |
-
functional =
|
28 |
|
29 |
# 高级函数插件
|
30 |
-
from
|
31 |
-
crazy_fns =
|
32 |
|
33 |
# 处理markdown文本格式的转变
|
34 |
gr.Chatbot.postprocess = format_io
|
@@ -37,30 +38,38 @@ gr.Chatbot.postprocess = format_io
|
|
37 |
from theme import adjust_theme, advanced_css
|
38 |
set_theme = adjust_theme()
|
39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
cancel_handles = []
|
41 |
-
with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
|
42 |
gr.HTML(title_html)
|
43 |
-
|
44 |
-
gr.
|
45 |
-
|
46 |
-
|
47 |
-
with gr.Column(scale=2):
|
48 |
chatbot = gr.Chatbot()
|
49 |
chatbot.style(height=CHATBOT_HEIGHT)
|
50 |
history = gr.State([])
|
51 |
-
with
|
52 |
-
with gr.
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
from check_proxy import check_proxy
|
63 |
-
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {check_proxy(proxies)}")
|
64 |
with gr.Accordion("基础功能区", open=True) as area_basic_fn:
|
65 |
with gr.Row():
|
66 |
for k in functional:
|
@@ -68,12 +77,13 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
|
|
68 |
functional[k]["Button"] = gr.Button(k, variant=variant)
|
69 |
with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
|
70 |
with gr.Row():
|
71 |
-
gr.Markdown("
|
72 |
with gr.Row():
|
73 |
for k in crazy_fns:
|
74 |
if not crazy_fns[k].get("AsButton", True): continue
|
75 |
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
|
76 |
crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
|
|
|
77 |
with gr.Row():
|
78 |
with gr.Accordion("更多函数插件", open=True):
|
79 |
dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
|
@@ -84,38 +94,51 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
|
|
84 |
with gr.Row():
|
85 |
with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
|
86 |
file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
|
87 |
-
with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=
|
88 |
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
|
89 |
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
90 |
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
91 |
-
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
|
92 |
gr.Markdown(description)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
93 |
# 功能区显示开关与功能区的互动
|
94 |
def fn_area_visibility(a):
|
95 |
ret = {}
|
96 |
ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
|
97 |
ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
|
|
|
|
|
|
|
98 |
return ret
|
99 |
-
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn] )
|
100 |
# 整理反复出现的控件句柄组合
|
101 |
-
input_combo = [txt,
|
102 |
-
output_combo = [chatbot, history, status]
|
103 |
-
predict_args = dict(fn=predict, inputs=input_combo, outputs=output_combo)
|
104 |
-
empty_txt_args = dict(fn=lambda: "", inputs=[], outputs=[txt]) # 用于在提交后清空输入栏
|
105 |
# 提交按钮、重置按钮
|
106 |
-
cancel_handles.append(txt.submit(**predict_args))
|
107 |
-
cancel_handles.append(
|
108 |
-
|
|
|
|
|
|
|
109 |
# 基础功能区的回调函数注册
|
110 |
for k in functional:
|
111 |
-
click_handle = functional[k]["Button"].click(predict, [*input_combo, gr.State(True), gr.State(k)], output_combo)
|
112 |
cancel_handles.append(click_handle)
|
113 |
# 文件上传区,接收文件后与chatbot的互动
|
114 |
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
|
115 |
# 函数插件-固定按钮区
|
116 |
for k in crazy_fns:
|
117 |
if not crazy_fns[k].get("AsButton", True): continue
|
118 |
-
click_handle = crazy_fns[k]["Button"].click(crazy_fns[k]["Function"], [*input_combo, gr.State(PORT)], output_combo)
|
119 |
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
120 |
cancel_handles.append(click_handle)
|
121 |
# 函数插件-下拉菜单与随变按钮的互动
|
@@ -126,7 +149,7 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
|
|
126 |
# 随变按钮的回调函数注册
|
127 |
def route(k, *args, **kwargs):
|
128 |
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
|
129 |
-
yield from crazy_fns[k]["Function"](*args, **kwargs)
|
130 |
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
|
131 |
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
132 |
# def expand_file_area(file_upload, area_file_up):
|
@@ -135,16 +158,18 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
|
|
135 |
cancel_handles.append(click_handle)
|
136 |
# 终止按钮的回调函数注册
|
137 |
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
138 |
-
|
139 |
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
140 |
def auto_opentab_delay():
|
141 |
import threading, webbrowser, time
|
142 |
-
print(f"如果浏览器没有自动打开,请复制并转到以下URL
|
|
|
|
|
143 |
def open():
|
144 |
-
time.sleep(2)
|
145 |
-
webbrowser.open_new_tab(f"http://localhost:{PORT}")
|
146 |
threading.Thread(target=open, name="open-browser", daemon=True).start()
|
|
|
147 |
|
148 |
auto_opentab_delay()
|
149 |
-
demo.
|
150 |
-
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False)
|
|
|
1 |
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
2 |
import gradio as gr
|
3 |
+
from request_llm.bridge_chatgpt import predict
|
4 |
+
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
|
5 |
|
6 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
7 |
+
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
|
8 |
+
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
|
9 |
|
10 |
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
11 |
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
12 |
if not AUTHENTICATION: AUTHENTICATION = None
|
13 |
|
14 |
+
from check_proxy import get_current_version
|
15 |
initial_prompt = "Serve me as a writing and programming assistant."
|
16 |
+
title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
|
17 |
description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
|
18 |
|
19 |
# 问询记录, python 版本建议3.9+(越新越好)
|
|
|
24 |
print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
|
25 |
|
26 |
# 一些普通功能模块
|
27 |
+
from core_functional import get_core_functions
|
28 |
+
functional = get_core_functions()
|
29 |
|
30 |
# 高级函数插件
|
31 |
+
from crazy_functional import get_crazy_functions
|
32 |
+
crazy_fns = get_crazy_functions()
|
33 |
|
34 |
# 处理markdown文本格式的转变
|
35 |
gr.Chatbot.postprocess = format_io
|
|
|
38 |
from theme import adjust_theme, advanced_css
|
39 |
set_theme = adjust_theme()
|
40 |
|
41 |
+
# 代理与自动更新
|
42 |
+
from check_proxy import check_proxy, auto_update
|
43 |
+
proxy_info = check_proxy(proxies)
|
44 |
+
|
45 |
+
gr_L1 = lambda: gr.Row().style()
|
46 |
+
gr_L2 = lambda scale: gr.Column(scale=scale)
|
47 |
+
if LAYOUT == "TOP-DOWN":
|
48 |
+
gr_L1 = lambda: DummyWith()
|
49 |
+
gr_L2 = lambda scale: gr.Row()
|
50 |
+
CHATBOT_HEIGHT /= 2
|
51 |
+
|
52 |
cancel_handles = []
|
53 |
+
with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
|
54 |
gr.HTML(title_html)
|
55 |
+
gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!打开页面后请在输入框内输入API-KEY然后回车。<br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!</center>''')
|
56 |
+
cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
|
57 |
+
with gr_L1():
|
58 |
+
with gr_L2(scale=2):
|
|
|
59 |
chatbot = gr.Chatbot()
|
60 |
chatbot.style(height=CHATBOT_HEIGHT)
|
61 |
history = gr.State([])
|
62 |
+
with gr_L2(scale=1):
|
63 |
+
with gr.Accordion("输入区", open=True) as area_input_primary:
|
64 |
+
with gr.Row():
|
65 |
+
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
|
66 |
+
with gr.Row():
|
67 |
+
submitBtn = gr.Button("提交", variant="primary")
|
68 |
+
with gr.Row():
|
69 |
+
resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
|
70 |
+
stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
|
71 |
+
with gr.Row():
|
72 |
+
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
|
|
|
|
|
73 |
with gr.Accordion("基础功能区", open=True) as area_basic_fn:
|
74 |
with gr.Row():
|
75 |
for k in functional:
|
|
|
77 |
functional[k]["Button"] = gr.Button(k, variant=variant)
|
78 |
with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
|
79 |
with gr.Row():
|
80 |
+
gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
|
81 |
with gr.Row():
|
82 |
for k in crazy_fns:
|
83 |
if not crazy_fns[k].get("AsButton", True): continue
|
84 |
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
|
85 |
crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
|
86 |
+
crazy_fns[k]["Button"].style(size="sm")
|
87 |
with gr.Row():
|
88 |
with gr.Accordion("更多函数插件", open=True):
|
89 |
dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
|
|
|
94 |
with gr.Row():
|
95 |
with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
|
96 |
file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
|
97 |
+
with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
|
98 |
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
|
99 |
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
100 |
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
101 |
+
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
|
102 |
gr.Markdown(description)
|
103 |
+
with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
|
104 |
+
with gr.Row():
|
105 |
+
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
|
106 |
+
with gr.Row():
|
107 |
+
submitBtn2 = gr.Button("提交", variant="primary")
|
108 |
+
with gr.Row():
|
109 |
+
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
|
110 |
+
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
|
111 |
# 功能区显示开关与功能区的互动
|
112 |
def fn_area_visibility(a):
|
113 |
ret = {}
|
114 |
ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
|
115 |
ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
|
116 |
+
ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
|
117 |
+
ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
|
118 |
+
if "底部输入区" in a: ret.update({txt: gr.update(value="")})
|
119 |
return ret
|
120 |
+
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
|
121 |
# 整理反复出现的控件句柄组合
|
122 |
+
input_combo = [cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
|
123 |
+
output_combo = [cookies, chatbot, history, status]
|
124 |
+
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
|
|
|
125 |
# 提交按钮、重置按钮
|
126 |
+
cancel_handles.append(txt.submit(**predict_args))
|
127 |
+
cancel_handles.append(txt2.submit(**predict_args))
|
128 |
+
cancel_handles.append(submitBtn.click(**predict_args))
|
129 |
+
cancel_handles.append(submitBtn2.click(**predict_args))
|
130 |
+
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
|
131 |
+
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
|
132 |
# 基础功能区的回调函数注册
|
133 |
for k in functional:
|
134 |
+
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
|
135 |
cancel_handles.append(click_handle)
|
136 |
# 文件上传区,接收文件后与chatbot的互动
|
137 |
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
|
138 |
# 函数插件-固定按钮区
|
139 |
for k in crazy_fns:
|
140 |
if not crazy_fns[k].get("AsButton", True): continue
|
141 |
+
click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
|
142 |
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
143 |
cancel_handles.append(click_handle)
|
144 |
# 函数插件-下拉菜单与随变按钮的互动
|
|
|
149 |
# 随变按钮的回调函数注册
|
150 |
def route(k, *args, **kwargs):
|
151 |
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
|
152 |
+
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
|
153 |
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
|
154 |
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
155 |
# def expand_file_area(file_upload, area_file_up):
|
|
|
158 |
cancel_handles.append(click_handle)
|
159 |
# 终止按钮的回调函数注册
|
160 |
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
161 |
+
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
162 |
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
163 |
def auto_opentab_delay():
|
164 |
import threading, webbrowser, time
|
165 |
+
print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
|
166 |
+
print(f"\t(亮色主题): http://localhost:{PORT}")
|
167 |
+
print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
|
168 |
def open():
|
169 |
+
time.sleep(2) # 打开浏览器
|
170 |
+
webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
|
171 |
threading.Thread(target=open, name="open-browser", daemon=True).start()
|
172 |
+
threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
|
173 |
|
174 |
auto_opentab_delay()
|
175 |
+
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False)
|
|
check_proxy.py
CHANGED
@@ -3,7 +3,8 @@ def check_proxy(proxies):
|
|
3 |
import requests
|
4 |
proxies_https = proxies['https'] if proxies is not None else '无'
|
5 |
try:
|
6 |
-
response = requests.get("https://ipapi.co/json/",
|
|
|
7 |
data = response.json()
|
8 |
print(f'查询代理的地理位置,返回的结果是{data}')
|
9 |
if 'country_name' in data:
|
@@ -19,9 +20,123 @@ def check_proxy(proxies):
|
|
19 |
return result
|
20 |
|
21 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
if __name__ == '__main__':
|
23 |
-
import os
|
|
|
24 |
from toolbox import get_conf
|
25 |
proxies, = get_conf('proxies')
|
26 |
check_proxy(proxies)
|
27 |
-
|
|
|
3 |
import requests
|
4 |
proxies_https = proxies['https'] if proxies is not None else '无'
|
5 |
try:
|
6 |
+
response = requests.get("https://ipapi.co/json/",
|
7 |
+
proxies=proxies, timeout=4)
|
8 |
data = response.json()
|
9 |
print(f'查询代理的地理位置,返回的结果是{data}')
|
10 |
if 'country_name' in data:
|
|
|
20 |
return result
|
21 |
|
22 |
|
23 |
+
def backup_and_download(current_version, remote_version):
|
24 |
+
"""
|
25 |
+
一键更新协议:备份和下载
|
26 |
+
"""
|
27 |
+
from toolbox import get_conf
|
28 |
+
import shutil
|
29 |
+
import os
|
30 |
+
import requests
|
31 |
+
import zipfile
|
32 |
+
os.makedirs(f'./history', exist_ok=True)
|
33 |
+
backup_dir = f'./history/backup-{current_version}/'
|
34 |
+
new_version_dir = f'./history/new-version-{remote_version}/'
|
35 |
+
if os.path.exists(new_version_dir):
|
36 |
+
return new_version_dir
|
37 |
+
os.makedirs(new_version_dir)
|
38 |
+
shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
|
39 |
+
proxies, = get_conf('proxies')
|
40 |
+
r = requests.get(
|
41 |
+
'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
|
42 |
+
zip_file_path = backup_dir+'/master.zip'
|
43 |
+
with open(zip_file_path, 'wb+') as f:
|
44 |
+
f.write(r.content)
|
45 |
+
dst_path = new_version_dir
|
46 |
+
with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
|
47 |
+
for zip_info in zip_ref.infolist():
|
48 |
+
dst_file_path = os.path.join(dst_path, zip_info.filename)
|
49 |
+
if os.path.exists(dst_file_path):
|
50 |
+
os.remove(dst_file_path)
|
51 |
+
zip_ref.extract(zip_info, dst_path)
|
52 |
+
return new_version_dir
|
53 |
+
|
54 |
+
|
55 |
+
def patch_and_restart(path):
|
56 |
+
"""
|
57 |
+
一键更新协议:覆盖和重启
|
58 |
+
"""
|
59 |
+
import distutils
|
60 |
+
import shutil
|
61 |
+
import os
|
62 |
+
import sys
|
63 |
+
import time
|
64 |
+
from colorful import print亮黄, print亮绿, print亮红
|
65 |
+
# if not using config_private, move origin config.py as config_private.py
|
66 |
+
if not os.path.exists('config_private.py'):
|
67 |
+
print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
|
68 |
+
'另外您可以随时在history子文件夹下找回旧版的程序。')
|
69 |
+
shutil.copyfile('config.py', 'config_private.py')
|
70 |
+
distutils.dir_util.copy_tree(path+'/chatgpt_academic-master', './')
|
71 |
+
import subprocess
|
72 |
+
print亮绿('代码已经更新,即将更新pip包依赖……')
|
73 |
+
for i in reversed(range(5)): time.sleep(1); print(i)
|
74 |
+
try:
|
75 |
+
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
|
76 |
+
except:
|
77 |
+
print亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
|
78 |
+
print亮绿('更新完成,您可以随时在history子文件夹下找回旧版的程序,5s之后重启')
|
79 |
+
print亮红('假如重启失败,您可能需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
|
80 |
+
print(' ------------------------------ -----------------------------------')
|
81 |
+
for i in reversed(range(8)): time.sleep(1); print(i)
|
82 |
+
os.execl(sys.executable, sys.executable, *sys.argv)
|
83 |
+
|
84 |
+
|
85 |
+
def get_current_version():
|
86 |
+
import json
|
87 |
+
try:
|
88 |
+
with open('./version', 'r', encoding='utf8') as f:
|
89 |
+
current_version = json.loads(f.read())['version']
|
90 |
+
except:
|
91 |
+
current_version = ""
|
92 |
+
return current_version
|
93 |
+
|
94 |
+
|
95 |
+
def auto_update():
|
96 |
+
"""
|
97 |
+
一键更新协议:查询版本和用户意见
|
98 |
+
"""
|
99 |
+
try:
|
100 |
+
from toolbox import get_conf
|
101 |
+
import requests
|
102 |
+
import time
|
103 |
+
import json
|
104 |
+
proxies, = get_conf('proxies')
|
105 |
+
response = requests.get(
|
106 |
+
"https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=1)
|
107 |
+
remote_json_data = json.loads(response.text)
|
108 |
+
remote_version = remote_json_data['version']
|
109 |
+
if remote_json_data["show_feature"]:
|
110 |
+
new_feature = "新功能:" + remote_json_data["new_feature"]
|
111 |
+
else:
|
112 |
+
new_feature = ""
|
113 |
+
with open('./version', 'r', encoding='utf8') as f:
|
114 |
+
current_version = f.read()
|
115 |
+
current_version = json.loads(current_version)['version']
|
116 |
+
if (remote_version - current_version) >= 0.01:
|
117 |
+
from colorful import print亮黄
|
118 |
+
print亮黄(
|
119 |
+
f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
|
120 |
+
print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
|
121 |
+
user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
|
122 |
+
if user_instruction in ['Y', 'y']:
|
123 |
+
path = backup_and_download(current_version, remote_version)
|
124 |
+
try:
|
125 |
+
patch_and_restart(path)
|
126 |
+
except:
|
127 |
+
print('更新失败。')
|
128 |
+
else:
|
129 |
+
print('自动更新程序:已禁用')
|
130 |
+
return
|
131 |
+
else:
|
132 |
+
return
|
133 |
+
except:
|
134 |
+
print('自动更新程序:已禁用')
|
135 |
+
|
136 |
+
|
137 |
if __name__ == '__main__':
|
138 |
+
import os
|
139 |
+
os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
140 |
from toolbox import get_conf
|
141 |
proxies, = get_conf('proxies')
|
142 |
check_proxy(proxies)
|
|
colorful.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import platform
|
2 |
+
from sys import stdout
|
3 |
+
|
4 |
+
if platform.system()=="Linux":
|
5 |
+
pass
|
6 |
+
else:
|
7 |
+
from colorama import init
|
8 |
+
init()
|
9 |
+
|
10 |
+
# Do you like the elegance of Chinese characters?
|
11 |
+
def print红(*kw,**kargs):
|
12 |
+
print("\033[0;31m",*kw,"\033[0m",**kargs)
|
13 |
+
def print绿(*kw,**kargs):
|
14 |
+
print("\033[0;32m",*kw,"\033[0m",**kargs)
|
15 |
+
def print黄(*kw,**kargs):
|
16 |
+
print("\033[0;33m",*kw,"\033[0m",**kargs)
|
17 |
+
def print蓝(*kw,**kargs):
|
18 |
+
print("\033[0;34m",*kw,"\033[0m",**kargs)
|
19 |
+
def print紫(*kw,**kargs):
|
20 |
+
print("\033[0;35m",*kw,"\033[0m",**kargs)
|
21 |
+
def print靛(*kw,**kargs):
|
22 |
+
print("\033[0;36m",*kw,"\033[0m",**kargs)
|
23 |
+
|
24 |
+
def print亮红(*kw,**kargs):
|
25 |
+
print("\033[1;31m",*kw,"\033[0m",**kargs)
|
26 |
+
def print亮绿(*kw,**kargs):
|
27 |
+
print("\033[1;32m",*kw,"\033[0m",**kargs)
|
28 |
+
def print亮黄(*kw,**kargs):
|
29 |
+
print("\033[1;33m",*kw,"\033[0m",**kargs)
|
30 |
+
def print亮蓝(*kw,**kargs):
|
31 |
+
print("\033[1;34m",*kw,"\033[0m",**kargs)
|
32 |
+
def print亮紫(*kw,**kargs):
|
33 |
+
print("\033[1;35m",*kw,"\033[0m",**kargs)
|
34 |
+
def print亮靛(*kw,**kargs):
|
35 |
+
print("\033[1;36m",*kw,"\033[0m",**kargs)
|
36 |
+
|
37 |
+
|
38 |
+
|
39 |
+
def print亮红(*kw,**kargs):
|
40 |
+
print("\033[1;31m",*kw,"\033[0m",**kargs)
|
41 |
+
def print亮绿(*kw,**kargs):
|
42 |
+
print("\033[1;32m",*kw,"\033[0m",**kargs)
|
43 |
+
def print亮黄(*kw,**kargs):
|
44 |
+
print("\033[1;33m",*kw,"\033[0m",**kargs)
|
45 |
+
def print亮蓝(*kw,**kargs):
|
46 |
+
print("\033[1;34m",*kw,"\033[0m",**kargs)
|
47 |
+
def print亮紫(*kw,**kargs):
|
48 |
+
print("\033[1;35m",*kw,"\033[0m",**kargs)
|
49 |
+
def print亮靛(*kw,**kargs):
|
50 |
+
print("\033[1;36m",*kw,"\033[0m",**kargs)
|
51 |
+
|
52 |
+
print_red = print红
|
53 |
+
print_green = print绿
|
54 |
+
print_yellow = print黄
|
55 |
+
print_blue = print蓝
|
56 |
+
print_purple = print紫
|
57 |
+
print_indigo = print靛
|
58 |
+
|
59 |
+
print_bold_red = print亮红
|
60 |
+
print_bold_green = print亮绿
|
61 |
+
print_bold_yellow = print亮黄
|
62 |
+
print_bold_blue = print亮蓝
|
63 |
+
print_bold_purple = print亮紫
|
64 |
+
print_bold_indigo = print亮靛
|
65 |
+
|
66 |
+
if not stdout.isatty():
|
67 |
+
# redirection, avoid a fucked up log file
|
68 |
+
print红 = print
|
69 |
+
print绿 = print
|
70 |
+
print黄 = print
|
71 |
+
print蓝 = print
|
72 |
+
print紫 = print
|
73 |
+
print靛 = print
|
74 |
+
print亮红 = print
|
75 |
+
print亮绿 = print
|
76 |
+
print亮黄 = print
|
77 |
+
print亮蓝 = print
|
78 |
+
print亮紫 = print
|
79 |
+
print亮靛 = print
|
80 |
+
print_red = print
|
81 |
+
print_green = print
|
82 |
+
print_yellow = print
|
83 |
+
print_blue = print
|
84 |
+
print_purple = print
|
85 |
+
print_indigo = print
|
86 |
+
print_bold_red = print
|
87 |
+
print_bold_green = print
|
88 |
+
print_bold_yellow = print
|
89 |
+
print_bold_blue = print
|
90 |
+
print_bold_purple = print
|
91 |
+
print_bold_indigo = print
|
config.py
CHANGED
@@ -11,10 +11,10 @@ if USE_PROXY:
|
|
11 |
# [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
|
12 |
|
13 |
# 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
|
14 |
-
proxies = {
|
15 |
# [协议]:// [地址] :[端口]
|
16 |
-
"http": "socks5h://localhost:11284",
|
17 |
-
"https": "socks5h://localhost:11284",
|
18 |
}
|
19 |
else:
|
20 |
proxies = None
|
@@ -24,8 +24,14 @@ else:
|
|
24 |
# 对话窗的高度
|
25 |
CHATBOT_HEIGHT = 1115
|
26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
# 发送请求到OpenAI后,等待多久判定为超时
|
28 |
-
TIMEOUT_SECONDS =
|
29 |
|
30 |
# 网页的端口, -1代表随机端口
|
31 |
WEB_PORT = -1
|
@@ -43,4 +49,5 @@ API_URL = "https://api.openai.com/v1/chat/completions"
|
|
43 |
CONCURRENT_COUNT = 100
|
44 |
|
45 |
# 设置用户名和密码(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
|
46 |
-
|
|
|
|
11 |
# [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
|
12 |
|
13 |
# 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
|
14 |
+
proxies = {
|
15 |
# [协议]:// [地址] :[端口]
|
16 |
+
"http": "socks5h://localhost:11284",
|
17 |
+
"https": "socks5h://localhost:11284",
|
18 |
}
|
19 |
else:
|
20 |
proxies = None
|
|
|
24 |
# 对话窗的高度
|
25 |
CHATBOT_HEIGHT = 1115
|
26 |
|
27 |
+
# 代码高亮
|
28 |
+
CODE_HIGHLIGHT = True
|
29 |
+
|
30 |
+
# 窗口布局
|
31 |
+
LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
|
32 |
+
|
33 |
# 发送请求到OpenAI后,等待多久判定为超时
|
34 |
+
TIMEOUT_SECONDS = 30
|
35 |
|
36 |
# 网页的端口, -1代表随机端口
|
37 |
WEB_PORT = -1
|
|
|
49 |
CONCURRENT_COUNT = 100
|
50 |
|
51 |
# 设置用户名和密码(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
|
52 |
+
# [("username", "password"), ("username2", "password2"), ...]
|
53 |
+
AUTHENTICATION = []
|
functional.py → core_functional.py
RENAMED
@@ -4,19 +4,20 @@
|
|
4 |
# 默认按钮颜色是 secondary
|
5 |
from toolbox import clear_line_break
|
6 |
|
7 |
-
|
|
|
8 |
return {
|
9 |
"英语学术润色": {
|
10 |
# 前言
|
11 |
"Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
|
12 |
r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
|
13 |
r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
|
14 |
-
# 后语
|
15 |
"Suffix": r"",
|
16 |
"Color": r"secondary", # 按钮颜色
|
17 |
},
|
18 |
"中文学术润色": {
|
19 |
-
"Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
|
20 |
r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
|
21 |
"Suffix": r"",
|
22 |
},
|
@@ -55,7 +56,7 @@ def get_functionals():
|
|
55 |
"Color": "secondary",
|
56 |
},
|
57 |
"英译中": {
|
58 |
-
"Prefix": r"
|
59 |
"Suffix": r"",
|
60 |
},
|
61 |
"找图片": {
|
|
|
4 |
# 默认按钮颜色是 secondary
|
5 |
from toolbox import clear_line_break
|
6 |
|
7 |
+
|
8 |
+
def get_core_functions():
|
9 |
return {
|
10 |
"英语学术润色": {
|
11 |
# 前言
|
12 |
"Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
|
13 |
r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
|
14 |
r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
|
15 |
+
# 后语
|
16 |
"Suffix": r"",
|
17 |
"Color": r"secondary", # 按钮颜色
|
18 |
},
|
19 |
"中文学术润色": {
|
20 |
+
"Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
|
21 |
r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
|
22 |
"Suffix": r"",
|
23 |
},
|
|
|
56 |
"Color": "secondary",
|
57 |
},
|
58 |
"英译中": {
|
59 |
+
"Prefix": r"翻译成地道的中文:" + "\n\n",
|
60 |
"Suffix": r"",
|
61 |
},
|
62 |
"找图片": {
|
functional_crazy.py → crazy_functional.py
RENAMED
@@ -1,6 +1,7 @@
|
|
1 |
-
from toolbox import HotReload
|
2 |
|
3 |
-
|
|
|
4 |
###################### 第一组插件 ###########################
|
5 |
# [第一组插件]: 最早期编写的项目插件和一些demo
|
6 |
from crazy_functions.读文章写摘要 import 读文章写摘要
|
@@ -14,76 +15,137 @@ def get_crazy_functionals():
|
|
14 |
from crazy_functions.解析项目源代码 import 解析一个Rect项目
|
15 |
from crazy_functions.高级功能函数模板 import 高阶功能模板函数
|
16 |
from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
|
|
|
17 |
|
18 |
function_plugins = {
|
19 |
-
|
20 |
-
|
21 |
-
"Function": 解析项目本身
|
22 |
-
},
|
23 |
-
"解析整个Py项目": {
|
24 |
"Color": "stop", # 按钮颜色
|
25 |
-
"Function": 解析一个Python项目
|
26 |
},
|
27 |
"解析整个C++项目头文件": {
|
28 |
"Color": "stop", # 按钮颜色
|
29 |
-
"Function": 解析一个C项目的头文件
|
30 |
},
|
31 |
-
"解析整个C++项目(.cpp/.h)": {
|
32 |
"Color": "stop", # 按钮颜色
|
33 |
"AsButton": False, # 加入下拉菜单中
|
34 |
-
"Function": 解析一个C项目
|
35 |
},
|
36 |
"解析整个Go项目": {
|
37 |
"Color": "stop", # 按钮颜色
|
38 |
"AsButton": False, # 加入下拉菜单中
|
39 |
-
"Function": 解析一个Golang项目
|
40 |
},
|
41 |
"解析整个Java项目": {
|
42 |
"Color": "stop", # 按钮颜色
|
43 |
"AsButton": False, # 加入下拉菜单中
|
44 |
-
"Function": 解析一个Java项目
|
45 |
},
|
46 |
-
"解析整个
|
47 |
"Color": "stop", # 按钮颜色
|
48 |
"AsButton": False, # 加入下拉菜单中
|
49 |
-
"Function": 解析一个Rect项目
|
50 |
},
|
51 |
"读Tex论文写摘要": {
|
52 |
"Color": "stop", # 按钮颜色
|
53 |
-
"Function": 读文章写摘要
|
54 |
},
|
55 |
"批量生成函数注释": {
|
56 |
"Color": "stop", # 按钮颜色
|
57 |
-
"Function": 批量生成函数注释
|
|
|
|
|
|
|
58 |
},
|
59 |
"[多线程demo] 把本项目源代码切换成全英文": {
|
60 |
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
|
|
61 |
"Function": HotReload(全项目切换英文)
|
62 |
},
|
63 |
-
"[函数插件模板
|
64 |
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
65 |
"Function": HotReload(高阶功能模板函数)
|
66 |
},
|
|
|
67 |
}
|
68 |
###################### 第二组插件 ###########################
|
69 |
# [第二组插件]: 经过充分测试,但功能上距离达到完美状态还差一点点
|
70 |
from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
|
71 |
from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
|
72 |
from crazy_functions.总结word文档 import 总结word文档
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
function_plugins.update({
|
74 |
-
"
|
|
|
|
|
|
|
|
|
|
|
75 |
"Color": "stop",
|
76 |
-
"
|
|
|
|
|
77 |
},
|
78 |
-
"[
|
79 |
"Color": "stop",
|
80 |
"AsButton": False, # 加入下拉菜单中
|
81 |
"Function": HotReload(批量总结PDF文档pdfminer)
|
82 |
},
|
83 |
-
"
|
|
|
|
|
|
|
|
|
|
|
84 |
"Color": "stop",
|
85 |
"Function": HotReload(总结word文档)
|
86 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
})
|
88 |
|
89 |
###################### 第三组插件 ###########################
|
@@ -97,12 +159,9 @@ def get_crazy_functionals():
|
|
97 |
"Function": HotReload(下载arxiv论文并翻译摘要)
|
98 |
}
|
99 |
})
|
|
|
100 |
except Exception as err:
|
101 |
print(f'[下载arxiv论文并翻译摘要] 插件导入失败 {str(err)}')
|
102 |
|
103 |
-
|
104 |
-
|
105 |
###################### 第n组插件 ###########################
|
106 |
return function_plugins
|
107 |
-
|
108 |
-
|
|
|
1 |
+
from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
|
2 |
|
3 |
+
|
4 |
+
def get_crazy_functions():
|
5 |
###################### 第一组插件 ###########################
|
6 |
# [第一组插件]: 最早期编写的项目插件和一些demo
|
7 |
from crazy_functions.读文章写摘要 import 读文章写摘要
|
|
|
15 |
from crazy_functions.解析项目源代码 import 解析一个Rect项目
|
16 |
from crazy_functions.高级功能函数模板 import 高阶功能模板函数
|
17 |
from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
|
18 |
+
from crazy_functions.Latex全文润色 import Latex英文润色
|
19 |
|
20 |
function_plugins = {
|
21 |
+
|
22 |
+
"解析整个Python项目": {
|
|
|
|
|
|
|
23 |
"Color": "stop", # 按钮颜色
|
24 |
+
"Function": HotReload(解析一个Python项目)
|
25 |
},
|
26 |
"解析整个C++项目头文件": {
|
27 |
"Color": "stop", # 按钮颜色
|
28 |
+
"Function": HotReload(解析一个C项目的头文件)
|
29 |
},
|
30 |
+
"解析整个C++项目(.cpp/.hpp/.c/.h)": {
|
31 |
"Color": "stop", # 按钮颜色
|
32 |
"AsButton": False, # 加入下拉菜单中
|
33 |
+
"Function": HotReload(解析一个C项目)
|
34 |
},
|
35 |
"解析整个Go项目": {
|
36 |
"Color": "stop", # 按钮颜色
|
37 |
"AsButton": False, # 加入下拉菜单中
|
38 |
+
"Function": HotReload(解析一个Golang项目)
|
39 |
},
|
40 |
"解析整个Java项目": {
|
41 |
"Color": "stop", # 按钮颜色
|
42 |
"AsButton": False, # 加入下拉菜单中
|
43 |
+
"Function": HotReload(解析一个Java项目)
|
44 |
},
|
45 |
+
"解析整个React项目": {
|
46 |
"Color": "stop", # 按钮颜色
|
47 |
"AsButton": False, # 加入下拉菜单中
|
48 |
+
"Function": HotReload(解析一个Rect项目)
|
49 |
},
|
50 |
"读Tex论文写摘要": {
|
51 |
"Color": "stop", # 按钮颜色
|
52 |
+
"Function": HotReload(读文章写摘要)
|
53 |
},
|
54 |
"批量生成函数注释": {
|
55 |
"Color": "stop", # 按钮颜色
|
56 |
+
"Function": HotReload(批量生成函数注释)
|
57 |
+
},
|
58 |
+
"[多线程Demo] 解析此项目本身(源码自译解)": {
|
59 |
+
"Function": HotReload(解析项目本身)
|
60 |
},
|
61 |
"[多线程demo] 把本项目源代码切换成全英文": {
|
62 |
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
63 |
+
"AsButton": False, # 加入下拉菜单中
|
64 |
"Function": HotReload(全项目切换英文)
|
65 |
},
|
66 |
+
"[函数插件模板Demo] 历史上的今天": {
|
67 |
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
68 |
"Function": HotReload(高阶功能模板函数)
|
69 |
},
|
70 |
+
|
71 |
}
|
72 |
###################### 第二组插件 ###########################
|
73 |
# [第二组插件]: 经过充分测试,但功能上距离达到完美状态还差一点点
|
74 |
from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
|
75 |
from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
|
76 |
from crazy_functions.总结word文档 import 总结word文档
|
77 |
+
from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
|
78 |
+
from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
|
79 |
+
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容
|
80 |
+
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
|
81 |
+
from crazy_functions.Latex全文润色 import Latex中文润色
|
82 |
+
from crazy_functions.Latex全文翻译 import Latex中译英
|
83 |
+
from crazy_functions.Latex全文翻译 import Latex英译中
|
84 |
+
|
85 |
function_plugins.update({
|
86 |
+
"批量翻译PDF文档(多线程)": {
|
87 |
+
"Color": "stop",
|
88 |
+
"AsButton": True, # 加入下拉菜单中
|
89 |
+
"Function": HotReload(批量翻译PDF文档)
|
90 |
+
},
|
91 |
+
"[测试功能] 批量总结PDF文档": {
|
92 |
"Color": "stop",
|
93 |
+
"AsButton": False, # 加入下拉菜单中
|
94 |
+
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
95 |
+
"Function": HotReload(批量总结PDF文档)
|
96 |
},
|
97 |
+
"[测试功能] 批量总结PDF文档pdfminer": {
|
98 |
"Color": "stop",
|
99 |
"AsButton": False, # 加入下拉菜单中
|
100 |
"Function": HotReload(批量总结PDF文档pdfminer)
|
101 |
},
|
102 |
+
"谷歌学术检索助手(输入谷歌学术搜索页url)": {
|
103 |
+
"Color": "stop",
|
104 |
+
"AsButton": False, # 加入下拉菜单中
|
105 |
+
"Function": HotReload(谷歌检索小助手)
|
106 |
+
},
|
107 |
+
"批量总结Word文档": {
|
108 |
"Color": "stop",
|
109 |
"Function": HotReload(总结word文档)
|
110 |
},
|
111 |
+
# "[测试功能] 理解PDF文档内容(Tk文件选择接口,仅本地)": {
|
112 |
+
# # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
113 |
+
# "AsButton": False, # 加入下拉菜单中
|
114 |
+
# "Function": HotReload(理解PDF文档内容)
|
115 |
+
# },
|
116 |
+
"[测试功能] 理解PDF文档内容(通用接口,读取文件输入区)": {
|
117 |
+
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
118 |
+
"Color": "stop",
|
119 |
+
"AsButton": False, # 加入下拉菜单中
|
120 |
+
"Function": HotReload(理解PDF文档内容标准文件输入)
|
121 |
+
},
|
122 |
+
"[测试功能] 英文Latex项目全文润色(输入路径或上传压缩包)": {
|
123 |
+
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
124 |
+
"Color": "stop",
|
125 |
+
"AsButton": False, # 加入下拉菜单中
|
126 |
+
"Function": HotReload(Latex英文润色)
|
127 |
+
},
|
128 |
+
"[测试功能] 中文Latex项目全文润色(输入路径或上传压缩包)": {
|
129 |
+
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
130 |
+
"Color": "stop",
|
131 |
+
"AsButton": False, # 加入下拉菜单中
|
132 |
+
"Function": HotReload(Latex中文润色)
|
133 |
+
},
|
134 |
+
|
135 |
+
"[测试功能] Latex项目全文中译英(输入路径或上传压缩包)": {
|
136 |
+
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
137 |
+
"Color": "stop",
|
138 |
+
"AsButton": False, # 加入下拉菜单中
|
139 |
+
"Function": HotReload(Latex中译英)
|
140 |
+
},
|
141 |
+
"[测试功能] Latex项目全文英译中(输入路径或上传压缩包)": {
|
142 |
+
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
143 |
+
"Color": "stop",
|
144 |
+
"AsButton": False, # 加入下拉菜单中
|
145 |
+
"Function": HotReload(Latex英译中)
|
146 |
+
},
|
147 |
+
|
148 |
+
|
149 |
})
|
150 |
|
151 |
###################### 第三组插件 ###########################
|
|
|
159 |
"Function": HotReload(下载arxiv论文并翻译摘要)
|
160 |
}
|
161 |
})
|
162 |
+
|
163 |
except Exception as err:
|
164 |
print(f'[下载arxiv论文并翻译摘要] 插件导入失败 {str(err)}')
|
165 |
|
|
|
|
|
166 |
###################### 第n组插件 ###########################
|
167 |
return function_plugins
|
|
|
|
crazy_functions/Latex全文润色.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
fast_debug = False
|
4 |
+
|
5 |
+
class PaperFileGroup():
|
6 |
+
def __init__(self):
|
7 |
+
self.file_paths = []
|
8 |
+
self.file_contents = []
|
9 |
+
self.sp_file_contents = []
|
10 |
+
self.sp_file_index = []
|
11 |
+
self.sp_file_tag = []
|
12 |
+
|
13 |
+
# count_token
|
14 |
+
import tiktoken
|
15 |
+
from toolbox import get_conf
|
16 |
+
enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
|
17 |
+
def get_token_num(txt): return len(enc.encode(txt))
|
18 |
+
self.get_token_num = get_token_num
|
19 |
+
|
20 |
+
def run_file_split(self, max_token_limit=1900):
|
21 |
+
"""
|
22 |
+
将长文本分离开来
|
23 |
+
"""
|
24 |
+
for index, file_content in enumerate(self.file_contents):
|
25 |
+
if self.get_token_num(file_content) < max_token_limit:
|
26 |
+
self.sp_file_contents.append(file_content)
|
27 |
+
self.sp_file_index.append(index)
|
28 |
+
self.sp_file_tag.append(self.file_paths[index])
|
29 |
+
else:
|
30 |
+
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
31 |
+
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
32 |
+
for j, segment in enumerate(segments):
|
33 |
+
self.sp_file_contents.append(segment)
|
34 |
+
self.sp_file_index.append(index)
|
35 |
+
self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
|
36 |
+
|
37 |
+
print('Segmentation: done')
|
38 |
+
|
39 |
+
def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
|
40 |
+
import time, os, re
|
41 |
+
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
42 |
+
|
43 |
+
|
44 |
+
# <-------- 读取Latex文件,删除其中的所有注释 ---------->
|
45 |
+
pfg = PaperFileGroup()
|
46 |
+
|
47 |
+
for index, fp in enumerate(file_manifest):
|
48 |
+
with open(fp, 'r', encoding='utf-8') as f:
|
49 |
+
file_content = f.read()
|
50 |
+
# 定义注释的正则表达式
|
51 |
+
comment_pattern = r'%.*'
|
52 |
+
# 使用正则表达式查找注释,并替换为空字符串
|
53 |
+
clean_tex_content = re.sub(comment_pattern, '', file_content)
|
54 |
+
# 记录删除注释后的文本
|
55 |
+
pfg.file_paths.append(fp)
|
56 |
+
pfg.file_contents.append(clean_tex_content)
|
57 |
+
|
58 |
+
# <-------- 拆分过长的latex文件 ---------->
|
59 |
+
pfg.run_file_split(max_token_limit=1024)
|
60 |
+
n_split = len(pfg.sp_file_contents)
|
61 |
+
|
62 |
+
# <-------- 抽取摘要 ---------->
|
63 |
+
# if language == 'en':
|
64 |
+
# abs_extract_inputs = f"Please write an abstract for this paper"
|
65 |
+
|
66 |
+
# # 单线,获取文章meta信息
|
67 |
+
# paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
68 |
+
# inputs=abs_extract_inputs,
|
69 |
+
# inputs_show_user=f"正在抽取摘要信息。",
|
70 |
+
# llm_kwargs=llm_kwargs,
|
71 |
+
# chatbot=chatbot, history=[],
|
72 |
+
# sys_prompt="Your job is to collect information from materials。",
|
73 |
+
# )
|
74 |
+
|
75 |
+
# <-------- 多线程润色开始 ---------->
|
76 |
+
if language == 'en':
|
77 |
+
inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
|
78 |
+
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
79 |
+
inputs_show_user_array = [f"Polish {f}" for f in pfg.sp_file_tag]
|
80 |
+
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
|
81 |
+
elif language == 'zh':
|
82 |
+
inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
|
83 |
+
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
84 |
+
inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
|
85 |
+
sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)]
|
86 |
+
|
87 |
+
|
88 |
+
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
89 |
+
inputs_array=inputs_array,
|
90 |
+
inputs_show_user_array=inputs_show_user_array,
|
91 |
+
llm_kwargs=llm_kwargs,
|
92 |
+
chatbot=chatbot,
|
93 |
+
history_array=[[""] for _ in range(n_split)],
|
94 |
+
sys_prompt_array=sys_prompt_array,
|
95 |
+
max_workers=10, # OpenAI所允许的最大并行过载
|
96 |
+
scroller_max_len = 80
|
97 |
+
)
|
98 |
+
|
99 |
+
# <-------- 整理结果,退出 ---------->
|
100 |
+
create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
|
101 |
+
res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
|
102 |
+
history = gpt_response_collection
|
103 |
+
chatbot.append((f"{fp}完成了吗?", res))
|
104 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷���界面
|
105 |
+
|
106 |
+
|
107 |
+
@CatchException
|
108 |
+
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
109 |
+
# 基本信息:功能、贡献者
|
110 |
+
chatbot.append([
|
111 |
+
"函数插件功能?",
|
112 |
+
"对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
|
113 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
114 |
+
|
115 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
116 |
+
try:
|
117 |
+
import tiktoken
|
118 |
+
except:
|
119 |
+
report_execption(chatbot, history,
|
120 |
+
a=f"解析项目: {txt}",
|
121 |
+
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
122 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
123 |
+
return
|
124 |
+
history = [] # 清空历史,以免输入溢出
|
125 |
+
import glob, os
|
126 |
+
if os.path.exists(txt):
|
127 |
+
project_folder = txt
|
128 |
+
else:
|
129 |
+
if txt == "": txt = '空空如也的输入栏'
|
130 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
131 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
132 |
+
return
|
133 |
+
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
134 |
+
if len(file_manifest) == 0:
|
135 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
136 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
137 |
+
return
|
138 |
+
yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')
|
139 |
+
|
140 |
+
|
141 |
+
|
142 |
+
|
143 |
+
|
144 |
+
|
145 |
+
@CatchException
|
146 |
+
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
147 |
+
# 基本信息:功能、贡献者
|
148 |
+
chatbot.append([
|
149 |
+
"函数插件功能?",
|
150 |
+
"对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
|
151 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
152 |
+
|
153 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
154 |
+
try:
|
155 |
+
import tiktoken
|
156 |
+
except:
|
157 |
+
report_execption(chatbot, history,
|
158 |
+
a=f"解析项目: {txt}",
|
159 |
+
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
160 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
161 |
+
return
|
162 |
+
history = [] # 清空历史,以免输入溢出
|
163 |
+
import glob, os
|
164 |
+
if os.path.exists(txt):
|
165 |
+
project_folder = txt
|
166 |
+
else:
|
167 |
+
if txt == "": txt = '空空如也的输入栏'
|
168 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
169 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
170 |
+
return
|
171 |
+
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
172 |
+
if len(file_manifest) == 0:
|
173 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
174 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
175 |
+
return
|
176 |
+
yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
|
crazy_functions/Latex全文翻译.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
fast_debug = False
|
4 |
+
|
5 |
+
class PaperFileGroup():
|
6 |
+
def __init__(self):
|
7 |
+
self.file_paths = []
|
8 |
+
self.file_contents = []
|
9 |
+
self.sp_file_contents = []
|
10 |
+
self.sp_file_index = []
|
11 |
+
self.sp_file_tag = []
|
12 |
+
|
13 |
+
# count_token
|
14 |
+
import tiktoken
|
15 |
+
from toolbox import get_conf
|
16 |
+
enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
|
17 |
+
def get_token_num(txt): return len(enc.encode(txt))
|
18 |
+
self.get_token_num = get_token_num
|
19 |
+
|
20 |
+
def run_file_split(self, max_token_limit=1900):
|
21 |
+
"""
|
22 |
+
将长文本分离开来
|
23 |
+
"""
|
24 |
+
for index, file_content in enumerate(self.file_contents):
|
25 |
+
if self.get_token_num(file_content) < max_token_limit:
|
26 |
+
self.sp_file_contents.append(file_content)
|
27 |
+
self.sp_file_index.append(index)
|
28 |
+
self.sp_file_tag.append(self.file_paths[index])
|
29 |
+
else:
|
30 |
+
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
31 |
+
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
32 |
+
for j, segment in enumerate(segments):
|
33 |
+
self.sp_file_contents.append(segment)
|
34 |
+
self.sp_file_index.append(index)
|
35 |
+
self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
|
36 |
+
|
37 |
+
print('Segmentation: done')
|
38 |
+
|
39 |
+
def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
|
40 |
+
import time, os, re
|
41 |
+
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
42 |
+
|
43 |
+
# <-------- 读取Latex文件,删除其中的所有注释 ---------->
|
44 |
+
pfg = PaperFileGroup()
|
45 |
+
|
46 |
+
for index, fp in enumerate(file_manifest):
|
47 |
+
with open(fp, 'r', encoding='utf-8') as f:
|
48 |
+
file_content = f.read()
|
49 |
+
# 定义注释的正则表达式
|
50 |
+
comment_pattern = r'%.*'
|
51 |
+
# 使用正则表达式查找注释,并替换为空字符串
|
52 |
+
clean_tex_content = re.sub(comment_pattern, '', file_content)
|
53 |
+
# 记录删除注释后的文本
|
54 |
+
pfg.file_paths.append(fp)
|
55 |
+
pfg.file_contents.append(clean_tex_content)
|
56 |
+
|
57 |
+
# <-------- 拆分过长的latex文件 ---------->
|
58 |
+
pfg.run_file_split(max_token_limit=1024)
|
59 |
+
n_split = len(pfg.sp_file_contents)
|
60 |
+
|
61 |
+
# <-------- 抽取摘要 ---------->
|
62 |
+
# if language == 'en':
|
63 |
+
# abs_extract_inputs = f"Please write an abstract for this paper"
|
64 |
+
|
65 |
+
# # 单线,获取文章meta信息
|
66 |
+
# paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
67 |
+
# inputs=abs_extract_inputs,
|
68 |
+
# inputs_show_user=f"正在抽取摘要信息。",
|
69 |
+
# llm_kwargs=llm_kwargs,
|
70 |
+
# chatbot=chatbot, history=[],
|
71 |
+
# sys_prompt="Your job is to collect information from materials。",
|
72 |
+
# )
|
73 |
+
|
74 |
+
# <-------- 多线程润色开始 ---------->
|
75 |
+
if language == 'en->zh':
|
76 |
+
inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" +
|
77 |
+
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
78 |
+
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
79 |
+
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
80 |
+
elif language == 'zh->en':
|
81 |
+
inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" +
|
82 |
+
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
83 |
+
inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
|
84 |
+
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
85 |
+
|
86 |
+
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
87 |
+
inputs_array=inputs_array,
|
88 |
+
inputs_show_user_array=inputs_show_user_array,
|
89 |
+
llm_kwargs=llm_kwargs,
|
90 |
+
chatbot=chatbot,
|
91 |
+
history_array=[[""] for _ in range(n_split)],
|
92 |
+
sys_prompt_array=sys_prompt_array,
|
93 |
+
max_workers=10, # OpenAI所允许的最大并行过载
|
94 |
+
scroller_max_len = 80
|
95 |
+
)
|
96 |
+
|
97 |
+
# <-------- 整理结果,退出 ---------->
|
98 |
+
create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
|
99 |
+
res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
|
100 |
+
history = gpt_response_collection
|
101 |
+
chatbot.append((f"{fp}完成了吗?", res))
|
102 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
103 |
+
|
104 |
+
|
105 |
+
|
106 |
+
|
107 |
+
|
108 |
+
@CatchException
|
109 |
+
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
110 |
+
# 基本信息:功能、贡献者
|
111 |
+
chatbot.append([
|
112 |
+
"函数插件功能?",
|
113 |
+
"对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
|
114 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
115 |
+
|
116 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
117 |
+
try:
|
118 |
+
import tiktoken
|
119 |
+
except:
|
120 |
+
report_execption(chatbot, history,
|
121 |
+
a=f"解析项目: {txt}",
|
122 |
+
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
123 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
124 |
+
return
|
125 |
+
history = [] # 清空历史,以免输入溢出
|
126 |
+
import glob, os
|
127 |
+
if os.path.exists(txt):
|
128 |
+
project_folder = txt
|
129 |
+
else:
|
130 |
+
if txt == "": txt = '空空如也的输入栏'
|
131 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
132 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
133 |
+
return
|
134 |
+
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
135 |
+
if len(file_manifest) == 0:
|
136 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
137 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
138 |
+
return
|
139 |
+
yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
|
140 |
+
|
141 |
+
|
142 |
+
|
143 |
+
|
144 |
+
|
145 |
+
@CatchException
|
146 |
+
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
147 |
+
# 基本信息:功能、贡献者
|
148 |
+
chatbot.append([
|
149 |
+
"函数插件功能?",
|
150 |
+
"对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
|
151 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
152 |
+
|
153 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
154 |
+
try:
|
155 |
+
import tiktoken
|
156 |
+
except:
|
157 |
+
report_execption(chatbot, history,
|
158 |
+
a=f"解析项目: {txt}",
|
159 |
+
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
160 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
161 |
+
return
|
162 |
+
history = [] # 清空历史,以免输入溢出
|
163 |
+
import glob, os
|
164 |
+
if os.path.exists(txt):
|
165 |
+
project_folder = txt
|
166 |
+
else:
|
167 |
+
if txt == "": txt = '空空如也的输入栏'
|
168 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
169 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
170 |
+
return
|
171 |
+
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
172 |
+
if len(file_manifest) == 0:
|
173 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
174 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
175 |
+
return
|
176 |
+
yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
|
crazy_functions/crazy_utils.py
ADDED
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import traceback
|
2 |
+
from toolbox import update_ui
|
3 |
+
|
4 |
+
def input_clipping(inputs, history, max_token_limit):
|
5 |
+
import tiktoken
|
6 |
+
import numpy as np
|
7 |
+
from toolbox import get_conf
|
8 |
+
enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
|
9 |
+
def get_token_num(txt): return len(enc.encode(txt))
|
10 |
+
|
11 |
+
mode = 'input-and-history'
|
12 |
+
# 当 输入部分的token占比 小于 全文的一半时,只裁剪历史
|
13 |
+
input_token_num = get_token_num(inputs)
|
14 |
+
if input_token_num < max_token_limit//2:
|
15 |
+
mode = 'only-history'
|
16 |
+
max_token_limit = max_token_limit - input_token_num
|
17 |
+
|
18 |
+
everything = [inputs] if mode == 'input-and-history' else ['']
|
19 |
+
everything.extend(history)
|
20 |
+
n_token = get_token_num('\n'.join(everything))
|
21 |
+
everything_token = [get_token_num(e) for e in everything]
|
22 |
+
delta = max(everything_token) // 16 # 截断时的颗粒度
|
23 |
+
|
24 |
+
while n_token > max_token_limit:
|
25 |
+
where = np.argmax(everything_token)
|
26 |
+
encoded = enc.encode(everything[where])
|
27 |
+
clipped_encoded = encoded[:len(encoded)-delta]
|
28 |
+
everything[where] = enc.decode(clipped_encoded)[:-1] # -1 to remove the may-be illegal char
|
29 |
+
everything_token[where] = get_token_num(everything[where])
|
30 |
+
n_token = get_token_num('\n'.join(everything))
|
31 |
+
|
32 |
+
if mode == 'input-and-history':
|
33 |
+
inputs = everything[0]
|
34 |
+
else:
|
35 |
+
pass
|
36 |
+
history = everything[1:]
|
37 |
+
return inputs, history
|
38 |
+
|
39 |
+
def request_gpt_model_in_new_thread_with_ui_alive(
|
40 |
+
inputs, inputs_show_user, llm_kwargs,
|
41 |
+
chatbot, history, sys_prompt, refresh_interval=0.2,
|
42 |
+
handle_token_exceed=True,
|
43 |
+
retry_times_at_unknown_error=2,
|
44 |
+
):
|
45 |
+
"""
|
46 |
+
Request GPT model,请求GPT模型同时维持用户界面活跃。
|
47 |
+
|
48 |
+
输入参数 Args (以_array结尾的输入变量都是列表,列表长度为子任务的数量,执行时,会把列表拆解,放到每个子线程中分别执行):
|
49 |
+
inputs (string): List of inputs (输入)
|
50 |
+
inputs_show_user (string): List of inputs to show user(展现在报告中的输入,借助此参数,在汇总报告中隐藏啰嗦的真实输入,增强报告的可读性)
|
51 |
+
top_p (float): Top p value for sampling from model distribution (GPT参数,浮点数)
|
52 |
+
temperature (float): Temperature value for sampling from model distribution(GPT参数,浮点数)
|
53 |
+
chatbot: chatbot inputs and outputs (用户界面对话窗口句柄,用于数据流可视化)
|
54 |
+
history (list): List of chat history (历史,对话历史列表)
|
55 |
+
sys_prompt (string): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样)
|
56 |
+
refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果)
|
57 |
+
handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启
|
58 |
+
retry_times_at_unknown_error:失败时的重试次数
|
59 |
+
|
60 |
+
输出 Returns:
|
61 |
+
future: 输出,GPT返回的结果
|
62 |
+
"""
|
63 |
+
import time
|
64 |
+
from concurrent.futures import ThreadPoolExecutor
|
65 |
+
from request_llm.bridge_chatgpt import predict_no_ui_long_connection
|
66 |
+
# 用户反馈
|
67 |
+
chatbot.append([inputs_show_user, ""])
|
68 |
+
msg = '正常'
|
69 |
+
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
70 |
+
executor = ThreadPoolExecutor(max_workers=16)
|
71 |
+
mutable = ["", time.time()]
|
72 |
+
def _req_gpt(inputs, history, sys_prompt):
|
73 |
+
retry_op = retry_times_at_unknown_error
|
74 |
+
exceeded_cnt = 0
|
75 |
+
while True:
|
76 |
+
try:
|
77 |
+
# 【第一种情况】:顺利完成
|
78 |
+
result = predict_no_ui_long_connection(
|
79 |
+
inputs=inputs, llm_kwargs=llm_kwargs,
|
80 |
+
history=history, sys_prompt=sys_prompt, observe_window=mutable)
|
81 |
+
return result
|
82 |
+
except ConnectionAbortedError as token_exceeded_error:
|
83 |
+
# 【第二种情况】:Token溢出
|
84 |
+
if handle_token_exceed:
|
85 |
+
exceeded_cnt += 1
|
86 |
+
# 【选择处理】 尝试计算比例,尽可能多地保留文本
|
87 |
+
from toolbox import get_reduce_token_percent
|
88 |
+
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
|
89 |
+
MAX_TOKEN = 4096
|
90 |
+
EXCEED_ALLO = 512 + 512 * exceeded_cnt
|
91 |
+
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
|
92 |
+
mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
|
93 |
+
continue # 返回重试
|
94 |
+
else:
|
95 |
+
# 【选择放弃】
|
96 |
+
tb_str = '```\n' + traceback.format_exc() + '```'
|
97 |
+
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
|
98 |
+
return mutable[0] # 放弃
|
99 |
+
except:
|
100 |
+
# 【第三种情况】:其他错误:重试几次
|
101 |
+
tb_str = '```\n' + traceback.format_exc() + '```'
|
102 |
+
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
|
103 |
+
if retry_op > 0:
|
104 |
+
retry_op -= 1
|
105 |
+
mutable[0] += f"[Local Message] 重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
|
106 |
+
time.sleep(5)
|
107 |
+
continue # 返回重试
|
108 |
+
else:
|
109 |
+
time.sleep(5)
|
110 |
+
return mutable[0] # 放弃
|
111 |
+
|
112 |
+
future = executor.submit(_req_gpt, inputs, history, sys_prompt)
|
113 |
+
while True:
|
114 |
+
# yield一次以刷新前端页面
|
115 |
+
time.sleep(refresh_interval)
|
116 |
+
# “喂狗”(看门狗)
|
117 |
+
mutable[1] = time.time()
|
118 |
+
if future.done():
|
119 |
+
break
|
120 |
+
chatbot[-1] = [chatbot[-1][0], mutable[0]]
|
121 |
+
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
122 |
+
|
123 |
+
final_result = future.result()
|
124 |
+
chatbot[-1] = [chatbot[-1][0], final_result]
|
125 |
+
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
|
126 |
+
return final_result
|
127 |
+
|
128 |
+
|
129 |
+
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
130 |
+
inputs_array, inputs_show_user_array, llm_kwargs,
|
131 |
+
chatbot, history_array, sys_prompt_array,
|
132 |
+
refresh_interval=0.2, max_workers=10, scroller_max_len=30,
|
133 |
+
handle_token_exceed=True, show_user_at_complete=False,
|
134 |
+
retry_times_at_unknown_error=2,
|
135 |
+
):
|
136 |
+
"""
|
137 |
+
Request GPT model using multiple threads with UI and high efficiency
|
138 |
+
请求GPT模型的[多线程]版。
|
139 |
+
具备以下功能:
|
140 |
+
实时在UI上反馈远程数据流
|
141 |
+
使用线程池,可调节线程池的大小避免openai的流量限制错误
|
142 |
+
处理中途中止的情况
|
143 |
+
网络等出问题时,会把traceback和已经接收的数据转入输出
|
144 |
+
|
145 |
+
输入参数 Args (以_array结尾的输入变量都是列表,列表长度为子任务的数量,执行时,会把列表拆解,放到每个子线程中分别执行):
|
146 |
+
inputs_array (list): List of inputs (每个子任务的输入)
|
147 |
+
inputs_show_user_array (list): List of inputs to show user(每个子任务展现在报告中的输入,借助此参数,在汇总报告中隐藏啰嗦的真实输入,增强报告的可读性)
|
148 |
+
llm_kwargs: llm_kwargs参数
|
149 |
+
chatbot: chatbot (用户界面对话窗口句柄,用于数据流可视化)
|
150 |
+
history_array (list): List of chat history (历史对话输入,双层列表,第一层列表是子任务分解,第二层列表是对话历史)
|
151 |
+
sys_prompt_array (list): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样)
|
152 |
+
refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果)
|
153 |
+
max_workers (int, optional): Maximum number of threads (default: 10) (最大线程数,如果子任务非常多,需要用此选项防止高频地请求openai导致错误)
|
154 |
+
scroller_max_len (int, optional): Maximum length for scroller (default: 30)(数据流的显示最后收到的多少个字符,仅仅服务于视觉效果)
|
155 |
+
handle_token_exceed (bool, optional): (是否在输入过长时,自动缩减文本)
|
156 |
+
handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启
|
157 |
+
show_user_at_complete (bool, optional): (在结束时,把完整输入-输出结果显示在聊天框)
|
158 |
+
retry_times_at_unknown_error:子任务失败时的重试次数
|
159 |
+
|
160 |
+
输出 Returns:
|
161 |
+
list: List of GPT model responses (每个子任务的输出汇总,如果某个子任务出错,response中会携带traceback报错信息,方便调试和定位问题。)
|
162 |
+
"""
|
163 |
+
import time, random
|
164 |
+
from concurrent.futures import ThreadPoolExecutor
|
165 |
+
from request_llm.bridge_chatgpt import predict_no_ui_long_connection
|
166 |
+
assert len(inputs_array) == len(history_array)
|
167 |
+
assert len(inputs_array) == len(sys_prompt_array)
|
168 |
+
executor = ThreadPoolExecutor(max_workers=max_workers)
|
169 |
+
n_frag = len(inputs_array)
|
170 |
+
# 用户反馈
|
171 |
+
chatbot.append(["请开始多线程操作。", ""])
|
172 |
+
msg = '正常'
|
173 |
+
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
174 |
+
# 异步原子
|
175 |
+
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
|
176 |
+
|
177 |
+
def _req_gpt(index, inputs, history, sys_prompt):
|
178 |
+
gpt_say = ""
|
179 |
+
retry_op = retry_times_at_unknown_error
|
180 |
+
exceeded_cnt = 0
|
181 |
+
mutable[index][2] = "执行中"
|
182 |
+
while True:
|
183 |
+
try:
|
184 |
+
# 【第一种情况】:顺利完成
|
185 |
+
# time.sleep(10); raise RuntimeError("测试")
|
186 |
+
gpt_say = predict_no_ui_long_connection(
|
187 |
+
inputs=inputs, llm_kwargs=llm_kwargs, history=history,
|
188 |
+
sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
|
189 |
+
)
|
190 |
+
mutable[index][2] = "已成功"
|
191 |
+
return gpt_say
|
192 |
+
except ConnectionAbortedError as token_exceeded_error:
|
193 |
+
# 【第二种情况】:Token溢出,
|
194 |
+
if handle_token_exceed:
|
195 |
+
exceeded_cnt += 1
|
196 |
+
# 【选择处理】 尝试计算比例,尽可能多地保留文本
|
197 |
+
from toolbox import get_reduce_token_percent
|
198 |
+
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
|
199 |
+
MAX_TOKEN = 4096
|
200 |
+
EXCEED_ALLO = 512 + 512 * exceeded_cnt
|
201 |
+
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
|
202 |
+
gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
|
203 |
+
mutable[index][2] = f"截断重试"
|
204 |
+
continue # 返回重试
|
205 |
+
else:
|
206 |
+
# 【选择放弃】
|
207 |
+
tb_str = '```\n' + traceback.format_exc() + '```'
|
208 |
+
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
|
209 |
+
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
|
210 |
+
mutable[index][2] = "输入过长已放弃"
|
211 |
+
return gpt_say # 放弃
|
212 |
+
except:
|
213 |
+
# 【第三种情况】:其他错误
|
214 |
+
tb_str = '```\n' + traceback.format_exc() + '```'
|
215 |
+
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
|
216 |
+
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
|
217 |
+
if retry_op > 0:
|
218 |
+
retry_op -= 1
|
219 |
+
wait = random.randint(5, 20)
|
220 |
+
for i in range(wait):# 也许等待十几秒后,情况会好转
|
221 |
+
mutable[index][2] = f"等待重试 {wait-i}"; time.sleep(1)
|
222 |
+
mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
|
223 |
+
continue # 返回重试
|
224 |
+
else:
|
225 |
+
mutable[index][2] = "已失败"
|
226 |
+
wait = 5
|
227 |
+
time.sleep(5)
|
228 |
+
return gpt_say # 放弃
|
229 |
+
|
230 |
+
# 异步任务开始
|
231 |
+
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
|
232 |
+
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
|
233 |
+
cnt = 0
|
234 |
+
while True:
|
235 |
+
# yield一次以刷新前端页面
|
236 |
+
time.sleep(refresh_interval)
|
237 |
+
cnt += 1
|
238 |
+
worker_done = [h.done() for h in futures]
|
239 |
+
if all(worker_done):
|
240 |
+
executor.shutdown()
|
241 |
+
break
|
242 |
+
# 更好的UI视觉效果
|
243 |
+
observe_win = []
|
244 |
+
# print([mutable[thread_index][2] for thread_index, _ in enumerate(worker_done)])
|
245 |
+
# 每个线程都要“喂狗”(看门狗)
|
246 |
+
for thread_index, _ in enumerate(worker_done):
|
247 |
+
mutable[thread_index][1] = time.time()
|
248 |
+
# 在前端打印些好玩的东西
|
249 |
+
for thread_index, _ in enumerate(worker_done):
|
250 |
+
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
|
251 |
+
replace('\n', '').replace('```', '...').replace(
|
252 |
+
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
|
253 |
+
observe_win.append(print_something_really_funny)
|
254 |
+
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
|
255 |
+
if not done else f'`{mutable[thread_index][2]}`\n\n'
|
256 |
+
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
|
257 |
+
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
|
258 |
+
msg = "正常"
|
259 |
+
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
260 |
+
# 异步任务结束
|
261 |
+
gpt_response_collection = []
|
262 |
+
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
263 |
+
gpt_res = f.result()
|
264 |
+
gpt_response_collection.extend([inputs_show_user, gpt_res])
|
265 |
+
|
266 |
+
if show_user_at_complete:
|
267 |
+
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
268 |
+
gpt_res = f.result()
|
269 |
+
chatbot.append([inputs_show_user, gpt_res])
|
270 |
+
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
271 |
+
time.sleep(1)
|
272 |
+
return gpt_response_collection
|
273 |
+
|
274 |
+
|
275 |
+
def WithRetry(f):
|
276 |
+
"""
|
277 |
+
装饰器函数,用于自动重试。
|
278 |
+
"""
|
279 |
+
def decorated(retry, res_when_fail, *args, **kwargs):
|
280 |
+
assert retry >= 0
|
281 |
+
while True:
|
282 |
+
try:
|
283 |
+
res = yield from f(*args, **kwargs)
|
284 |
+
return res
|
285 |
+
except:
|
286 |
+
retry -= 1
|
287 |
+
if retry<0:
|
288 |
+
print("达到最大重试次数")
|
289 |
+
break
|
290 |
+
else:
|
291 |
+
print("重试中……")
|
292 |
+
continue
|
293 |
+
return res_when_fail
|
294 |
+
return decorated
|
295 |
+
|
296 |
+
|
297 |
+
def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
|
298 |
+
def cut(txt_tocut, must_break_at_empty_line): # 递归
|
299 |
+
if get_token_fn(txt_tocut) <= limit:
|
300 |
+
return [txt_tocut]
|
301 |
+
else:
|
302 |
+
lines = txt_tocut.split('\n')
|
303 |
+
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
|
304 |
+
estimated_line_cut = int(estimated_line_cut)
|
305 |
+
for cnt in reversed(range(estimated_line_cut)):
|
306 |
+
if must_break_at_empty_line:
|
307 |
+
if lines[cnt] != "":
|
308 |
+
continue
|
309 |
+
print(cnt)
|
310 |
+
prev = "\n".join(lines[:cnt])
|
311 |
+
post = "\n".join(lines[cnt:])
|
312 |
+
if get_token_fn(prev) < limit:
|
313 |
+
break
|
314 |
+
if cnt == 0:
|
315 |
+
print('what the fuck ?')
|
316 |
+
raise RuntimeError("存在一行极长的文本!")
|
317 |
+
# print(len(post))
|
318 |
+
# 列表递归接龙
|
319 |
+
result = [prev]
|
320 |
+
result.extend(cut(post, must_break_at_empty_line))
|
321 |
+
return result
|
322 |
+
try:
|
323 |
+
return cut(txt, must_break_at_empty_line=True)
|
324 |
+
except RuntimeError:
|
325 |
+
return cut(txt, must_break_at_empty_line=False)
|
326 |
+
|
327 |
+
|
328 |
+
def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
|
329 |
+
def cut(txt_tocut, must_break_at_empty_line): # 递归
|
330 |
+
if get_token_fn(txt_tocut) <= limit:
|
331 |
+
return [txt_tocut]
|
332 |
+
else:
|
333 |
+
lines = txt_tocut.split('\n')
|
334 |
+
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
|
335 |
+
estimated_line_cut = int(estimated_line_cut)
|
336 |
+
cnt = 0
|
337 |
+
for cnt in reversed(range(estimated_line_cut)):
|
338 |
+
if must_break_at_empty_line:
|
339 |
+
if lines[cnt] != "":
|
340 |
+
continue
|
341 |
+
print(cnt)
|
342 |
+
prev = "\n".join(lines[:cnt])
|
343 |
+
post = "\n".join(lines[cnt:])
|
344 |
+
if get_token_fn(prev) < limit:
|
345 |
+
break
|
346 |
+
if cnt == 0:
|
347 |
+
# print('what the fuck ? 存在一行极长的文本!')
|
348 |
+
raise RuntimeError("存在一行极长的文本!")
|
349 |
+
# print(len(post))
|
350 |
+
# 列表递归接龙
|
351 |
+
result = [prev]
|
352 |
+
result.extend(cut(post, must_break_at_empty_line))
|
353 |
+
return result
|
354 |
+
try:
|
355 |
+
return cut(txt, must_break_at_empty_line=True)
|
356 |
+
except RuntimeError:
|
357 |
+
try:
|
358 |
+
return cut(txt, must_break_at_empty_line=False)
|
359 |
+
except RuntimeError:
|
360 |
+
# 这个中文的句号是故意的,作为一个标识而存在
|
361 |
+
res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False)
|
362 |
+
return [r.replace('。\n', '.') for r in res]
|
crazy_functions/下载arxiv论文翻译摘要.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
-
from
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file,
|
3 |
import re, requests, unicodedata, os
|
4 |
-
|
5 |
def download_arxiv_(url_pdf):
|
6 |
if 'arxiv.org' not in url_pdf:
|
7 |
if ('.' in url_pdf) and ('/' not in url_pdf):
|
@@ -132,7 +132,7 @@ def get_name(_url_):
|
|
132 |
|
133 |
|
134 |
@CatchException
|
135 |
-
def 下载arxiv论文并翻译摘要(txt,
|
136 |
|
137 |
CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
|
138 |
import glob
|
@@ -140,7 +140,7 @@ def 下载arxiv论文并翻译摘要(txt, top_p, api_key, temperature, chatbot,
|
|
140 |
|
141 |
# 基本信息:功能、贡献者
|
142 |
chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO])
|
143 |
-
yield chatbot, history
|
144 |
|
145 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
146 |
try:
|
@@ -149,7 +149,7 @@ def 下载arxiv论文并翻译摘要(txt, top_p, api_key, temperature, chatbot,
|
|
149 |
report_execption(chatbot, history,
|
150 |
a = f"解析项目: {txt}",
|
151 |
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
|
152 |
-
yield chatbot, history
|
153 |
return
|
154 |
|
155 |
# 清空历史,以免输入溢出
|
@@ -162,25 +162,33 @@ def 下载arxiv论文并翻译摘要(txt, top_p, api_key, temperature, chatbot,
|
|
162 |
report_execption(chatbot, history,
|
163 |
a = f"解析项目: {txt}",
|
164 |
b = f"下载pdf文件未成功")
|
165 |
-
yield chatbot, history
|
166 |
return
|
167 |
|
168 |
# 翻译摘要等
|
169 |
i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}"
|
170 |
i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}'
|
171 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
172 |
-
yield chatbot, history
|
173 |
msg = '正常'
|
174 |
# ** gpt request **
|
175 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
176 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
177 |
history.append(i_say_show_user); history.append(gpt_say)
|
178 |
-
yield chatbot, history, msg
|
179 |
# 写入文件
|
180 |
import shutil
|
181 |
# 重置文件的创建时间
|
182 |
shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path)
|
183 |
res = write_results_to_file(history)
|
184 |
chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载"))
|
185 |
-
yield chatbot, history, msg
|
186 |
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file, get_conf
|
3 |
import re, requests, unicodedata, os
|
4 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
5 |
def download_arxiv_(url_pdf):
|
6 |
if 'arxiv.org' not in url_pdf:
|
7 |
if ('.' in url_pdf) and ('/' not in url_pdf):
|
|
|
132 |
|
133 |
|
134 |
@CatchException
|
135 |
+
def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
136 |
|
137 |
CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
|
138 |
import glob
|
|
|
140 |
|
141 |
# 基本信息:功能、贡献者
|
142 |
chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO])
|
143 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
144 |
|
145 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
146 |
try:
|
|
|
149 |
report_execption(chatbot, history,
|
150 |
a = f"解析项目: {txt}",
|
151 |
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
|
152 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
153 |
return
|
154 |
|
155 |
# 清空历史,以免输入溢出
|
|
|
162 |
report_execption(chatbot, history,
|
163 |
a = f"解析项目: {txt}",
|
164 |
b = f"下载pdf文件未成功")
|
165 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
166 |
return
|
167 |
|
168 |
# 翻译摘要等
|
169 |
i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}"
|
170 |
i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}'
|
171 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
172 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
173 |
msg = '正常'
|
174 |
# ** gpt request **
|
175 |
+
# 单线,获取文章meta信息
|
176 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
177 |
+
inputs=i_say,
|
178 |
+
inputs_show_user=i_say_show_user,
|
179 |
+
llm_kwargs=llm_kwargs,
|
180 |
+
chatbot=chatbot, history=[],
|
181 |
+
sys_prompt="Your job is to collect information from materials and translate to Chinese。",
|
182 |
+
)
|
183 |
+
|
184 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
185 |
history.append(i_say_show_user); history.append(gpt_say)
|
186 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
187 |
# 写入文件
|
188 |
import shutil
|
189 |
# 重置文件的创建时间
|
190 |
shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path)
|
191 |
res = write_results_to_file(history)
|
192 |
chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载"))
|
193 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
194 |
|
crazy_functions/代码重写为全英文_多线程.py
CHANGED
@@ -1,61 +1,121 @@
|
|
1 |
import threading
|
2 |
-
from
|
3 |
-
from toolbox import
|
|
|
|
|
4 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
|
7 |
@CatchException
|
8 |
-
def 全项目切换英文(txt,
|
9 |
-
|
10 |
-
|
11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
os.makedirs('gpt_log/generated_english_version', exist_ok=True)
|
13 |
os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
|
14 |
file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
|
15 |
[f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
|
|
|
16 |
i_say_show_user_buffer = []
|
17 |
|
18 |
-
#
|
19 |
for index, fp in enumerate(file_manifest):
|
20 |
# if 'test_project' in fp: continue
|
21 |
with open(fp, 'r', encoding='utf-8') as f:
|
22 |
file_content = f.read()
|
23 |
-
i_say_show_user =f'[{index}/{len(file_manifest)}]
|
24 |
i_say_show_user_buffer.append(i_say_show_user)
|
25 |
chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
|
26 |
-
yield chatbot, history
|
|
|
27 |
|
28 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
mutable_return = [None for _ in file_manifest]
|
|
|
30 |
def thread_worker(fp,index):
|
|
|
|
|
|
|
31 |
with open(fp, 'r', encoding='utf-8') as f:
|
32 |
file_content = f.read()
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
-
#
|
39 |
handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)]
|
40 |
for h in handles:
|
41 |
h.daemon = True
|
42 |
h.start()
|
43 |
chatbot.append(('开始了吗?', f'多线程操作已经开始'))
|
44 |
-
yield chatbot, history
|
45 |
|
46 |
-
#
|
47 |
cnt = 0
|
48 |
while True:
|
49 |
-
|
|
|
50 |
th_alive = [h.is_alive() for h in handles]
|
51 |
if not any(th_alive): break
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
|
|
|
|
|
|
57 |
|
58 |
-
#
|
59 |
for index, h in enumerate(handles):
|
60 |
h.join() # 这里其实不需要join了,肯定已经都结束了
|
61 |
fp = file_manifest[index]
|
@@ -63,13 +123,17 @@ def 全项目切换英文(txt, top_p, api_key, temperature, chatbot, history, sy
|
|
63 |
i_say_show_user = i_say_show_user_buffer[index]
|
64 |
|
65 |
where_to_relocate = f'gpt_log/generated_english_version/{fp}'
|
66 |
-
|
|
|
|
|
|
|
|
|
67 |
chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
|
68 |
history.append(i_say_show_user); history.append(gpt_say)
|
69 |
-
yield chatbot, history
|
70 |
time.sleep(1)
|
71 |
|
72 |
-
#
|
73 |
res = write_results_to_file(history)
|
74 |
chatbot.append(("生成一份任务执行报告", res))
|
75 |
-
yield chatbot, history
|
|
|
1 |
import threading
|
2 |
+
from request_llm.bridge_chatgpt import predict_no_ui_long_connection
|
3 |
+
from toolbox import update_ui
|
4 |
+
from toolbox import CatchException, write_results_to_file, report_execption
|
5 |
+
from .crazy_utils import breakdown_txt_to_satisfy_token_limit
|
6 |
|
7 |
+
def extract_code_block_carefully(txt):
|
8 |
+
splitted = txt.split('```')
|
9 |
+
n_code_block_seg = len(splitted) - 1
|
10 |
+
if n_code_block_seg <= 1: return txt
|
11 |
+
# 剩下的情况都开头除去 ``` 结尾除去一次 ```
|
12 |
+
txt_out = '```'.join(splitted[1:-1])
|
13 |
+
return txt_out
|
14 |
+
|
15 |
+
|
16 |
+
|
17 |
+
def break_txt_into_half_at_some_linebreak(txt):
|
18 |
+
lines = txt.split('\n')
|
19 |
+
n_lines = len(lines)
|
20 |
+
pre = lines[:(n_lines//2)]
|
21 |
+
post = lines[(n_lines//2):]
|
22 |
+
return "\n".join(pre), "\n".join(post)
|
23 |
|
24 |
|
25 |
@CatchException
|
26 |
+
def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
|
27 |
+
# 第1步:清空历史,以免输入溢出
|
28 |
+
history = []
|
29 |
+
|
30 |
+
# 第2步:尝试导入依赖,如果缺少依赖,则给出安装建议
|
31 |
+
try:
|
32 |
+
import tiktoken
|
33 |
+
except:
|
34 |
+
report_execption(chatbot, history,
|
35 |
+
a = f"解析项目: {txt}",
|
36 |
+
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
37 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
38 |
+
return
|
39 |
+
|
40 |
+
# 第3步:集合文件
|
41 |
+
import time, glob, os, shutil, re
|
42 |
os.makedirs('gpt_log/generated_english_version', exist_ok=True)
|
43 |
os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
|
44 |
file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
|
45 |
[f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
|
46 |
+
# file_manifest = ['./toolbox.py']
|
47 |
i_say_show_user_buffer = []
|
48 |
|
49 |
+
# 第4步:随便显示点什么防止卡顿的感觉
|
50 |
for index, fp in enumerate(file_manifest):
|
51 |
# if 'test_project' in fp: continue
|
52 |
with open(fp, 'r', encoding='utf-8') as f:
|
53 |
file_content = f.read()
|
54 |
+
i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
|
55 |
i_say_show_user_buffer.append(i_say_show_user)
|
56 |
chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
|
57 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
58 |
+
|
59 |
|
60 |
+
# 第5步:Token限制下的截断与处理
|
61 |
+
MAX_TOKEN = 3000
|
62 |
+
import tiktoken
|
63 |
+
from toolbox import get_conf
|
64 |
+
enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
|
65 |
+
def get_token_fn(txt): return len(enc.encode(txt))
|
66 |
+
|
67 |
+
|
68 |
+
# 第6步:任务函数
|
69 |
mutable_return = [None for _ in file_manifest]
|
70 |
+
observe_window = [[""] for _ in file_manifest]
|
71 |
def thread_worker(fp,index):
|
72 |
+
if index > 10:
|
73 |
+
time.sleep(60)
|
74 |
+
print('Openai 限制免费用户每分钟20次请求,降低请求频率中。')
|
75 |
with open(fp, 'r', encoding='utf-8') as f:
|
76 |
file_content = f.read()
|
77 |
+
i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
|
78 |
+
try:
|
79 |
+
gpt_say = ""
|
80 |
+
# 分解代码文件
|
81 |
+
file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
|
82 |
+
for file_content_partial in file_content_breakdown:
|
83 |
+
i_say = i_say_template(fp, file_content_partial)
|
84 |
+
# # ** gpt request **
|
85 |
+
gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
|
86 |
+
gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
|
87 |
+
gpt_say += gpt_say_partial
|
88 |
+
mutable_return[index] = gpt_say
|
89 |
+
except ConnectionAbortedError as token_exceed_err:
|
90 |
+
print('至少一个线程任务Token溢出而失败', e)
|
91 |
+
except Exception as e:
|
92 |
+
print('至少一个线程任务意外失败', e)
|
93 |
|
94 |
+
# 第7步:所有线程同时开始执行任务函数
|
95 |
handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)]
|
96 |
for h in handles:
|
97 |
h.daemon = True
|
98 |
h.start()
|
99 |
chatbot.append(('开始了吗?', f'多线程操作已经开始'))
|
100 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
101 |
|
102 |
+
# 第8步:循环轮询各个线程是否执行完毕
|
103 |
cnt = 0
|
104 |
while True:
|
105 |
+
cnt += 1
|
106 |
+
time.sleep(0.2)
|
107 |
th_alive = [h.is_alive() for h in handles]
|
108 |
if not any(th_alive): break
|
109 |
+
# 更好的UI视觉效果
|
110 |
+
observe_win = []
|
111 |
+
for thread_index, alive in enumerate(th_alive):
|
112 |
+
observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('<br/>','.....').replace('$','.')+"... ]")
|
113 |
+
stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
|
114 |
+
stat_str = ''.join(stat)
|
115 |
+
chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1)))
|
116 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
117 |
|
118 |
+
# 第9步:把结果写入文件
|
119 |
for index, h in enumerate(handles):
|
120 |
h.join() # 这里其实不需要join了,肯定已经都结束了
|
121 |
fp = file_manifest[index]
|
|
|
123 |
i_say_show_user = i_say_show_user_buffer[index]
|
124 |
|
125 |
where_to_relocate = f'gpt_log/generated_english_version/{fp}'
|
126 |
+
if gpt_say is not None:
|
127 |
+
with open(where_to_relocate, 'w+', encoding='utf-8') as f:
|
128 |
+
f.write(gpt_say)
|
129 |
+
else: # 失败
|
130 |
+
shutil.copyfile(file_manifest[index], where_to_relocate)
|
131 |
chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
|
132 |
history.append(i_say_show_user); history.append(gpt_say)
|
133 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
134 |
time.sleep(1)
|
135 |
|
136 |
+
# 第10步:备份一个文件
|
137 |
res = write_results_to_file(history)
|
138 |
chatbot.append(("生成一份任务执行报告", res))
|
139 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
crazy_functions/总结word文档.py
CHANGED
@@ -1,9 +1,10 @@
|
|
1 |
-
from
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
|
|
3 |
fast_debug = False
|
4 |
|
5 |
|
6 |
-
def 解析docx(file_manifest, project_folder,
|
7 |
import time, os
|
8 |
# pip install python-docx 用于docx格式,跨平台
|
9 |
# pip install pywin32 用于doc格式,仅支持Win平台
|
@@ -35,58 +36,69 @@ def 解析docx(file_manifest, project_folder, top_p, api_key, temperature, chatb
|
|
35 |
f'文章内容是 ```{file_content}```'
|
36 |
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 假设你是论文审稿专家,请对下面的文章片段做概述: {os.path.abspath(fp)}'
|
37 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
38 |
-
yield chatbot, history
|
39 |
|
40 |
if not fast_debug:
|
41 |
msg = '正常'
|
42 |
# ** gpt request **
|
43 |
-
gpt_say = yield from
|
44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
45 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
46 |
-
history.append(i_say_show_user)
|
47 |
history.append(gpt_say)
|
48 |
-
yield chatbot, history, msg
|
49 |
if not fast_debug: time.sleep(2)
|
50 |
|
51 |
"""
|
52 |
# 可按需启用
|
53 |
i_say = f'根据你上述的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一篇英文的。'
|
54 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
55 |
-
yield chatbot, history
|
56 |
|
57 |
|
58 |
i_say = f'我想让你做一个论文写作导师。您的任务是使用人工智能工具(例如自然语言处理)提供有关如何改进其上述文章的反馈。' \
|
59 |
f'您还应该利用您在有效写作技巧方面的修辞知识和经验来建议作者可以更好地以书面形式表达他们的想法和想法的方法。' \
|
60 |
f'根据你之前的分析,提出建议'
|
61 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
62 |
-
yield chatbot, history
|
63 |
|
64 |
"""
|
65 |
|
66 |
if not fast_debug:
|
67 |
msg = '正常'
|
68 |
# ** gpt request **
|
69 |
-
gpt_say = yield from
|
70 |
-
|
71 |
-
|
|
|
|
|
|
|
|
|
|
|
72 |
chatbot[-1] = (i_say, gpt_say)
|
73 |
history.append(i_say)
|
74 |
history.append(gpt_say)
|
75 |
-
yield chatbot, history, msg
|
76 |
res = write_results_to_file(history)
|
77 |
chatbot.append(("完成了吗?", res))
|
78 |
-
yield chatbot, history, msg
|
79 |
|
80 |
|
81 |
@CatchException
|
82 |
-
def 总结word文档(txt,
|
83 |
import glob, os
|
84 |
|
85 |
# 基本信息:功能、贡献者
|
86 |
chatbot.append([
|
87 |
"函数插件功能?",
|
88 |
"批量总结Word文档。函数插件贡献者: JasonGuo1"])
|
89 |
-
yield chatbot, history
|
90 |
|
91 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
92 |
try:
|
@@ -95,7 +107,7 @@ def 总结word文档(txt, top_p, api_key, temperature, chatbot, history, systemP
|
|
95 |
report_execption(chatbot, history,
|
96 |
a=f"解��项目: {txt}",
|
97 |
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
|
98 |
-
yield chatbot, history
|
99 |
return
|
100 |
|
101 |
# 清空历史,以免输入溢出
|
@@ -107,7 +119,7 @@ def 总结word文档(txt, top_p, api_key, temperature, chatbot, history, systemP
|
|
107 |
else:
|
108 |
if txt == "": txt = '空空如也的输入栏'
|
109 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
110 |
-
yield chatbot, history
|
111 |
return
|
112 |
|
113 |
# 搜索需要处理的文件清单
|
@@ -120,8 +132,8 @@ def 总结word文档(txt, top_p, api_key, temperature, chatbot, history, systemP
|
|
120 |
# 如果没找到任何文件
|
121 |
if len(file_manifest) == 0:
|
122 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}")
|
123 |
-
yield chatbot, history
|
124 |
return
|
125 |
|
126 |
# 开始正式执行任务
|
127 |
-
yield from 解析docx(file_manifest, project_folder,
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
4 |
fast_debug = False
|
5 |
|
6 |
|
7 |
+
def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
8 |
import time, os
|
9 |
# pip install python-docx 用于docx格式,跨平台
|
10 |
# pip install pywin32 用于doc格式,仅支持Win平台
|
|
|
36 |
f'文章内容是 ```{file_content}```'
|
37 |
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 假设你是论文审稿专家,请对下面的文章片段做概述: {os.path.abspath(fp)}'
|
38 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
39 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
40 |
|
41 |
if not fast_debug:
|
42 |
msg = '正常'
|
43 |
# ** gpt request **
|
44 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
45 |
+
inputs=i_say,
|
46 |
+
inputs_show_user=i_say_show_user,
|
47 |
+
llm_kwargs=llm_kwargs,
|
48 |
+
chatbot=chatbot,
|
49 |
+
history=[],
|
50 |
+
sys_prompt="总结文章。"
|
51 |
+
) # 带超时倒计时
|
52 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
53 |
+
history.append(i_say_show_user)
|
54 |
history.append(gpt_say)
|
55 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
56 |
if not fast_debug: time.sleep(2)
|
57 |
|
58 |
"""
|
59 |
# 可按需启用
|
60 |
i_say = f'根据你上述的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一篇英文的。'
|
61 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
62 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
63 |
|
64 |
|
65 |
i_say = f'我想让你做一个论文写作导师。您的任务是使用人工智能工具(例如自然语言处理)提供有关如何改进其上述文章的反馈。' \
|
66 |
f'您还应该利用您在有效写作技巧方面的修辞知识和经验来建议作者可以更好地以书面形式表达他们的想法和想法的方法。' \
|
67 |
f'根据你之前的分析,提出建议'
|
68 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
69 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
70 |
|
71 |
"""
|
72 |
|
73 |
if not fast_debug:
|
74 |
msg = '正常'
|
75 |
# ** gpt request **
|
76 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
77 |
+
inputs=i_say,
|
78 |
+
inputs_show_user=i_say,
|
79 |
+
llm_kwargs=llm_kwargs,
|
80 |
+
chatbot=chatbot,
|
81 |
+
history=history,
|
82 |
+
sys_prompt="总结文章。"
|
83 |
+
) # 带超时倒计时
|
84 |
chatbot[-1] = (i_say, gpt_say)
|
85 |
history.append(i_say)
|
86 |
history.append(gpt_say)
|
87 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
88 |
res = write_results_to_file(history)
|
89 |
chatbot.append(("完成了吗?", res))
|
90 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
91 |
|
92 |
|
93 |
@CatchException
|
94 |
+
def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
95 |
import glob, os
|
96 |
|
97 |
# 基本信息:功能、贡献者
|
98 |
chatbot.append([
|
99 |
"函数插件功能?",
|
100 |
"批量总结Word文档。函数插件贡献者: JasonGuo1"])
|
101 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
102 |
|
103 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
104 |
try:
|
|
|
107 |
report_execption(chatbot, history,
|
108 |
a=f"解��项目: {txt}",
|
109 |
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
|
110 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
111 |
return
|
112 |
|
113 |
# 清空历史,以免输入溢出
|
|
|
119 |
else:
|
120 |
if txt == "": txt = '空空如也的输入栏'
|
121 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
122 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
123 |
return
|
124 |
|
125 |
# 搜索需要处理的文件清单
|
|
|
132 |
# 如果没找到任何文件
|
133 |
if len(file_manifest) == 0:
|
134 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}")
|
135 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
136 |
return
|
137 |
|
138 |
# 开始正式执行任务
|
139 |
+
yield from 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
crazy_functions/批量总结PDF文档.py
CHANGED
@@ -1,8 +1,9 @@
|
|
1 |
-
from
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
import re
|
4 |
import unicodedata
|
5 |
fast_debug = False
|
|
|
6 |
|
7 |
def is_paragraph_break(match):
|
8 |
"""
|
@@ -57,7 +58,7 @@ def clean_text(raw_text):
|
|
57 |
|
58 |
return final_text.strip()
|
59 |
|
60 |
-
def 解析PDF(file_manifest, project_folder,
|
61 |
import time, glob, os, fitz
|
62 |
print('begin analysis on:', file_manifest)
|
63 |
for index, fp in enumerate(file_manifest):
|
@@ -72,49 +73,60 @@ def 解析PDF(file_manifest, project_folder, top_p, api_key, temperature, chatbo
|
|
72 |
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
|
73 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
74 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
75 |
-
|
76 |
-
yield chatbot, history, '正常'
|
77 |
|
78 |
if not fast_debug:
|
79 |
msg = '正常'
|
80 |
# ** gpt request **
|
81 |
-
gpt_say = yield from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
82 |
|
83 |
-
print('[2] end gpt req')
|
84 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
85 |
history.append(i_say_show_user); history.append(gpt_say)
|
86 |
-
|
87 |
-
yield chatbot, history, msg
|
88 |
-
print('[4] next')
|
89 |
if not fast_debug: time.sleep(2)
|
90 |
|
91 |
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
|
92 |
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
|
93 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
94 |
-
yield chatbot, history
|
95 |
|
96 |
if not fast_debug:
|
97 |
msg = '正常'
|
98 |
# ** gpt request **
|
99 |
-
gpt_say = yield from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
|
101 |
chatbot[-1] = (i_say, gpt_say)
|
102 |
history.append(i_say); history.append(gpt_say)
|
103 |
-
yield chatbot, history, msg
|
104 |
res = write_results_to_file(history)
|
105 |
chatbot.append(("完成了吗?", res))
|
106 |
-
yield chatbot, history, msg
|
107 |
|
108 |
|
109 |
@CatchException
|
110 |
-
def 批量总结PDF文档(txt,
|
111 |
import glob, os
|
112 |
|
113 |
# 基本信息:功能、贡献者
|
114 |
chatbot.append([
|
115 |
"函数插件功能?",
|
116 |
"批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"])
|
117 |
-
yield chatbot, history
|
118 |
|
119 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
120 |
try:
|
@@ -123,7 +135,7 @@ def 批量总结PDF文档(txt, top_p, api_key, temperature, chatbot, history, sy
|
|
123 |
report_execption(chatbot, history,
|
124 |
a = f"解析项目: {txt}",
|
125 |
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
|
126 |
-
yield chatbot, history
|
127 |
return
|
128 |
|
129 |
# 清空历史,以免输入溢出
|
@@ -135,7 +147,7 @@ def 批量总结PDF文档(txt, top_p, api_key, temperature, chatbot, history, sy
|
|
135 |
else:
|
136 |
if txt == "": txt = '空空如也的输入栏'
|
137 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
138 |
-
yield chatbot, history
|
139 |
return
|
140 |
|
141 |
# 搜索需要处理的文件清单
|
@@ -147,8 +159,8 @@ def 批量总结PDF文档(txt, top_p, api_key, temperature, chatbot, history, sy
|
|
147 |
# 如果没找到任何文件
|
148 |
if len(file_manifest) == 0:
|
149 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
|
150 |
-
yield chatbot, history
|
151 |
return
|
152 |
|
153 |
# 开始正式执行任务
|
154 |
-
yield from 解析PDF(file_manifest, project_folder,
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
import re
|
4 |
import unicodedata
|
5 |
fast_debug = False
|
6 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
7 |
|
8 |
def is_paragraph_break(match):
|
9 |
"""
|
|
|
58 |
|
59 |
return final_text.strip()
|
60 |
|
61 |
+
def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
62 |
import time, glob, os, fitz
|
63 |
print('begin analysis on:', file_manifest)
|
64 |
for index, fp in enumerate(file_manifest):
|
|
|
73 |
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
|
74 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
75 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
76 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
|
77 |
|
78 |
if not fast_debug:
|
79 |
msg = '正常'
|
80 |
# ** gpt request **
|
81 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
82 |
+
inputs=i_say,
|
83 |
+
inputs_show_user=i_say_show_user,
|
84 |
+
llm_kwargs=llm_kwargs,
|
85 |
+
chatbot=chatbot,
|
86 |
+
history=[],
|
87 |
+
sys_prompt="总结文章。"
|
88 |
+
) # 带超时倒计时
|
89 |
+
|
90 |
|
|
|
91 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
92 |
history.append(i_say_show_user); history.append(gpt_say)
|
93 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
|
|
|
|
94 |
if not fast_debug: time.sleep(2)
|
95 |
|
96 |
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
|
97 |
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
|
98 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
99 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
100 |
|
101 |
if not fast_debug:
|
102 |
msg = '正常'
|
103 |
# ** gpt request **
|
104 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
105 |
+
inputs=i_say,
|
106 |
+
inputs_show_user=i_say,
|
107 |
+
llm_kwargs=llm_kwargs,
|
108 |
+
chatbot=chatbot,
|
109 |
+
history=history,
|
110 |
+
sys_prompt="总结文章。"
|
111 |
+
) # 带超时倒计时
|
112 |
|
113 |
chatbot[-1] = (i_say, gpt_say)
|
114 |
history.append(i_say); history.append(gpt_say)
|
115 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
116 |
res = write_results_to_file(history)
|
117 |
chatbot.append(("完成了吗?", res))
|
118 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
119 |
|
120 |
|
121 |
@CatchException
|
122 |
+
def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
123 |
import glob, os
|
124 |
|
125 |
# 基本信息:功能、贡献者
|
126 |
chatbot.append([
|
127 |
"函数插件功能?",
|
128 |
"批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"])
|
129 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
130 |
|
131 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
132 |
try:
|
|
|
135 |
report_execption(chatbot, history,
|
136 |
a = f"解析项目: {txt}",
|
137 |
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
|
138 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
139 |
return
|
140 |
|
141 |
# 清空历史,以免输入溢出
|
|
|
147 |
else:
|
148 |
if txt == "": txt = '空空如也的输入栏'
|
149 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
150 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
151 |
return
|
152 |
|
153 |
# 搜索需要处理的文件清单
|
|
|
159 |
# 如果没找到任何文件
|
160 |
if len(file_manifest) == 0:
|
161 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
|
162 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
163 |
return
|
164 |
|
165 |
# 开始正式执行任务
|
166 |
+
yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
crazy_functions/批量总结PDF文档pdfminer.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
-
from
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
|
|
3 |
|
4 |
fast_debug = False
|
5 |
|
@@ -61,7 +62,7 @@ def readPdf(pdfPath):
|
|
61 |
return outTextList
|
62 |
|
63 |
|
64 |
-
def 解析Paper(file_manifest, project_folder,
|
65 |
import time, glob, os
|
66 |
from bs4 import BeautifulSoup
|
67 |
print('begin analysis on:', file_manifest)
|
@@ -77,43 +78,51 @@ def 解析Paper(file_manifest, project_folder, top_p, api_key, temperature, chat
|
|
77 |
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
|
78 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
79 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
80 |
-
|
81 |
-
yield chatbot, history, '正常'
|
82 |
|
83 |
if not fast_debug:
|
84 |
msg = '正常'
|
85 |
# ** gpt request **
|
86 |
-
gpt_say = yield from
|
87 |
-
|
88 |
-
|
|
|
|
|
|
|
|
|
|
|
89 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
90 |
history.append(i_say_show_user); history.append(gpt_say)
|
91 |
-
|
92 |
-
yield chatbot, history, msg
|
93 |
-
print('[4] next')
|
94 |
if not fast_debug: time.sleep(2)
|
95 |
|
96 |
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
|
97 |
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
|
98 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
99 |
-
yield chatbot, history
|
100 |
|
101 |
if not fast_debug:
|
102 |
msg = '正常'
|
103 |
# ** gpt request **
|
104 |
-
gpt_say = yield from
|
105 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
106 |
chatbot[-1] = (i_say, gpt_say)
|
107 |
history.append(i_say); history.append(gpt_say)
|
108 |
-
yield chatbot, history, msg
|
109 |
res = write_results_to_file(history)
|
110 |
chatbot.append(("完成了吗?", res))
|
111 |
-
yield chatbot, history, msg
|
112 |
|
113 |
|
114 |
|
115 |
@CatchException
|
116 |
-
def 批量总结PDF文档pdfminer(txt,
|
117 |
history = [] # 清空历史,以免输入溢出
|
118 |
import glob, os
|
119 |
|
@@ -121,7 +130,7 @@ def 批量总结PDF文档pdfminer(txt, top_p, api_key, temperature, chatbot, his
|
|
121 |
chatbot.append([
|
122 |
"函数插件功能?",
|
123 |
"批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
|
124 |
-
yield chatbot, history
|
125 |
|
126 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
127 |
try:
|
@@ -130,14 +139,14 @@ def 批量总结PDF文档pdfminer(txt, top_p, api_key, temperature, chatbot, his
|
|
130 |
report_execption(chatbot, history,
|
131 |
a = f"解析项目: {txt}",
|
132 |
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
|
133 |
-
yield chatbot, history
|
134 |
return
|
135 |
if os.path.exists(txt):
|
136 |
project_folder = txt
|
137 |
else:
|
138 |
if txt == "": txt = '空空如也的输入栏'
|
139 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
140 |
-
yield chatbot, history
|
141 |
return
|
142 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
|
143 |
[f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
|
@@ -145,7 +154,7 @@ def 批量总结PDF文档pdfminer(txt, top_p, api_key, temperature, chatbot, his
|
|
145 |
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
146 |
if len(file_manifest) == 0:
|
147 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
|
148 |
-
yield chatbot, history
|
149 |
return
|
150 |
-
yield from 解析Paper(file_manifest, project_folder,
|
151 |
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
4 |
|
5 |
fast_debug = False
|
6 |
|
|
|
62 |
return outTextList
|
63 |
|
64 |
|
65 |
+
def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
66 |
import time, glob, os
|
67 |
from bs4 import BeautifulSoup
|
68 |
print('begin analysis on:', file_manifest)
|
|
|
78 |
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
|
79 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
80 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
81 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
|
82 |
|
83 |
if not fast_debug:
|
84 |
msg = '正常'
|
85 |
# ** gpt request **
|
86 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
87 |
+
inputs=i_say,
|
88 |
+
inputs_show_user=i_say_show_user,
|
89 |
+
llm_kwargs=llm_kwargs,
|
90 |
+
chatbot=chatbot,
|
91 |
+
history=[],
|
92 |
+
sys_prompt="总结文章。"
|
93 |
+
) # 带超时倒计时
|
94 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
95 |
history.append(i_say_show_user); history.append(gpt_say)
|
96 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
|
|
|
|
97 |
if not fast_debug: time.sleep(2)
|
98 |
|
99 |
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
|
100 |
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
|
101 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
102 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
103 |
|
104 |
if not fast_debug:
|
105 |
msg = '正常'
|
106 |
# ** gpt request **
|
107 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
108 |
+
inputs=i_say,
|
109 |
+
inputs_show_user=i_say,
|
110 |
+
llm_kwargs=llm_kwargs,
|
111 |
+
chatbot=chatbot,
|
112 |
+
history=history,
|
113 |
+
sys_prompt="总结文章。"
|
114 |
+
) # 带超时倒计时
|
115 |
chatbot[-1] = (i_say, gpt_say)
|
116 |
history.append(i_say); history.append(gpt_say)
|
117 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
118 |
res = write_results_to_file(history)
|
119 |
chatbot.append(("完成了吗?", res))
|
120 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
121 |
|
122 |
|
123 |
|
124 |
@CatchException
|
125 |
+
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
126 |
history = [] # 清空历史,以免输入溢出
|
127 |
import glob, os
|
128 |
|
|
|
130 |
chatbot.append([
|
131 |
"函数插件功能?",
|
132 |
"批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
|
133 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
134 |
|
135 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
136 |
try:
|
|
|
139 |
report_execption(chatbot, history,
|
140 |
a = f"解析项目: {txt}",
|
141 |
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
|
142 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
143 |
return
|
144 |
if os.path.exists(txt):
|
145 |
project_folder = txt
|
146 |
else:
|
147 |
if txt == "": txt = '空空如也的输入栏'
|
148 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
149 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
150 |
return
|
151 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
|
152 |
[f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
|
|
|
154 |
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
155 |
if len(file_manifest) == 0:
|
156 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
|
157 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
158 |
return
|
159 |
+
yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
160 |
|
crazy_functions/批量翻译PDF文档_多线程.py
ADDED
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
2 |
+
from toolbox import update_ui
|
3 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
4 |
+
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
5 |
+
from colorful import *
|
6 |
+
|
7 |
+
def read_and_clean_pdf_text(fp):
|
8 |
+
"""
|
9 |
+
这个函数用于分割pdf,用了很多trick,逻辑较乱,效果奇好,不建议任何人去读这个函数
|
10 |
+
|
11 |
+
**输入参数说明**
|
12 |
+
- `fp`:需要读取和清理文本的pdf文件路径
|
13 |
+
|
14 |
+
**输出参数说明**
|
15 |
+
- `meta_txt`:清理后的文本内容字符串
|
16 |
+
- `page_one_meta`:第一页清理后的文本内容列表
|
17 |
+
|
18 |
+
**函数功能**
|
19 |
+
读取pdf文件并清理其中的文本内容,清理规则包括:
|
20 |
+
- 提取所有块元的文本信息,并合并为一个字符串
|
21 |
+
- 去除短块(字符数小于100)并替换为回车符
|
22 |
+
- 清理多余的空行
|
23 |
+
- 合并小写字母开头的段落块并替换为空格
|
24 |
+
- 清除重复的换行
|
25 |
+
- 将每个换行符替换为两个换行符,使每个段落之间有两个换行符分隔
|
26 |
+
"""
|
27 |
+
import fitz, copy
|
28 |
+
import re
|
29 |
+
import numpy as np
|
30 |
+
fc = 0
|
31 |
+
fs = 1
|
32 |
+
fb = 2
|
33 |
+
REMOVE_FOOT_NOTE = True
|
34 |
+
REMOVE_FOOT_FFSIZE_PERCENT = 0.95
|
35 |
+
def primary_ffsize(l):
|
36 |
+
fsize_statiscs = {}
|
37 |
+
for wtf in l['spans']:
|
38 |
+
if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
|
39 |
+
fsize_statiscs[wtf['size']] += len(wtf['text'])
|
40 |
+
return max(fsize_statiscs, key=fsize_statiscs.get)
|
41 |
+
|
42 |
+
def ffsize_same(a,b):
|
43 |
+
return abs((a-b)/max(a,b)) < 0.02
|
44 |
+
# file_content = ""
|
45 |
+
with fitz.open(fp) as doc:
|
46 |
+
meta_txt = []
|
47 |
+
meta_font = []
|
48 |
+
|
49 |
+
meta_line = []
|
50 |
+
meta_span = []
|
51 |
+
for index, page in enumerate(doc):
|
52 |
+
# file_content += page.get_text()
|
53 |
+
text_areas = page.get_text("dict") # 获取页面上的文本信息
|
54 |
+
for t in text_areas['blocks']:
|
55 |
+
if 'lines' in t:
|
56 |
+
pf = 998
|
57 |
+
for l in t['lines']:
|
58 |
+
txt_line = "".join([wtf['text'] for wtf in l['spans']])
|
59 |
+
pf = primary_ffsize(l)
|
60 |
+
meta_line.append([txt_line, pf, l['bbox'], l])
|
61 |
+
for wtf in l['spans']: # for l in t['lines']:
|
62 |
+
meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
|
63 |
+
# meta_line.append(["NEW_BLOCK", pf])
|
64 |
+
# 块元提取 for each word segment with in line for each line cross-line words for each block
|
65 |
+
meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
|
66 |
+
'- ', '') for t in text_areas['blocks'] if 'lines' in t])
|
67 |
+
meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
|
68 |
+
for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
|
69 |
+
if index == 0:
|
70 |
+
page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
|
71 |
+
'- ', '') for t in text_areas['blocks'] if 'lines' in t]
|
72 |
+
# 获取正文主字体
|
73 |
+
fsize_statiscs = {}
|
74 |
+
for span in meta_span:
|
75 |
+
if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
|
76 |
+
fsize_statiscs[span[1]] += span[2]
|
77 |
+
main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
|
78 |
+
if REMOVE_FOOT_NOTE:
|
79 |
+
give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
|
80 |
+
|
81 |
+
# 切分和重新整合
|
82 |
+
mega_sec = []
|
83 |
+
sec = []
|
84 |
+
for index, line in enumerate(meta_line):
|
85 |
+
if index == 0:
|
86 |
+
sec.append(line[fc])
|
87 |
+
continue
|
88 |
+
if REMOVE_FOOT_NOTE:
|
89 |
+
if meta_line[index][fs] <= give_up_fize_threshold:
|
90 |
+
continue
|
91 |
+
if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
|
92 |
+
# 尝试识别段落
|
93 |
+
if meta_line[index][fc].endswith('.') and\
|
94 |
+
(meta_line[index-1][fc] != 'NEW_BLOCK') and \
|
95 |
+
(meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
|
96 |
+
sec[-1] += line[fc]
|
97 |
+
sec[-1] += "\n\n"
|
98 |
+
else:
|
99 |
+
sec[-1] += " "
|
100 |
+
sec[-1] += line[fc]
|
101 |
+
else:
|
102 |
+
if (index+1 < len(meta_line)) and \
|
103 |
+
meta_line[index][fs] > main_fsize:
|
104 |
+
# 单行 + 字体大
|
105 |
+
mega_sec.append(copy.deepcopy(sec))
|
106 |
+
sec = []
|
107 |
+
sec.append("# " + line[fc])
|
108 |
+
else:
|
109 |
+
# 尝试识别section
|
110 |
+
if meta_line[index-1][fs] > meta_line[index][fs]:
|
111 |
+
sec.append("\n" + line[fc])
|
112 |
+
else:
|
113 |
+
sec.append(line[fc])
|
114 |
+
mega_sec.append(copy.deepcopy(sec))
|
115 |
+
|
116 |
+
finals = []
|
117 |
+
for ms in mega_sec:
|
118 |
+
final = " ".join(ms)
|
119 |
+
final = final.replace('- ', ' ')
|
120 |
+
finals.append(final)
|
121 |
+
meta_txt = finals
|
122 |
+
|
123 |
+
def 把字符太少的块清除为回车(meta_txt):
|
124 |
+
for index, block_txt in enumerate(meta_txt):
|
125 |
+
if len(block_txt) < 100:
|
126 |
+
meta_txt[index] = '\n'
|
127 |
+
return meta_txt
|
128 |
+
meta_txt = 把字符太少的块清除为回车(meta_txt)
|
129 |
+
|
130 |
+
def 清理多余的空行(meta_txt):
|
131 |
+
for index in reversed(range(1, len(meta_txt))):
|
132 |
+
if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
|
133 |
+
meta_txt.pop(index)
|
134 |
+
return meta_txt
|
135 |
+
meta_txt = 清理多余的空行(meta_txt)
|
136 |
+
|
137 |
+
def 合并小写开头的段落块(meta_txt):
|
138 |
+
def starts_with_lowercase_word(s):
|
139 |
+
pattern = r"^[a-z]+"
|
140 |
+
match = re.match(pattern, s)
|
141 |
+
if match:
|
142 |
+
return True
|
143 |
+
else:
|
144 |
+
return False
|
145 |
+
for _ in range(100):
|
146 |
+
for index, block_txt in enumerate(meta_txt):
|
147 |
+
if starts_with_lowercase_word(block_txt):
|
148 |
+
if meta_txt[index-1] != '\n':
|
149 |
+
meta_txt[index-1] += ' '
|
150 |
+
else:
|
151 |
+
meta_txt[index-1] = ''
|
152 |
+
meta_txt[index-1] += meta_txt[index]
|
153 |
+
meta_txt[index] = '\n'
|
154 |
+
return meta_txt
|
155 |
+
meta_txt = 合并小写开头的段落块(meta_txt)
|
156 |
+
meta_txt = 清理多余的空行(meta_txt)
|
157 |
+
|
158 |
+
meta_txt = '\n'.join(meta_txt)
|
159 |
+
# 清除重复的换行
|
160 |
+
for _ in range(5):
|
161 |
+
meta_txt = meta_txt.replace('\n\n', '\n')
|
162 |
+
|
163 |
+
# 换行 -> 双换行
|
164 |
+
meta_txt = meta_txt.replace('\n', '\n\n')
|
165 |
+
|
166 |
+
for f in finals:
|
167 |
+
print亮黄(f)
|
168 |
+
print亮绿('***************************')
|
169 |
+
|
170 |
+
return meta_txt, page_one_meta
|
171 |
+
|
172 |
+
|
173 |
+
@CatchException
|
174 |
+
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
|
175 |
+
import glob
|
176 |
+
import os
|
177 |
+
|
178 |
+
# 基本信息:功能、贡献者
|
179 |
+
chatbot.append([
|
180 |
+
"函数插件功能?",
|
181 |
+
"批量总结PDF文档。函数插件贡献者: Binary-Husky"])
|
182 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
183 |
+
|
184 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
185 |
+
try:
|
186 |
+
import fitz
|
187 |
+
import tiktoken
|
188 |
+
except:
|
189 |
+
report_execption(chatbot, history,
|
190 |
+
a=f"解析项目: {txt}",
|
191 |
+
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken```。")
|
192 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
193 |
+
return
|
194 |
+
|
195 |
+
# 清空历史,以免输入溢出
|
196 |
+
history = []
|
197 |
+
|
198 |
+
# 检测输入参数,如没有给定输入参数,直接退出
|
199 |
+
if os.path.exists(txt):
|
200 |
+
project_folder = txt
|
201 |
+
else:
|
202 |
+
if txt == "":
|
203 |
+
txt = '空空如也的输入栏'
|
204 |
+
report_execption(chatbot, history,
|
205 |
+
a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
206 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
207 |
+
return
|
208 |
+
|
209 |
+
# 搜索需要处理的文件清单
|
210 |
+
file_manifest = [f for f in glob.glob(
|
211 |
+
f'{project_folder}/**/*.pdf', recursive=True)]
|
212 |
+
|
213 |
+
# 如果没找到任何文件
|
214 |
+
if len(file_manifest) == 0:
|
215 |
+
report_execption(chatbot, history,
|
216 |
+
a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
|
217 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
218 |
+
return
|
219 |
+
|
220 |
+
# 开始正式执行任务
|
221 |
+
yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt)
|
222 |
+
|
223 |
+
|
224 |
+
def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt):
|
225 |
+
import os
|
226 |
+
import tiktoken
|
227 |
+
TOKEN_LIMIT_PER_FRAGMENT = 1600
|
228 |
+
generated_conclusion_files = []
|
229 |
+
for index, fp in enumerate(file_manifest):
|
230 |
+
|
231 |
+
# 读取PDF文件
|
232 |
+
file_content, page_one = read_and_clean_pdf_text(fp)
|
233 |
+
|
234 |
+
# 递归地切割PDF文件
|
235 |
+
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
236 |
+
from toolbox import get_conf
|
237 |
+
enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
|
238 |
+
def get_token_num(txt): return len(enc.encode(txt))
|
239 |
+
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
240 |
+
txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
|
241 |
+
page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
|
242 |
+
txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
|
243 |
+
|
244 |
+
# 为了更好的效果,我们剥离Introduction之后的部分(如果有)
|
245 |
+
paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
|
246 |
+
|
247 |
+
# 单线,获取文章meta信息
|
248 |
+
paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
249 |
+
inputs=f"以下是一篇学术论文的基础信息,请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分。请用markdown格式输出,最后用中文翻译摘要部分。请提取:{paper_meta}",
|
250 |
+
inputs_show_user=f"请从{fp}中提取出“标题”、“收录会议或期刊”等基本信息。",
|
251 |
+
llm_kwargs=llm_kwargs,
|
252 |
+
chatbot=chatbot, history=[],
|
253 |
+
sys_prompt="Your job is to collect information from materials。",
|
254 |
+
)
|
255 |
+
|
256 |
+
# 多线,翻译
|
257 |
+
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
258 |
+
inputs_array=[
|
259 |
+
f"以下是你需要翻译的论文片段:\n{frag}" for frag in paper_fragments],
|
260 |
+
inputs_show_user_array=[f"\n---\n 原文: \n\n {frag.replace('#', '')} \n---\n 翻译:\n " for frag in paper_fragments],
|
261 |
+
llm_kwargs=llm_kwargs,
|
262 |
+
chatbot=chatbot,
|
263 |
+
history_array=[[paper_meta] for _ in paper_fragments],
|
264 |
+
sys_prompt_array=[
|
265 |
+
"请你作为一个学术翻译,负责把学术论文的片段准确翻译成中文。" for _ in paper_fragments],
|
266 |
+
max_workers=16 # OpenAI所允许的最大并行过载
|
267 |
+
)
|
268 |
+
|
269 |
+
# 整理报告的格式
|
270 |
+
for i,k in enumerate(gpt_response_collection):
|
271 |
+
if i%2==0:
|
272 |
+
gpt_response_collection[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection)//2}]:\n "
|
273 |
+
else:
|
274 |
+
gpt_response_collection[i] = gpt_response_collection[i]
|
275 |
+
final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
|
276 |
+
final.extend(gpt_response_collection)
|
277 |
+
create_report_file_name = f"{os.path.basename(fp)}.trans.md"
|
278 |
+
res = write_results_to_file(final, file_name=create_report_file_name)
|
279 |
+
|
280 |
+
# 更新UI
|
281 |
+
generated_conclusion_files.append(f'./gpt_log/{create_report_file_name}')
|
282 |
+
chatbot.append((f"{fp}完成了吗?", res))
|
283 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
284 |
+
|
285 |
+
# 准备文件的下载
|
286 |
+
import shutil
|
287 |
+
for pdf_path in generated_conclusion_files:
|
288 |
+
# 重命名文件
|
289 |
+
rename_file = f'./gpt_log/总结论文-{os.path.basename(pdf_path)}'
|
290 |
+
if os.path.exists(rename_file):
|
291 |
+
os.remove(rename_file)
|
292 |
+
shutil.copyfile(pdf_path, rename_file)
|
293 |
+
if os.path.exists(pdf_path):
|
294 |
+
os.remove(pdf_path)
|
295 |
+
chatbot.append(("给出输出文件清单", str(generated_conclusion_files)))
|
296 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
crazy_functions/理解PDF文档内容.py
ADDED
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption
|
3 |
+
import re
|
4 |
+
import unicodedata
|
5 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
6 |
+
fast_debug = False
|
7 |
+
|
8 |
+
def is_paragraph_break(match):
|
9 |
+
"""
|
10 |
+
根据给定的匹配结果来判断换行符是否表示段落分隔。
|
11 |
+
如果换行符前为句子结束标志(句号,感叹号,问号),且下一个字符为大写字母,则换行符更有可能表示段落分隔。
|
12 |
+
也可以根据之前的内容长度来判断段落是否已经足够长。
|
13 |
+
"""
|
14 |
+
prev_char, next_char = match.groups()
|
15 |
+
|
16 |
+
# 句子结束标志
|
17 |
+
sentence_endings = ".!?"
|
18 |
+
|
19 |
+
# 设定一个最小段落长度阈值
|
20 |
+
min_paragraph_length = 140
|
21 |
+
|
22 |
+
if prev_char in sentence_endings and next_char.isupper() and len(match.string[:match.start(1)]) > min_paragraph_length:
|
23 |
+
return "\n\n"
|
24 |
+
else:
|
25 |
+
return " "
|
26 |
+
|
27 |
+
def normalize_text(text):
|
28 |
+
"""
|
29 |
+
通过把连字(ligatures)等文本特殊符号转换为其基本形式来对文本进行归一化处理。
|
30 |
+
例如,将连字 "fi" 转换为 "f" 和 "i"。
|
31 |
+
"""
|
32 |
+
# 对文本进行归一化处理,分解连字
|
33 |
+
normalized_text = unicodedata.normalize("NFKD", text)
|
34 |
+
|
35 |
+
# 替换其他特殊字符
|
36 |
+
cleaned_text = re.sub(r'[^\x00-\x7F]+', '', normalized_text)
|
37 |
+
|
38 |
+
return cleaned_text
|
39 |
+
|
40 |
+
def clean_text(raw_text):
|
41 |
+
"""
|
42 |
+
对从 PDF 提取出的原始文本进行清洗和格式化处理。
|
43 |
+
1. 对原始文本进行归一化处理。
|
44 |
+
2. 替换跨行的连词,例如 “Espe-\ncially” 转换为 “Especially”。
|
45 |
+
3. 根据 heuristic 规则判断换行符是否是段落分隔,并相应地进行替换。
|
46 |
+
"""
|
47 |
+
# 对文本进行归一化处理
|
48 |
+
normalized_text = normalize_text(raw_text)
|
49 |
+
|
50 |
+
# 替换跨行的连词
|
51 |
+
text = re.sub(r'(\w+-\n\w+)', lambda m: m.group(1).replace('-\n', ''), normalized_text)
|
52 |
+
|
53 |
+
# 根据前后相邻字符的特点,找到原文本中的换行符
|
54 |
+
newlines = re.compile(r'(\S)\n(\S)')
|
55 |
+
|
56 |
+
# 根据 heuristic 规则,用空格或段落分隔符替换原换行符
|
57 |
+
final_text = re.sub(newlines, lambda m: m.group(1) + is_paragraph_break(m) + m.group(2), text)
|
58 |
+
|
59 |
+
return final_text.strip()
|
60 |
+
|
61 |
+
def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
62 |
+
import time, glob, os, fitz
|
63 |
+
print('begin analysis on:', file_name)
|
64 |
+
|
65 |
+
with fitz.open(file_name) as doc:
|
66 |
+
file_content = ""
|
67 |
+
for page in doc:
|
68 |
+
file_content += page.get_text()
|
69 |
+
file_content = clean_text(file_content)
|
70 |
+
# print(file_content)
|
71 |
+
split_number = 10000
|
72 |
+
split_group = (len(file_content)//split_number)+1
|
73 |
+
for i in range(0,split_group):
|
74 |
+
if i==0:
|
75 |
+
prefix = "接下来请你仔细分析下面的论文,学习里面的内容(专业术语、公式、数学概念).并且注意:由于论文内容较多,将分批次发送,每次发送完之后,你只需要回答“接受完成”"
|
76 |
+
i_say = prefix + f'文件名是{file_name},文章内容第{i+1}部分是 ```{file_content[i*split_number:(i+1)*split_number]}```'
|
77 |
+
i_say_show_user = f'文件名是:\n{file_name},\n由于论文内容过长,将分批请求(共{len(file_content)}字符,将分为{split_group}批,每批{split_number}字符)。\n当前发送{i+1}/{split_group}部分'
|
78 |
+
elif i==split_group-1:
|
79 |
+
i_say = f'你只需要回答“所有论文接受完成,请进行下一步”。文章内容第{i+1}/{split_group}部分是 ```{file_content[i*split_number:]}```'
|
80 |
+
i_say_show_user = f'当前发送{i+1}/{split_group}部分'
|
81 |
+
else:
|
82 |
+
i_say = f'你只需要回答“接受完成”。文章内容第{i+1}/{split_group}部分是 ```{file_content[i*split_number:(i+1)*split_number]}```'
|
83 |
+
i_say_show_user = f'当前发送{i+1}/{split_group}部分'
|
84 |
+
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
85 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt="") # 带超时倒计时
|
86 |
+
while "完成" not in gpt_say:
|
87 |
+
i_say = f'你只需要回答“接受完成”。文章内容第{i+1}/{split_group}部分是 ```{file_content[i*split_number:(i+1)*split_number]}```'
|
88 |
+
i_say_show_user = f'出现error,重新发送{i+1}/{split_group}部分'
|
89 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt="") # 带超时倒计时
|
90 |
+
time.sleep(1)
|
91 |
+
chatbot[-1] = (i_say_show_user, gpt_say)
|
92 |
+
history.append(i_say_show_user); history.append(gpt_say)
|
93 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
94 |
+
time.sleep(2)
|
95 |
+
|
96 |
+
i_say = f'接下来,请你扮演一名专业的学术教授,利用你的所有知识并且结合这篇文章,回答我的问题。(请牢记:1.直到我说“退出”,你才能结束任务;2.所有问题需要紧密围绕文章内容;3.如果有公式,请使用tex渲染)'
|
97 |
+
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
98 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
99 |
+
|
100 |
+
# ** gpt request **
|
101 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt="") # 带超时倒计时
|
102 |
+
chatbot[-1] = (i_say, gpt_say)
|
103 |
+
history.append(i_say); history.append(gpt_say)
|
104 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
105 |
+
|
106 |
+
|
107 |
+
@CatchException
|
108 |
+
def 理解PDF文档内容(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
109 |
+
import glob, os
|
110 |
+
|
111 |
+
# 基本信息:功能、贡献者
|
112 |
+
chatbot.append([
|
113 |
+
"函数插件功能?",
|
114 |
+
"理解PDF论文内容,并且将结合上下文内容,进行学术解答。函数插件贡献者: Hanzoe。"])
|
115 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
116 |
+
|
117 |
+
import tkinter as tk
|
118 |
+
from tkinter import filedialog
|
119 |
+
|
120 |
+
root = tk.Tk()
|
121 |
+
root.withdraw()
|
122 |
+
txt = filedialog.askopenfilename()
|
123 |
+
|
124 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
125 |
+
try:
|
126 |
+
import fitz
|
127 |
+
except:
|
128 |
+
report_execption(chatbot, history,
|
129 |
+
a = f"解析项目: {txt}",
|
130 |
+
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
|
131 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
132 |
+
return
|
133 |
+
|
134 |
+
# 清空历史,以免输入溢出
|
135 |
+
history = []
|
136 |
+
|
137 |
+
# 开始正式执行任务
|
138 |
+
yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
139 |
+
|
140 |
+
|
141 |
+
|
142 |
+
@CatchException
|
143 |
+
def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
144 |
+
import glob, os
|
145 |
+
|
146 |
+
# 基本信息:功能、贡献者
|
147 |
+
chatbot.append([
|
148 |
+
"函数插件功能?",
|
149 |
+
"理解PDF论文内容,并且将结合上下文内容,进行学术解答。函数插件贡献者: Hanzoe。"])
|
150 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
151 |
+
|
152 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
153 |
+
try:
|
154 |
+
import fitz
|
155 |
+
except:
|
156 |
+
report_execption(chatbot, history,
|
157 |
+
a = f"解析项目: {txt}",
|
158 |
+
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
|
159 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
160 |
+
return
|
161 |
+
|
162 |
+
# 清空历史,以免输入溢出
|
163 |
+
history = []
|
164 |
+
|
165 |
+
# 检测输入参数,如没有给定输入参数,直接退出
|
166 |
+
if os.path.exists(txt):
|
167 |
+
project_folder = txt
|
168 |
+
else:
|
169 |
+
if txt == "":
|
170 |
+
txt = '空空如也的输入栏'
|
171 |
+
report_execption(chatbot, history,
|
172 |
+
a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
173 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
174 |
+
return
|
175 |
+
|
176 |
+
# 搜索需要处理的文件清单
|
177 |
+
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
|
178 |
+
# 如果没找到任何文件
|
179 |
+
if len(file_manifest) == 0:
|
180 |
+
report_execption(chatbot, history,
|
181 |
+
a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
|
182 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
183 |
+
return
|
184 |
+
txt = file_manifest[0]
|
185 |
+
# 开始正式执行任务
|
186 |
+
yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
crazy_functions/生成函数注释.py
CHANGED
@@ -1,10 +1,10 @@
|
|
1 |
-
from
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
|
|
3 |
fast_debug = False
|
4 |
|
5 |
-
|
6 |
-
|
7 |
-
import time, glob, os
|
8 |
print('begin analysis on:', file_manifest)
|
9 |
for index, fp in enumerate(file_manifest):
|
10 |
with open(fp, 'r', encoding='utf-8') as f:
|
@@ -13,31 +13,28 @@ def 生成函数注释(file_manifest, project_folder, top_p, api_key, temperatur
|
|
13 |
i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
|
14 |
i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
|
15 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
16 |
-
|
17 |
-
yield chatbot, history, '正常'
|
18 |
|
19 |
if not fast_debug:
|
20 |
msg = '正常'
|
21 |
# ** gpt request **
|
22 |
-
gpt_say = yield from
|
|
|
23 |
|
24 |
-
print('[2] end gpt req')
|
25 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
26 |
history.append(i_say_show_user); history.append(gpt_say)
|
27 |
-
|
28 |
-
yield chatbot, history, msg
|
29 |
-
print('[4] next')
|
30 |
if not fast_debug: time.sleep(2)
|
31 |
|
32 |
if not fast_debug:
|
33 |
res = write_results_to_file(history)
|
34 |
chatbot.append(("完成了吗?", res))
|
35 |
-
yield chatbot, history, msg
|
36 |
|
37 |
|
38 |
|
39 |
@CatchException
|
40 |
-
def 批量生成函数注释(txt,
|
41 |
history = [] # 清空历史,以免输入溢出
|
42 |
import glob, os
|
43 |
if os.path.exists(txt):
|
@@ -45,13 +42,13 @@ def 批量生成函数注释(txt, top_p, api_key, temperature, chatbot, history,
|
|
45 |
else:
|
46 |
if txt == "": txt = '空空如也的输入栏'
|
47 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
48 |
-
yield chatbot, history
|
49 |
return
|
50 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
|
51 |
[f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
|
52 |
|
53 |
if len(file_manifest) == 0:
|
54 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
55 |
-
yield chatbot, history
|
56 |
return
|
57 |
-
yield from 生成函数注释(file_manifest, project_folder,
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
4 |
fast_debug = False
|
5 |
|
6 |
+
def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
7 |
+
import time, os
|
|
|
8 |
print('begin analysis on:', file_manifest)
|
9 |
for index, fp in enumerate(file_manifest):
|
10 |
with open(fp, 'r', encoding='utf-8') as f:
|
|
|
13 |
i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
|
14 |
i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
|
15 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
16 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
|
17 |
|
18 |
if not fast_debug:
|
19 |
msg = '正常'
|
20 |
# ** gpt request **
|
21 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
22 |
+
i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # 带超时倒计时
|
23 |
|
|
|
24 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
25 |
history.append(i_say_show_user); history.append(gpt_say)
|
26 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
|
|
|
|
27 |
if not fast_debug: time.sleep(2)
|
28 |
|
29 |
if not fast_debug:
|
30 |
res = write_results_to_file(history)
|
31 |
chatbot.append(("完成了吗?", res))
|
32 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
33 |
|
34 |
|
35 |
|
36 |
@CatchException
|
37 |
+
def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
38 |
history = [] # 清空历史,以免输入溢出
|
39 |
import glob, os
|
40 |
if os.path.exists(txt):
|
|
|
42 |
else:
|
43 |
if txt == "": txt = '空空如也的输入栏'
|
44 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
45 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
46 |
return
|
47 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
|
48 |
[f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
|
49 |
|
50 |
if len(file_manifest) == 0:
|
51 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
52 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
53 |
return
|
54 |
+
yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
crazy_functions/解析项目源代码.py
CHANGED
@@ -1,96 +1,115 @@
|
|
1 |
-
from
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
for index, fp in enumerate(file_manifest):
|
9 |
with open(fp, 'r', encoding='utf-8') as f:
|
10 |
file_content = f.read()
|
11 |
-
|
12 |
prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
|
13 |
i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
|
14 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
|
48 |
|
49 |
@CatchException
|
50 |
-
def 解析项目本身(txt,
|
51 |
history = [] # 清空历史,以免输入溢出
|
52 |
-
import
|
53 |
file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
|
54 |
-
[f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
|
63 |
-
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
64 |
-
yield chatbot, history, '正常'
|
65 |
-
|
66 |
-
if not fast_debug:
|
67 |
-
# ** gpt request **
|
68 |
-
# gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, api_key=api_key, temperature=temperature)
|
69 |
-
gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, api_key, temperature, history=[], long_connection=True) # 带超时倒计时
|
70 |
-
|
71 |
-
chatbot[-1] = (i_say_show_user, gpt_say)
|
72 |
-
history.append(i_say_show_user); history.append(gpt_say)
|
73 |
-
yield chatbot, history, '正常'
|
74 |
-
time.sleep(2)
|
75 |
-
|
76 |
-
i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。'
|
77 |
-
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
78 |
-
yield chatbot, history, '正常'
|
79 |
-
|
80 |
-
if not fast_debug:
|
81 |
-
# ** gpt request **
|
82 |
-
# gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, api_key=api_key, temperature=temperature, history=history)
|
83 |
-
gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, api_key, temperature, history=history, long_connection=True) # 带超时倒计时
|
84 |
-
|
85 |
-
chatbot[-1] = (i_say, gpt_say)
|
86 |
-
history.append(i_say); history.append(gpt_say)
|
87 |
-
yield chatbot, history, '正常'
|
88 |
-
res = write_results_to_file(history)
|
89 |
-
chatbot.append(("完成了吗?", res))
|
90 |
-
yield chatbot, history, '正常'
|
91 |
|
92 |
@CatchException
|
93 |
-
def 解析一个Python项目(txt,
|
94 |
history = [] # 清空历史,以免输入溢出
|
95 |
import glob, os
|
96 |
if os.path.exists(txt):
|
@@ -98,18 +117,18 @@ def 解析一个Python项目(txt, top_p, api_key, temperature, chatbot, history,
|
|
98 |
else:
|
99 |
if txt == "": txt = '空空如也的输入栏'
|
100 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
101 |
-
yield chatbot, history
|
102 |
return
|
103 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
|
104 |
if len(file_manifest) == 0:
|
105 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
|
106 |
-
yield chatbot, history
|
107 |
return
|
108 |
-
yield from
|
109 |
|
110 |
|
111 |
@CatchException
|
112 |
-
def 解析一个C项目的头文件(txt,
|
113 |
history = [] # 清空历史,以免输入溢出
|
114 |
import glob, os
|
115 |
if os.path.exists(txt):
|
@@ -117,19 +136,19 @@ def 解析一个C项目的头文件(txt, top_p, api_key, temperature, chatbot, h
|
|
117 |
else:
|
118 |
if txt == "": txt = '空空如也的输入栏'
|
119 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
120 |
-
yield chatbot, history
|
121 |
return
|
122 |
-
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)]
|
123 |
-
|
124 |
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
125 |
if len(file_manifest) == 0:
|
126 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
|
127 |
-
yield chatbot, history
|
128 |
return
|
129 |
-
yield from
|
130 |
|
131 |
@CatchException
|
132 |
-
def 解析一个C项目(txt,
|
133 |
history = [] # 清空历史,以免输入溢出
|
134 |
import glob, os
|
135 |
if os.path.exists(txt):
|
@@ -137,20 +156,21 @@ def 解析一个C项目(txt, top_p, api_key, temperature, chatbot, history, syst
|
|
137 |
else:
|
138 |
if txt == "": txt = '空空如也的输入栏'
|
139 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
140 |
-
yield chatbot, history
|
141 |
return
|
142 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
|
143 |
[f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
|
|
|
144 |
[f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
145 |
if len(file_manifest) == 0:
|
146 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
|
147 |
-
yield chatbot, history
|
148 |
return
|
149 |
-
yield from
|
150 |
|
151 |
|
152 |
@CatchException
|
153 |
-
def 解析一个Java项目(txt,
|
154 |
history = [] # 清空历史,以免输入溢出
|
155 |
import glob, os
|
156 |
if os.path.exists(txt):
|
@@ -158,7 +178,7 @@ def 解析一个Java项目(txt, top_p, api_key, temperature, chatbot, history, s
|
|
158 |
else:
|
159 |
if txt == "": txt = '空空如也的输入栏'
|
160 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
161 |
-
yield chatbot, history
|
162 |
return
|
163 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
|
164 |
[f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \
|
@@ -166,13 +186,13 @@ def 解析一个Java项目(txt, top_p, api_key, temperature, chatbot, history, s
|
|
166 |
[f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
|
167 |
if len(file_manifest) == 0:
|
168 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
|
169 |
-
yield chatbot, history
|
170 |
return
|
171 |
-
yield from
|
172 |
|
173 |
|
174 |
@CatchException
|
175 |
-
def 解析一个Rect项目(txt,
|
176 |
history = [] # 清空历史,以免输入溢出
|
177 |
import glob, os
|
178 |
if os.path.exists(txt):
|
@@ -180,7 +200,7 @@ def 解析一个Rect项目(txt, top_p, api_key, temperature, chatbot, history, s
|
|
180 |
else:
|
181 |
if txt == "": txt = '空空如也的输入栏'
|
182 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
183 |
-
yield chatbot, history
|
184 |
return
|
185 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
|
186 |
[f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
|
@@ -189,13 +209,13 @@ def 解析一个Rect项目(txt, top_p, api_key, temperature, chatbot, history, s
|
|
189 |
[f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
|
190 |
if len(file_manifest) == 0:
|
191 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}")
|
192 |
-
yield chatbot, history
|
193 |
return
|
194 |
-
yield from
|
195 |
|
196 |
|
197 |
@CatchException
|
198 |
-
def 解析一个Golang项目(txt,
|
199 |
history = [] # 清空历史,以免输入溢出
|
200 |
import glob, os
|
201 |
if os.path.exists(txt):
|
@@ -203,11 +223,11 @@ def 解析一个Golang项目(txt, top_p, api_key, temperature, chatbot, history,
|
|
203 |
else:
|
204 |
if txt == "": txt = '空空如也的输入栏'
|
205 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
206 |
-
yield chatbot, history
|
207 |
return
|
208 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)]
|
209 |
if len(file_manifest) == 0:
|
210 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
|
211 |
-
yield chatbot, history
|
212 |
return
|
213 |
-
yield from
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
|
4 |
+
def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
5 |
+
import os, copy
|
6 |
+
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
7 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
8 |
+
msg = '正常'
|
9 |
+
inputs_array = []
|
10 |
+
inputs_show_user_array = []
|
11 |
+
history_array = []
|
12 |
+
sys_prompt_array = []
|
13 |
+
report_part_1 = []
|
14 |
+
|
15 |
+
############################## <第一步,逐个文件分析,多线程> ##################################
|
16 |
for index, fp in enumerate(file_manifest):
|
17 |
with open(fp, 'r', encoding='utf-8') as f:
|
18 |
file_content = f.read()
|
|
|
19 |
prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
|
20 |
i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
|
21 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
|
22 |
+
# 装载请求内容
|
23 |
+
inputs_array.append(i_say)
|
24 |
+
inputs_show_user_array.append(i_say_show_user)
|
25 |
+
history_array.append([])
|
26 |
+
sys_prompt_array.append("你是一个程序架构分析师,正在分析一个源代码项目。你的回答必须简单明了。")
|
27 |
+
|
28 |
+
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
29 |
+
inputs_array = inputs_array,
|
30 |
+
inputs_show_user_array = inputs_show_user_array,
|
31 |
+
history_array = history_array,
|
32 |
+
sys_prompt_array = sys_prompt_array,
|
33 |
+
llm_kwargs = llm_kwargs,
|
34 |
+
chatbot = chatbot,
|
35 |
+
show_user_at_complete = True
|
36 |
+
)
|
37 |
+
|
38 |
+
report_part_1 = copy.deepcopy(gpt_response_collection)
|
39 |
+
history_to_return = report_part_1
|
40 |
+
res = write_results_to_file(report_part_1)
|
41 |
+
chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。"))
|
42 |
+
yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
|
43 |
+
|
44 |
+
############################## <存储中间数据进行调试> ##################################
|
45 |
|
46 |
+
# def objdump(obj):
|
47 |
+
# import pickle
|
48 |
+
# with open('objdump.tmp', 'wb+') as f:
|
49 |
+
# pickle.dump(obj, f)
|
50 |
+
# return
|
51 |
+
|
52 |
+
# def objload():
|
53 |
+
# import pickle, os
|
54 |
+
# if not os.path.exists('objdump.tmp'):
|
55 |
+
# return
|
56 |
+
# with open('objdump.tmp', 'rb') as f:
|
57 |
+
# return pickle.load(f)
|
58 |
+
# objdump([report_part_1, gpt_response_collection, history_to_return, file_manifest, project_folder, fp, llm_kwargs, chatbot])
|
59 |
+
|
60 |
+
############################## <第二步,综合,单线程,分组+迭代处理> ##################################
|
61 |
+
batchsize = 16 # 10个文件为一组
|
62 |
+
report_part_2 = []
|
63 |
+
previous_iteration_files = []
|
64 |
+
last_iteration_result = ""
|
65 |
+
while True:
|
66 |
+
if len(file_manifest) == 0: break
|
67 |
+
this_iteration_file_manifest = file_manifest[:batchsize]
|
68 |
+
this_iteration_gpt_response_collection = gpt_response_collection[:batchsize*2]
|
69 |
+
file_rel_path = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)]
|
70 |
+
# 把“请对下面的程序文件做一个概述” 替换成 精简的 "文件名:{all_file[index]}"
|
71 |
+
for index, content in enumerate(this_iteration_gpt_response_collection):
|
72 |
+
if index%2==0: this_iteration_gpt_response_collection[index] = f"{file_rel_path[index//2]}" # 只保留文件名节省token
|
73 |
+
previous_iteration_files.extend([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
|
74 |
+
previous_iteration_files_string = ', '.join(previous_iteration_files)
|
75 |
+
current_iteration_focus = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
|
76 |
+
i_say = f'根据以上分析,对程序的整体功能和构架重新做出概括。然后用一张markdown表格整理每个文件的功能(包括{previous_iteration_files_string})。'
|
77 |
+
inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。'
|
78 |
+
this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection)
|
79 |
+
this_iteration_history.append(last_iteration_result)
|
80 |
+
result = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
81 |
+
inputs=i_say, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot,
|
82 |
+
history=this_iteration_history, # 迭代之前的分析
|
83 |
+
sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。")
|
84 |
+
report_part_2.extend([i_say, result])
|
85 |
+
last_iteration_result = result
|
86 |
+
|
87 |
+
file_manifest = file_manifest[batchsize:]
|
88 |
+
gpt_response_collection = gpt_response_collection[batchsize*2:]
|
89 |
+
|
90 |
+
############################## <END> ##################################
|
91 |
+
history_to_return.extend(report_part_2)
|
92 |
+
res = write_results_to_file(history_to_return)
|
93 |
+
chatbot.append(("完成了吗?", res))
|
94 |
+
yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
|
95 |
|
96 |
|
97 |
@CatchException
|
98 |
+
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
99 |
history = [] # 清空历史,以免输入溢出
|
100 |
+
import glob
|
101 |
file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
|
102 |
+
[f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]+ \
|
103 |
+
[f for f in glob.glob('./request_llm/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
|
104 |
+
project_folder = './'
|
105 |
+
if len(file_manifest) == 0:
|
106 |
+
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
|
107 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
108 |
+
return
|
109 |
+
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
110 |
|
111 |
@CatchException
|
112 |
+
def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
113 |
history = [] # 清空历史,以免输入溢出
|
114 |
import glob, os
|
115 |
if os.path.exists(txt):
|
|
|
117 |
else:
|
118 |
if txt == "": txt = '空空如也的输入栏'
|
119 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
120 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
121 |
return
|
122 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
|
123 |
if len(file_manifest) == 0:
|
124 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
|
125 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
126 |
return
|
127 |
+
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
128 |
|
129 |
|
130 |
@CatchException
|
131 |
+
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
132 |
history = [] # 清空历史,以免输入溢出
|
133 |
import glob, os
|
134 |
if os.path.exists(txt):
|
|
|
136 |
else:
|
137 |
if txt == "": txt = '空空如也的输入栏'
|
138 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
139 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
140 |
return
|
141 |
+
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
|
142 |
+
[f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \
|
143 |
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
144 |
if len(file_manifest) == 0:
|
145 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
|
146 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
147 |
return
|
148 |
+
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
149 |
|
150 |
@CatchException
|
151 |
+
def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
152 |
history = [] # 清空历史,以免输入溢出
|
153 |
import glob, os
|
154 |
if os.path.exists(txt):
|
|
|
156 |
else:
|
157 |
if txt == "": txt = '空空如也的输入栏'
|
158 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
159 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
160 |
return
|
161 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
|
162 |
[f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
|
163 |
+
[f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \
|
164 |
[f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
165 |
if len(file_manifest) == 0:
|
166 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
|
167 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
168 |
return
|
169 |
+
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
170 |
|
171 |
|
172 |
@CatchException
|
173 |
+
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
174 |
history = [] # 清空历史,以免输入溢出
|
175 |
import glob, os
|
176 |
if os.path.exists(txt):
|
|
|
178 |
else:
|
179 |
if txt == "": txt = '空空如也的输入栏'
|
180 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
181 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
182 |
return
|
183 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
|
184 |
[f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \
|
|
|
186 |
[f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
|
187 |
if len(file_manifest) == 0:
|
188 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
|
189 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
190 |
return
|
191 |
+
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
192 |
|
193 |
|
194 |
@CatchException
|
195 |
+
def 解析一个Rect项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
196 |
history = [] # 清空历史,以免输入溢出
|
197 |
import glob, os
|
198 |
if os.path.exists(txt):
|
|
|
200 |
else:
|
201 |
if txt == "": txt = '空空如也的输入栏'
|
202 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
203 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
204 |
return
|
205 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
|
206 |
[f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
|
|
|
209 |
[f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
|
210 |
if len(file_manifest) == 0:
|
211 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}")
|
212 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
213 |
return
|
214 |
+
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
215 |
|
216 |
|
217 |
@CatchException
|
218 |
+
def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
219 |
history = [] # 清空历史,以免输入溢出
|
220 |
import glob, os
|
221 |
if os.path.exists(txt):
|
|
|
223 |
else:
|
224 |
if txt == "": txt = '空空如也的输入栏'
|
225 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
226 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
227 |
return
|
228 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)]
|
229 |
if len(file_manifest) == 0:
|
230 |
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
|
231 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
232 |
return
|
233 |
+
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
crazy_functions/读文章写摘要.py
CHANGED
@@ -1,9 +1,10 @@
|
|
1 |
-
from
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
|
|
3 |
fast_debug = False
|
4 |
|
5 |
|
6 |
-
def 解析Paper(file_manifest, project_folder,
|
7 |
import time, glob, os
|
8 |
print('begin analysis on:', file_manifest)
|
9 |
for index, fp in enumerate(file_manifest):
|
@@ -14,43 +15,39 @@ def 解析Paper(file_manifest, project_folder, top_p, api_key, temperature, chat
|
|
14 |
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
|
15 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
16 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
17 |
-
|
18 |
-
yield chatbot, history, '正常'
|
19 |
|
20 |
if not fast_debug:
|
21 |
msg = '正常'
|
22 |
# ** gpt request **
|
23 |
-
gpt_say = yield from
|
24 |
|
25 |
-
print('[2] end gpt req')
|
26 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
27 |
history.append(i_say_show_user); history.append(gpt_say)
|
28 |
-
|
29 |
-
yield chatbot, history, msg
|
30 |
-
print('[4] next')
|
31 |
if not fast_debug: time.sleep(2)
|
32 |
|
33 |
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
|
34 |
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
|
35 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
36 |
-
yield chatbot, history
|
37 |
|
38 |
if not fast_debug:
|
39 |
msg = '正常'
|
40 |
# ** gpt request **
|
41 |
-
gpt_say = yield from
|
42 |
|
43 |
chatbot[-1] = (i_say, gpt_say)
|
44 |
history.append(i_say); history.append(gpt_say)
|
45 |
-
yield chatbot, history, msg
|
46 |
res = write_results_to_file(history)
|
47 |
chatbot.append(("完成了吗?", res))
|
48 |
-
yield chatbot, history, msg
|
49 |
|
50 |
|
51 |
|
52 |
@CatchException
|
53 |
-
def 读文章写摘要(txt,
|
54 |
history = [] # 清空历史,以免输入溢出
|
55 |
import glob, os
|
56 |
if os.path.exists(txt):
|
@@ -58,13 +55,13 @@ def 读文章写摘要(txt, top_p, api_key, temperature, chatbot, history, syste
|
|
58 |
else:
|
59 |
if txt == "": txt = '空空如也的输入栏'
|
60 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
61 |
-
yield chatbot, history
|
62 |
return
|
63 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
|
64 |
# [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
|
65 |
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
66 |
if len(file_manifest) == 0:
|
67 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
68 |
-
yield chatbot, history
|
69 |
return
|
70 |
-
yield from 解析Paper(file_manifest, project_folder,
|
|
|
1 |
+
from toolbox import update_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
4 |
fast_debug = False
|
5 |
|
6 |
|
7 |
+
def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
8 |
import time, glob, os
|
9 |
print('begin analysis on:', file_manifest)
|
10 |
for index, fp in enumerate(file_manifest):
|
|
|
15 |
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
|
16 |
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
17 |
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
18 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
|
19 |
|
20 |
if not fast_debug:
|
21 |
msg = '正常'
|
22 |
# ** gpt request **
|
23 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # 带超时倒计时
|
24 |
|
|
|
25 |
chatbot[-1] = (i_say_show_user, gpt_say)
|
26 |
history.append(i_say_show_user); history.append(gpt_say)
|
27 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
|
|
|
|
28 |
if not fast_debug: time.sleep(2)
|
29 |
|
30 |
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
|
31 |
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
|
32 |
chatbot.append((i_say, "[Local Message] waiting gpt response."))
|
33 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
34 |
|
35 |
if not fast_debug:
|
36 |
msg = '正常'
|
37 |
# ** gpt request **
|
38 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt) # 带超时倒计时
|
39 |
|
40 |
chatbot[-1] = (i_say, gpt_say)
|
41 |
history.append(i_say); history.append(gpt_say)
|
42 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
43 |
res = write_results_to_file(history)
|
44 |
chatbot.append(("完成了吗?", res))
|
45 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
46 |
|
47 |
|
48 |
|
49 |
@CatchException
|
50 |
+
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
51 |
history = [] # 清空历史,以免输入溢出
|
52 |
import glob, os
|
53 |
if os.path.exists(txt):
|
|
|
55 |
else:
|
56 |
if txt == "": txt = '空空如也的输入栏'
|
57 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
58 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
59 |
return
|
60 |
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
|
61 |
# [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
|
62 |
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
63 |
if len(file_manifest) == 0:
|
64 |
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
65 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
66 |
return
|
67 |
+
yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
crazy_functions/谷歌检索小助手.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
+
from toolbox import update_ui
|
4 |
+
|
5 |
+
def get_meta_information(url, chatbot, history):
|
6 |
+
import requests
|
7 |
+
import arxiv
|
8 |
+
import difflib
|
9 |
+
from bs4 import BeautifulSoup
|
10 |
+
from toolbox import get_conf
|
11 |
+
proxies, = get_conf('proxies')
|
12 |
+
headers = {
|
13 |
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
|
14 |
+
}
|
15 |
+
# 发送 GET 请求
|
16 |
+
response = requests.get(url, proxies=proxies, headers=headers)
|
17 |
+
|
18 |
+
# 解析网页内容
|
19 |
+
soup = BeautifulSoup(response.text, "html.parser")
|
20 |
+
|
21 |
+
def string_similar(s1, s2):
|
22 |
+
return difflib.SequenceMatcher(None, s1, s2).quick_ratio()
|
23 |
+
|
24 |
+
profile = []
|
25 |
+
# 获取所有文章的标题和作者
|
26 |
+
for result in soup.select(".gs_ri"):
|
27 |
+
title = result.a.text.replace('\n', ' ').replace(' ', ' ')
|
28 |
+
author = result.select_one(".gs_a").text
|
29 |
+
try:
|
30 |
+
citation = result.select_one(".gs_fl > a[href*='cites']").text # 引用次数是链接中的文本,直接取出来
|
31 |
+
except:
|
32 |
+
citation = 'cited by 0'
|
33 |
+
abstract = result.select_one(".gs_rs").text.strip() # 摘要在 .gs_rs 中的文本,需要清除首尾空格
|
34 |
+
search = arxiv.Search(
|
35 |
+
query = title,
|
36 |
+
max_results = 1,
|
37 |
+
sort_by = arxiv.SortCriterion.Relevance,
|
38 |
+
)
|
39 |
+
paper = next(search.results())
|
40 |
+
if string_similar(title, paper.title) > 0.90: # same paper
|
41 |
+
abstract = paper.summary.replace('\n', ' ')
|
42 |
+
is_paper_in_arxiv = True
|
43 |
+
else: # different paper
|
44 |
+
abstract = abstract
|
45 |
+
is_paper_in_arxiv = False
|
46 |
+
paper = next(search.results())
|
47 |
+
print(title)
|
48 |
+
print(author)
|
49 |
+
print(citation)
|
50 |
+
profile.append({
|
51 |
+
'title':title,
|
52 |
+
'author':author,
|
53 |
+
'citation':citation,
|
54 |
+
'abstract':abstract,
|
55 |
+
'is_paper_in_arxiv':is_paper_in_arxiv,
|
56 |
+
})
|
57 |
+
|
58 |
+
chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract]
|
59 |
+
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
60 |
+
return profile
|
61 |
+
|
62 |
+
@CatchException
|
63 |
+
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
64 |
+
# 基本信息:功能、贡献者
|
65 |
+
chatbot.append([
|
66 |
+
"函数插件功能?",
|
67 |
+
"分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."])
|
68 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
69 |
+
|
70 |
+
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
71 |
+
try:
|
72 |
+
import arxiv
|
73 |
+
from bs4 import BeautifulSoup
|
74 |
+
except:
|
75 |
+
report_execption(chatbot, history,
|
76 |
+
a = f"解析项目: {txt}",
|
77 |
+
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
|
78 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
79 |
+
return
|
80 |
+
|
81 |
+
# 清空历史,以免输入溢出
|
82 |
+
history = []
|
83 |
+
|
84 |
+
meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
|
85 |
+
|
86 |
+
if len(meta_paper_info_list[:10]) > 0:
|
87 |
+
i_say = "下面是一些学术文献的数据,请从中提取出以下内容。" + \
|
88 |
+
"1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \
|
89 |
+
f"以下是信息源:{str(meta_paper_info_list[:10])}"
|
90 |
+
|
91 |
+
inputs_show_user = f"请分析此页面中出现的所有文章:{txt}"
|
92 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
93 |
+
inputs=i_say, inputs_show_user=inputs_show_user,
|
94 |
+
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
|
95 |
+
sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown格式。你必须逐个文献进行处理。"
|
96 |
+
)
|
97 |
+
|
98 |
+
history.extend([ "第一批", gpt_say ])
|
99 |
+
meta_paper_info_list = meta_paper_info_list[10:]
|
100 |
+
|
101 |
+
chatbot.append(["状态?", "已经全部完成"])
|
102 |
+
msg = '正常'
|
103 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
104 |
+
res = write_results_to_file(history)
|
105 |
+
chatbot.append(("完成了吗?", res));
|
106 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
crazy_functions/高级功能函数模板.py
CHANGED
@@ -1,25 +1,29 @@
|
|
1 |
-
from
|
2 |
-
from
|
3 |
import datetime
|
4 |
-
|
5 |
@CatchException
|
6 |
-
def 高阶功能模板函数(txt,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
history = [] # 清空历史,以免输入溢出
|
8 |
-
chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]
|
9 |
-
yield chatbot, history
|
10 |
-
|
11 |
for i in range(5):
|
12 |
currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
|
13 |
currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
|
14 |
i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
inputs=i_say, top_p=top_p, api_key=api_key, temperature=temperature, history=[],
|
21 |
-
sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。") # 请求gpt,需要一段时间
|
22 |
-
|
23 |
chatbot[-1] = (i_say, gpt_say)
|
24 |
history.append(i_say);history.append(gpt_say)
|
25 |
-
yield chatbot, history
|
|
|
1 |
+
from toolbox import CatchException, update_ui
|
2 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
3 |
import datetime
|
|
|
4 |
@CatchException
|
5 |
+
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
6 |
+
"""
|
7 |
+
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
8 |
+
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
9 |
+
plugin_kwargs 插件模型的参数,如温度和top_p等,一般原样传递下去就行
|
10 |
+
chatbot 聊天显示框的句柄,用于显示给用户
|
11 |
+
history 聊天历史,前情提要
|
12 |
+
system_prompt 给gpt的静默提醒
|
13 |
+
web_port 当前软件运行的端口号
|
14 |
+
"""
|
15 |
history = [] # 清空历史,以免输入溢出
|
16 |
+
chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!"))
|
17 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
|
|
18 |
for i in range(5):
|
19 |
currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
|
20 |
currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
|
21 |
i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
|
22 |
+
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
23 |
+
inputs=i_say, inputs_show_user=i_say,
|
24 |
+
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
|
25 |
+
sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。"
|
26 |
+
)
|
|
|
|
|
|
|
27 |
chatbot[-1] = (i_say, gpt_say)
|
28 |
history.append(i_say);history.append(gpt_say)
|
29 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
main.py
ADDED
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
2 |
+
import gradio as gr
|
3 |
+
from request_llm.bridge_chatgpt import predict
|
4 |
+
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
|
5 |
+
|
6 |
+
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
7 |
+
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
|
8 |
+
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
|
9 |
+
|
10 |
+
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
11 |
+
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
12 |
+
if not AUTHENTICATION: AUTHENTICATION = None
|
13 |
+
|
14 |
+
from check_proxy import get_current_version
|
15 |
+
initial_prompt = "Serve me as a writing and programming assistant."
|
16 |
+
title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
|
17 |
+
description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
|
18 |
+
|
19 |
+
# 问询记录, python 版本建议3.9+(越新越好)
|
20 |
+
import logging
|
21 |
+
os.makedirs("gpt_log", exist_ok=True)
|
22 |
+
try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
|
23 |
+
except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
|
24 |
+
print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
|
25 |
+
|
26 |
+
# 一些普通功能模块
|
27 |
+
from core_functional import get_core_functions
|
28 |
+
functional = get_core_functions()
|
29 |
+
|
30 |
+
# 高级函数插件
|
31 |
+
from crazy_functional import get_crazy_functions
|
32 |
+
crazy_fns = get_crazy_functions()
|
33 |
+
|
34 |
+
# 处理markdown文本格式的转变
|
35 |
+
gr.Chatbot.postprocess = format_io
|
36 |
+
|
37 |
+
# 做一些外观色彩上的调整
|
38 |
+
from theme import adjust_theme, advanced_css
|
39 |
+
set_theme = adjust_theme()
|
40 |
+
|
41 |
+
# 代理与自动更新
|
42 |
+
from check_proxy import check_proxy, auto_update
|
43 |
+
proxy_info = check_proxy(proxies)
|
44 |
+
|
45 |
+
gr_L1 = lambda: gr.Row().style()
|
46 |
+
gr_L2 = lambda scale: gr.Column(scale=scale)
|
47 |
+
if LAYOUT == "TOP-DOWN":
|
48 |
+
gr_L1 = lambda: DummyWith()
|
49 |
+
gr_L2 = lambda scale: gr.Row()
|
50 |
+
CHATBOT_HEIGHT /= 2
|
51 |
+
|
52 |
+
cancel_handles = []
|
53 |
+
with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
|
54 |
+
gr.HTML(title_html)
|
55 |
+
cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
|
56 |
+
with gr_L1():
|
57 |
+
with gr_L2(scale=2):
|
58 |
+
chatbot = gr.Chatbot()
|
59 |
+
chatbot.style(height=CHATBOT_HEIGHT)
|
60 |
+
history = gr.State([])
|
61 |
+
with gr_L2(scale=1):
|
62 |
+
with gr.Accordion("输入区", open=True) as area_input_primary:
|
63 |
+
with gr.Row():
|
64 |
+
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
|
65 |
+
with gr.Row():
|
66 |
+
submitBtn = gr.Button("提交", variant="primary")
|
67 |
+
with gr.Row():
|
68 |
+
resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
|
69 |
+
stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
|
70 |
+
with gr.Row():
|
71 |
+
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
|
72 |
+
with gr.Accordion("基础功能区", open=True) as area_basic_fn:
|
73 |
+
with gr.Row():
|
74 |
+
for k in functional:
|
75 |
+
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
|
76 |
+
functional[k]["Button"] = gr.Button(k, variant=variant)
|
77 |
+
with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
|
78 |
+
with gr.Row():
|
79 |
+
gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
|
80 |
+
with gr.Row():
|
81 |
+
for k in crazy_fns:
|
82 |
+
if not crazy_fns[k].get("AsButton", True): continue
|
83 |
+
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
|
84 |
+
crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
|
85 |
+
crazy_fns[k]["Button"].style(size="sm")
|
86 |
+
with gr.Row():
|
87 |
+
with gr.Accordion("更多函数插件", open=True):
|
88 |
+
dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
|
89 |
+
with gr.Column(scale=1):
|
90 |
+
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
|
91 |
+
with gr.Column(scale=1):
|
92 |
+
switchy_bt = gr.Button(r"请先从插件列表中��择", variant="secondary")
|
93 |
+
with gr.Row():
|
94 |
+
with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
|
95 |
+
file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
|
96 |
+
with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
|
97 |
+
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
|
98 |
+
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
99 |
+
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
100 |
+
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
|
101 |
+
gr.Markdown(description)
|
102 |
+
with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
|
103 |
+
with gr.Row():
|
104 |
+
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
|
105 |
+
with gr.Row():
|
106 |
+
submitBtn2 = gr.Button("提交", variant="primary")
|
107 |
+
with gr.Row():
|
108 |
+
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
|
109 |
+
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
|
110 |
+
# 功能区显示开关与功能区的互动
|
111 |
+
def fn_area_visibility(a):
|
112 |
+
ret = {}
|
113 |
+
ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
|
114 |
+
ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
|
115 |
+
ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
|
116 |
+
ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
|
117 |
+
if "底部输入区" in a: ret.update({txt: gr.update(value="")})
|
118 |
+
return ret
|
119 |
+
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
|
120 |
+
# 整理反复出现的控件句柄组合
|
121 |
+
input_combo = [cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
|
122 |
+
output_combo = [cookies, chatbot, history, status]
|
123 |
+
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
|
124 |
+
# 提交按钮、重置按钮
|
125 |
+
cancel_handles.append(txt.submit(**predict_args))
|
126 |
+
cancel_handles.append(txt2.submit(**predict_args))
|
127 |
+
cancel_handles.append(submitBtn.click(**predict_args))
|
128 |
+
cancel_handles.append(submitBtn2.click(**predict_args))
|
129 |
+
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
|
130 |
+
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
|
131 |
+
# 基础功能区的回调函数注册
|
132 |
+
for k in functional:
|
133 |
+
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
|
134 |
+
cancel_handles.append(click_handle)
|
135 |
+
# 文件上传区,接收文件后与chatbot的互动
|
136 |
+
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
|
137 |
+
# 函数插件-固定按钮区
|
138 |
+
for k in crazy_fns:
|
139 |
+
if not crazy_fns[k].get("AsButton", True): continue
|
140 |
+
click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
|
141 |
+
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
142 |
+
cancel_handles.append(click_handle)
|
143 |
+
# 函数插件-下拉菜单与随变按钮的互动
|
144 |
+
def on_dropdown_changed(k):
|
145 |
+
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
|
146 |
+
return {switchy_bt: gr.update(value=k, variant=variant)}
|
147 |
+
dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
|
148 |
+
# 随变按钮的回调函数注册
|
149 |
+
def route(k, *args, **kwargs):
|
150 |
+
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
|
151 |
+
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
|
152 |
+
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
|
153 |
+
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
154 |
+
# def expand_file_area(file_upload, area_file_up):
|
155 |
+
# if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
|
156 |
+
# click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
|
157 |
+
cancel_handles.append(click_handle)
|
158 |
+
# 终止按钮的回调函数注册
|
159 |
+
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
160 |
+
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
161 |
+
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
162 |
+
def auto_opentab_delay():
|
163 |
+
import threading, webbrowser, time
|
164 |
+
print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
|
165 |
+
print(f"\t(亮色主题): http://localhost:{PORT}")
|
166 |
+
print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
|
167 |
+
def open():
|
168 |
+
time.sleep(2) # 打开浏览器
|
169 |
+
webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
|
170 |
+
threading.Thread(target=open, name="open-browser", daemon=True).start()
|
171 |
+
threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
|
172 |
+
|
173 |
+
auto_opentab_delay()
|
174 |
+
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION)
|
request_llm/README.md
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# 如何使用其他大语言模型(dev分支测试中)
|
2 |
+
|
3 |
+
## 1. 先运行text-generation
|
4 |
+
``` sh
|
5 |
+
# 下载模型( text-generation 这么牛的项目,别忘了给人家star )
|
6 |
+
git clone https://github.com/oobabooga/text-generation-webui.git
|
7 |
+
|
8 |
+
# 安装text-generation的额外依赖
|
9 |
+
pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
|
10 |
+
|
11 |
+
# 切换路径
|
12 |
+
cd text-generation-webui
|
13 |
+
|
14 |
+
# 下载模型
|
15 |
+
python download-model.py facebook/galactica-1.3b
|
16 |
+
# 其他可选如 facebook/opt-1.3b
|
17 |
+
# facebook/galactica-6.7b
|
18 |
+
# facebook/galactica-120b
|
19 |
+
# facebook/pygmalion-1.3b 等
|
20 |
+
# 详情见 https://github.com/oobabooga/text-generation-webui
|
21 |
+
|
22 |
+
# 启动text-generation,注意把模型的斜杠改成下划线
|
23 |
+
python server.py --cpu --listen --listen-port 7860 --model facebook_galactica-1.3b
|
24 |
+
```
|
25 |
+
|
26 |
+
## 2. 修改config.py
|
27 |
+
``` sh
|
28 |
+
# LLM_MODEL格式较复杂 TGUI:[模型]@[ws地址]:[ws端口] , 端口要和上面给定的端口一致
|
29 |
+
LLM_MODEL = "TGUI:galactica-1.3b@localhost:7860"
|
30 |
+
```
|
31 |
+
|
32 |
+
## 3. 运行!
|
33 |
+
``` sh
|
34 |
+
cd chatgpt-academic
|
35 |
+
python main.py
|
36 |
+
```
|
predict.py → request_llm/bridge_chatgpt.py
RENAMED
@@ -12,6 +12,7 @@
|
|
12 |
"""
|
13 |
|
14 |
import json
|
|
|
15 |
import gradio as gr
|
16 |
import logging
|
17 |
import traceback
|
@@ -20,7 +21,7 @@ import importlib
|
|
20 |
|
21 |
# config_private.py放自己的秘密如API和代理网址
|
22 |
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
23 |
-
from toolbox import get_conf
|
24 |
proxies, API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, LLM_MODEL = \
|
25 |
get_conf('proxies', 'API_URL', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'LLM_MODEL')
|
26 |
|
@@ -38,45 +39,23 @@ def get_full_error(chunk, stream_response):
|
|
38 |
break
|
39 |
return chunk
|
40 |
|
41 |
-
def predict_no_ui(inputs, top_p, api_key, temperature, history=[], sys_prompt=""):
|
42 |
-
"""
|
43 |
-
发送至chatGPT,等待回复,一次性完成,不显示中间过程。
|
44 |
-
predict函数的简化版。
|
45 |
-
用于payload比较大的情况,或者用于实现多线、带嵌套的复杂功能。
|
46 |
-
|
47 |
-
inputs 是本次问询的输入
|
48 |
-
top_p, api_key, temperature是chatGPT的内部调优参数
|
49 |
-
history 是之前的对话列表
|
50 |
-
(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误,然后raise ConnectionAbortedError)
|
51 |
-
"""
|
52 |
-
headers, payload = generate_payload(inputs, top_p, api_key, temperature, history, system_prompt=sys_prompt, stream=False)
|
53 |
-
|
54 |
-
retry = 0
|
55 |
-
while True:
|
56 |
-
try:
|
57 |
-
# make a POST request to the API endpoint, stream=False
|
58 |
-
response = requests.post(API_URL, headers=headers, proxies=proxies,
|
59 |
-
json=payload, stream=False, timeout=TIMEOUT_SECONDS*2); break
|
60 |
-
except requests.exceptions.ReadTimeout as e:
|
61 |
-
retry += 1
|
62 |
-
traceback.print_exc()
|
63 |
-
if retry > MAX_RETRY: raise TimeoutError
|
64 |
-
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
65 |
-
|
66 |
-
try:
|
67 |
-
result = json.loads(response.text)["choices"][0]["message"]["content"]
|
68 |
-
return result
|
69 |
-
except Exception as e:
|
70 |
-
if "choices" not in response.text: print(response.text)
|
71 |
-
raise ConnectionAbortedError("Json解析不合常规,可能是文本过长" + response.text)
|
72 |
-
|
73 |
|
74 |
-
def predict_no_ui_long_connection(inputs,
|
75 |
"""
|
76 |
-
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
77 |
"""
|
78 |
-
|
79 |
-
|
80 |
retry = 0
|
81 |
while True:
|
82 |
try:
|
@@ -93,7 +72,10 @@ def predict_no_ui_long_connection(inputs, top_p, api_key, temperature, history=[
|
|
93 |
result = ''
|
94 |
while True:
|
95 |
try: chunk = next(stream_response).decode()
|
96 |
-
except StopIteration:
|
|
|
|
|
|
|
97 |
if len(chunk)==0: continue
|
98 |
if not chunk.startswith('data:'):
|
99 |
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
|
@@ -105,38 +87,56 @@ def predict_no_ui_long_connection(inputs, top_p, api_key, temperature, history=[
|
|
105 |
delta = json_data["delta"]
|
106 |
if len(delta) == 0: break
|
107 |
if "role" in delta: continue
|
108 |
-
if "content" in delta:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
109 |
else: raise RuntimeError("意外Json结构:"+delta)
|
110 |
if json_data['finish_reason'] == 'length':
|
111 |
-
raise ConnectionAbortedError("正常结束,但显示Token
|
112 |
return result
|
113 |
|
114 |
|
115 |
-
def predict(inputs,
|
116 |
-
stream = True, additional_fn=None):
|
117 |
"""
|
118 |
发送至chatGPT,流式获取输出。
|
119 |
用于基础的对话功能。
|
120 |
inputs 是本次问询的输入
|
121 |
-
top_p,
|
122 |
history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
|
123 |
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
124 |
additional_fn代表点击的哪个按钮,按钮见functional.py
|
125 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
126 |
if additional_fn is not None:
|
127 |
-
import
|
128 |
-
importlib.reload(
|
129 |
-
|
130 |
-
if "PreProcess" in
|
131 |
-
inputs =
|
132 |
|
133 |
if stream:
|
134 |
raw_input = inputs
|
135 |
logging.info(f'[raw_input] {raw_input}')
|
136 |
chatbot.append((inputs, ""))
|
137 |
-
yield chatbot, history, "等待响应"
|
138 |
|
139 |
-
headers, payload = generate_payload(inputs,
|
140 |
history.append(inputs); history.append(" ")
|
141 |
|
142 |
retry = 0
|
@@ -149,7 +149,7 @@ def predict(inputs, top_p, api_key, temperature, chatbot=[], history=[], system_
|
|
149 |
retry += 1
|
150 |
chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
|
151 |
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
|
152 |
-
yield chatbot, history, "请求超时"+retry_msg
|
153 |
if retry > MAX_RETRY: raise TimeoutError
|
154 |
|
155 |
gpt_replying_buffer = ""
|
@@ -177,34 +177,37 @@ def predict(inputs, top_p, api_key, temperature, chatbot=[], history=[], system_
|
|
177 |
gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"]
|
178 |
history[-1] = gpt_replying_buffer
|
179 |
chatbot[-1] = (history[-2], history[-1])
|
180 |
-
yield chatbot, history, status_text
|
181 |
|
182 |
except Exception as e:
|
183 |
traceback.print_exc()
|
184 |
-
yield chatbot, history, "Json解析不合常规"
|
185 |
chunk = get_full_error(chunk, stream_response)
|
186 |
error_msg = chunk.decode()
|
187 |
if "reduce the length" in error_msg:
|
188 |
-
chatbot[-1] = (chatbot[-1][0], "[Local Message]
|
189 |
history = [] # 清除历史
|
190 |
elif "Incorrect API key" in error_msg:
|
191 |
-
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key
|
192 |
elif "exceeded your current quota" in error_msg:
|
193 |
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.")
|
194 |
else:
|
195 |
from toolbox import regular_txt_to_markdown
|
196 |
tb_str = '```\n' + traceback.format_exc() + '```'
|
197 |
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
|
198 |
-
yield chatbot, history, "Json异常" + error_msg
|
199 |
return
|
200 |
|
201 |
-
def generate_payload(inputs,
|
202 |
"""
|
203 |
整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
|
204 |
"""
|
|
|
|
|
|
|
205 |
headers = {
|
206 |
"Content-Type": "application/json",
|
207 |
-
"Authorization": f"Bearer {api_key}"
|
208 |
}
|
209 |
|
210 |
conversation_cnt = len(history) // 2
|
@@ -232,17 +235,19 @@ def generate_payload(inputs, top_p, api_key, temperature, history, system_prompt
|
|
232 |
messages.append(what_i_ask_now)
|
233 |
|
234 |
payload = {
|
235 |
-
"model":
|
236 |
"messages": messages,
|
237 |
-
"temperature": temperature, # 1.0,
|
238 |
-
"top_p": top_p, # 1.0,
|
239 |
"n": 1,
|
240 |
"stream": stream,
|
241 |
"presence_penalty": 0,
|
242 |
"frequency_penalty": 0,
|
243 |
}
|
244 |
-
|
245 |
-
|
|
|
|
|
246 |
return headers,payload
|
247 |
|
248 |
|
|
|
12 |
"""
|
13 |
|
14 |
import json
|
15 |
+
import time
|
16 |
import gradio as gr
|
17 |
import logging
|
18 |
import traceback
|
|
|
21 |
|
22 |
# config_private.py放自己的秘密如API和代理网址
|
23 |
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
24 |
+
from toolbox import get_conf, update_ui
|
25 |
proxies, API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, LLM_MODEL = \
|
26 |
get_conf('proxies', 'API_URL', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'LLM_MODEL')
|
27 |
|
|
|
39 |
break
|
40 |
return chunk
|
41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
|
43 |
+
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
|
44 |
"""
|
45 |
+
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
|
46 |
+
inputs:
|
47 |
+
是本次问询的输入
|
48 |
+
sys_prompt:
|
49 |
+
系统静默prompt
|
50 |
+
llm_kwargs:
|
51 |
+
chatGPT的内部调优参数
|
52 |
+
history:
|
53 |
+
是之前的对话列表
|
54 |
+
observe_window = None:
|
55 |
+
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
|
56 |
"""
|
57 |
+
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
58 |
+
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
|
59 |
retry = 0
|
60 |
while True:
|
61 |
try:
|
|
|
72 |
result = ''
|
73 |
while True:
|
74 |
try: chunk = next(stream_response).decode()
|
75 |
+
except StopIteration:
|
76 |
+
break
|
77 |
+
except requests.exceptions.ConnectionError:
|
78 |
+
chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
|
79 |
if len(chunk)==0: continue
|
80 |
if not chunk.startswith('data:'):
|
81 |
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
|
|
|
87 |
delta = json_data["delta"]
|
88 |
if len(delta) == 0: break
|
89 |
if "role" in delta: continue
|
90 |
+
if "content" in delta:
|
91 |
+
result += delta["content"]
|
92 |
+
if not console_slience: print(delta["content"], end='')
|
93 |
+
if observe_window is not None:
|
94 |
+
# 观测窗,把已经获取的数据显示出去
|
95 |
+
if len(observe_window) >= 1: observe_window[0] += delta["content"]
|
96 |
+
# 看门狗,如果超过期限没有喂狗,则终止
|
97 |
+
if len(observe_window) >= 2:
|
98 |
+
if (time.time()-observe_window[1]) > watch_dog_patience:
|
99 |
+
raise RuntimeError("程序终止。")
|
100 |
else: raise RuntimeError("意外Json结构:"+delta)
|
101 |
if json_data['finish_reason'] == 'length':
|
102 |
+
raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
|
103 |
return result
|
104 |
|
105 |
|
106 |
+
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
|
|
107 |
"""
|
108 |
发送至chatGPT,流式获取输出。
|
109 |
用于基础的对话功能。
|
110 |
inputs 是本次问询的输入
|
111 |
+
top_p, temperature是chatGPT的内部调优参数
|
112 |
history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
|
113 |
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
114 |
additional_fn代表点击的哪个按钮,按钮见functional.py
|
115 |
"""
|
116 |
+
if inputs.startswith('sk-') and len(inputs) == 51:
|
117 |
+
chatbot._cookies['api_key'] = inputs
|
118 |
+
chatbot.append(("输入已识别为openai的api_key", "api_key已导入"))
|
119 |
+
yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
|
120 |
+
return
|
121 |
+
elif len(chatbot._cookies['api_key']) != 51:
|
122 |
+
chatbot.append((inputs, "缺少api_key。\n\n1. 解决方案:直接在输入区键入api_key,然后回车提交。"))
|
123 |
+
yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
|
124 |
+
return
|
125 |
+
|
126 |
if additional_fn is not None:
|
127 |
+
import core_functional
|
128 |
+
importlib.reload(core_functional) # 热更新prompt
|
129 |
+
core_functional = core_functional.get_core_functions()
|
130 |
+
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
|
131 |
+
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
|
132 |
|
133 |
if stream:
|
134 |
raw_input = inputs
|
135 |
logging.info(f'[raw_input] {raw_input}')
|
136 |
chatbot.append((inputs, ""))
|
137 |
+
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
138 |
|
139 |
+
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
140 |
history.append(inputs); history.append(" ")
|
141 |
|
142 |
retry = 0
|
|
|
149 |
retry += 1
|
150 |
chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
|
151 |
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
|
152 |
+
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
|
153 |
if retry > MAX_RETRY: raise TimeoutError
|
154 |
|
155 |
gpt_replying_buffer = ""
|
|
|
177 |
gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"]
|
178 |
history[-1] = gpt_replying_buffer
|
179 |
chatbot[-1] = (history[-2], history[-1])
|
180 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
|
181 |
|
182 |
except Exception as e:
|
183 |
traceback.print_exc()
|
184 |
+
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
|
185 |
chunk = get_full_error(chunk, stream_response)
|
186 |
error_msg = chunk.decode()
|
187 |
if "reduce the length" in error_msg:
|
188 |
+
chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长,或历史数据过长. 历史缓存数据现已释放,您可以再次尝试.")
|
189 |
history = [] # 清除历史
|
190 |
elif "Incorrect API key" in error_msg:
|
191 |
+
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由,拒绝服务.")
|
192 |
elif "exceeded your current quota" in error_msg:
|
193 |
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.")
|
194 |
else:
|
195 |
from toolbox import regular_txt_to_markdown
|
196 |
tb_str = '```\n' + traceback.format_exc() + '```'
|
197 |
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
|
198 |
+
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
|
199 |
return
|
200 |
|
201 |
+
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
202 |
"""
|
203 |
整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
|
204 |
"""
|
205 |
+
if len(llm_kwargs['api_key']) != 51:
|
206 |
+
raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
|
207 |
+
|
208 |
headers = {
|
209 |
"Content-Type": "application/json",
|
210 |
+
"Authorization": f"Bearer {llm_kwargs['api_key']}"
|
211 |
}
|
212 |
|
213 |
conversation_cnt = len(history) // 2
|
|
|
235 |
messages.append(what_i_ask_now)
|
236 |
|
237 |
payload = {
|
238 |
+
"model": llm_kwargs['llm_model'],
|
239 |
"messages": messages,
|
240 |
+
"temperature": llm_kwargs['temperature'], # 1.0,
|
241 |
+
"top_p": llm_kwargs['top_p'], # 1.0,
|
242 |
"n": 1,
|
243 |
"stream": stream,
|
244 |
"presence_penalty": 0,
|
245 |
"frequency_penalty": 0,
|
246 |
}
|
247 |
+
try:
|
248 |
+
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
|
249 |
+
except:
|
250 |
+
print('输入中可能存在乱码。')
|
251 |
return headers,payload
|
252 |
|
253 |
|
request_llm/bridge_tgui.py
ADDED
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''
|
2 |
+
Contributed by SagsMug. Modified by binary-husky
|
3 |
+
https://github.com/oobabooga/text-generation-webui/pull/175
|
4 |
+
'''
|
5 |
+
|
6 |
+
import asyncio
|
7 |
+
import json
|
8 |
+
import random
|
9 |
+
import string
|
10 |
+
import websockets
|
11 |
+
import logging
|
12 |
+
import time
|
13 |
+
import threading
|
14 |
+
import importlib
|
15 |
+
from toolbox import get_conf, update_ui
|
16 |
+
LLM_MODEL, = get_conf('LLM_MODEL')
|
17 |
+
|
18 |
+
# "TGUI:galactica-1.3b@localhost:7860"
|
19 |
+
model_name, addr_port = LLM_MODEL.split('@')
|
20 |
+
assert ':' in addr_port, "LLM_MODEL 格式不正确!" + LLM_MODEL
|
21 |
+
addr, port = addr_port.split(':')
|
22 |
+
|
23 |
+
def random_hash():
|
24 |
+
letters = string.ascii_lowercase + string.digits
|
25 |
+
return ''.join(random.choice(letters) for i in range(9))
|
26 |
+
|
27 |
+
async def run(context, max_token=512):
    """Stream a completion from a text-generation-webui (TGUI) server.

    Connects to the Gradio queue websocket at ``ws://{addr}:{port}/queue/join``
    (``addr``/``port`` are module-level values parsed from LLM_MODEL), submits
    *context* together with a fixed set of sampling parameters, and yields the
    partial output string every time the server reports generation progress.

    context: prompt text sent to the model.
    max_token: upper bound on newly generated tokens (``max_new_tokens``).
    """
    # Sampling parameters; they are forwarded positionally, in this exact
    # order, in the "data" list of the send_data message below.
    params = {
        'max_new_tokens': max_token,
        'do_sample': True,
        'temperature': 0.5,
        'top_p': 0.9,
        'typical_p': 1,
        'repetition_penalty': 1.05,
        'encoder_repetition_penalty': 1.0,
        'top_k': 0,
        'min_length': 0,
        'no_repeat_ngram_size': 0,
        'num_beams': 1,
        'penalty_alpha': 0,
        'length_penalty': 1,
        'early_stopping': True,
        'seed': -1,
    }
    session = random_hash()  # per-request session hash for the Gradio queue

    async with websockets.connect(f"ws://{addr}:{port}/queue/join") as websocket:
        # The walrus operator below requires Python 3.8+; loop ends if the
        # server ever sends a falsy (empty) JSON payload.
        while content := json.loads(await websocket.recv()):
            if content["msg"] == "send_hash":
                # Server asks us to identify: reply with our session hash.
                # fn_index 12 — presumably the generate endpoint's index in
                # the TGUI Gradio app; TODO confirm against server version.
                await websocket.send(json.dumps({
                    "session_hash": session,
                    "fn_index": 12
                }))
            elif content["msg"] == "estimation":
                # Queue-position estimate; nothing to do.
                pass
            elif content["msg"] == "send_data":
                # Our turn in the queue: submit prompt + sampling parameters.
                await websocket.send(json.dumps({
                    "session_hash": session,
                    "fn_index": 12,
                    "data": [
                        context,
                        params['max_new_tokens'],
                        params['do_sample'],
                        params['temperature'],
                        params['top_p'],
                        params['typical_p'],
                        params['repetition_penalty'],
                        params['encoder_repetition_penalty'],
                        params['top_k'],
                        params['min_length'],
                        params['no_repeat_ngram_size'],
                        params['num_beams'],
                        params['penalty_alpha'],
                        params['length_penalty'],
                        params['early_stopping'],
                        params['seed'],
                    ]
                }))
            elif content["msg"] == "process_starts":
                pass
            elif content["msg"] in ["process_generating", "process_completed"]:
                # Each progress message carries the full output-so-far.
                yield content["output"]["data"][0]
                # You can search for your desired end indicator and
                # stop generation by closing the websocket here
                if (content["msg"] == "process_completed"):
                    break
|
88 |
+
|
89 |
+
|
90 |
+
|
91 |
+
|
92 |
+
|
93 |
+
def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    Send a query to the TGUI backend and stream the output to the WebUI.
    Used for the basic chat feature.

    inputs: the text of the current query
    top_p, temperature: model sampling parameters (note: currently unused here;
        the `run` helper applies its own fixed sampling parameters)
    history: list of previous messages (overlong inputs or history can
        overflow the model's token limit)
    chatbot: the conversation list shown in the WebUI; mutate it and yield
        to refresh the page
    additional_fn: which preset button was clicked; see core_functional.py
    """
    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompt definitions
        core_functional = core_functional.get_core_functions()
        # Apply the button's optional pre-processing, then wrap with its prefix/suffix.
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    raw_input = "What I would like to say is the following: " + inputs
    logging.info(f'[raw_input] {raw_input}')
    history.extend([inputs, ""])
    chatbot.append([inputs, ""])
    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # refresh the page

    prompt = inputs
    tgui_say = ""

    # mutable[0]: output accumulated so far; mutable[1]: watchdog feed time.
    mutable = ["", time.time()]
    def run_coorotine(mutable):
        async def get_result(mutable):
            async for response in run(prompt):
                print(response[len(mutable[0]):])
                mutable[0] = response
                # Watchdog: abort generation if the consumer loop below has
                # stopped feeding mutable[1] for more than 3 seconds.
                if (time.time() - mutable[1]) > 3:
                    print('exit when no listener')
                    break
        asyncio.run(get_result(mutable))

    thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
    thread_listen.start()

    while thread_listen.is_alive():
        time.sleep(1)
        mutable[1] = time.time()  # feed the watchdog while we are still listening
        # Push intermediate output to the UI whenever it changed.
        if tgui_say != mutable[0]:
            tgui_say = mutable[0]
            history[-1] = tgui_say
            chatbot[-1] = (history[-2], history[-1])
            yield from update_ui(chatbot=chatbot, history=history) # refresh the page

    logging.info(f'[response] {tgui_say}')
|
144 |
+
|
145 |
+
|
146 |
+
|
147 |
+
def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
    """
    Query the TGUI backend and return the full (short) reply in one shot,
    without streaming anything to the UI.

    inputs: the text of the current query
    top_p, temperature: sampling parameters (currently unused; the `run`
        helper applies its own fixed sampling parameters)
    history, sys_prompt: accepted for signature compatibility with the other
        predict_* functions; not used by this backend path
    Returns the generated text (capped at max_token=20 new tokens).
    """
    prompt = inputs
    # mutable[0]: output accumulated so far; mutable[1]: watchdog feed time.
    mutable = ["", time.time()]

    def run_coorotine(mutable):
        async def get_result(mutable):
            async for response in run(prompt, max_token=20):
                print(response[len(mutable[0]):])
                mutable[0] = response
                # Watchdog: abort generation if the waiting loop below has
                # stopped feeding mutable[1] for more than 3 seconds.
                if (time.time() - mutable[1]) > 3:
                    print('exit when no listener')
                    break
        asyncio.run(get_result(mutable))

    # daemon=True so a stalled websocket cannot block interpreter shutdown,
    # consistent with predict_tgui above.
    thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
    thread_listen.start()
    while thread_listen.is_alive():
        time.sleep(1)
        mutable[1] = time.time()  # feed the watchdog while we wait
    tgui_say = mutable[0]
    return tgui_say
|
requirements.txt
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
gradio>=3.23
|
2 |
-
requests[socks]
|
3 |
-
mdtex2html
|
4 |
-
Markdown
|
5 |
-
latex2mathml
|
6 |
-
pdfminer
|
7 |
-
pymupdf
|
8 |
-
beautifulsoup4
|
9 |
-
rarfile
|
10 |
-
py7zr
|
11 |
-
python-docx
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self_analysis.md
DELETED
@@ -1,175 +0,0 @@
|
|
1 |
-
# chatgpt-academic项目自译解报告
|
2 |
-
(Author补充:以下分析均由本项目调用ChatGPT一键生成,如果有不准确的地方,全怪GPT😄)
|
3 |
-
|
4 |
-
## [0/18] 程序摘要: functional_crazy.py
|
5 |
-
|
6 |
-
这是一个功能扩展的程序,文件名为 `functional_crazy.py`。代码的主要功能是通过提供一系列函数插件,增强程序的功能,让用户可以通过界面中的按钮,快速调用对应的函数插件实现相应的操作。代码中使用了 `HotReload` 函数插件,可以在不重启程序的情况下更新函数插件的代码,让其生效。同时,通过 `UserVisibleLevel` 变量的设置,可以控制哪些插件会在UI界面显示出来。函数插件列表包括了以下功能:解析项目本身、解析一个Python项目、解析一个C++项目头文件、解析一个C++项目、读取文章并生成摘要、批量生成函数注释、全项目切换成英文、批量总结PDF文档、批量总结PDF文档pdfminer、批量总结Word文档、高阶功能模板函数、以及其他未经充分测试的函数插件。
|
7 |
-
|
8 |
-
## [1/18] 程序摘要: main.py
|
9 |
-
|
10 |
-
该程序是一个基于Gradio构建的对话生成模型的Web界面示例,包含了以下主要功能:
|
11 |
-
|
12 |
-
1.加载模型并对用户输入进行响应;
|
13 |
-
2.通过调用外部函数库来获取用户的输入,并在模型生成的过程中进行处理;
|
14 |
-
3.支持用户上传本地文件,供外部函数库调用;
|
15 |
-
4.支持停止当前的生成过程;
|
16 |
-
5.保存用户的历史记录,并将其记录在本地日志文件中,以供后续分析和使用。
|
17 |
-
|
18 |
-
该程序需要依赖于一些外部库和软件包,如Gradio、torch等。用户需要确保这些依赖项已经安装,并且在运行该程序前对config_private.py配置文件进行相应的修改。
|
19 |
-
|
20 |
-
## [2/18] 程序摘要: functional.py
|
21 |
-
|
22 |
-
该文件定义了一个名为“functional”的函数,函数的作用是返回一个包含多个字典(键值对)的字典,每个键值对表示一种功能。该字典的键值由功能名称和对应的数据组成。其中的每个字典都包含4个键值对,分别为“Prefix”、“Suffix”、“Color”和“PreProcess”,分别表示前缀、后缀、按钮颜色和预处理函数。如果某些键值对没有给出,那么程序中默认相应的值,如按钮颜色默认为“secondary”等。每个功能描述了不同的学术润色/翻译/其他服务,如“英语学术润色”、“中文学术润色”、“查找语法错误”等。函数还引用了一个名为“clear_line_break”的函数,用于预处理修改前的文本。
|
23 |
-
|
24 |
-
## [3/18] 程序摘要: show_math.py
|
25 |
-
|
26 |
-
该程序文件名为show_math.py,主要用途是将Markdown和LaTeX混合格式转换成带有MathML的HTML格式。该程序通过递归地处理LaTeX和Markdown混合段落逐一转换成HTML/MathML标记出来,并在LaTeX公式创建中进行错误处理。在程序文件中定义了3个变量,分别是incomplete,convError和convert,其中convert函数是用来执行转换的主要函数。程序使用正则表达式进行LaTeX格式和Markdown段落的分割,从而实现转换。如果在Latex转换过程中发生错误,程序将输出相应的错误信息。
|
27 |
-
|
28 |
-
## [4/18] 程序摘要: predict.py
|
29 |
-
|
30 |
-
本程序文件的文件名为"./predict.py",主要包含三个函数:
|
31 |
-
|
32 |
-
1. predict:正常对话时使用,具备完备的交互功能,不可多线程;
|
33 |
-
2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑;
|
34 |
-
3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程。
|
35 |
-
|
36 |
-
其中,predict函数用于基础的对话功能,发送至chatGPT,流式获取输出,根据点击的哪个按钮,进行对话预处理等额外操作;predict_no_ui函数用于payload比较大的情况,或者用于实现多线、带嵌套的复杂功能;predict_no_ui_long_connection实现调用predict_no_ui处理长文档时,避免连接断掉的情况,支持多线程。
|
37 |
-
|
38 |
-
## [5/18] 程序摘要: check_proxy.py
|
39 |
-
|
40 |
-
该程序文件名为check_proxy.py,主要功能是检查代理服务器的可用性并返回代理服务器的地理位置信息或错误提示。具体实现方式如下:
|
41 |
-
|
42 |
-
首先使用requests模块向指定网站(https://ipapi.co/json/)发送GET请求,请求结果以JSON格式返回。如果代理服务器参数(proxies)是有效的且没有指明'https'代理,则用默认字典值'无'替代。
|
43 |
-
|
44 |
-
然后,程序会解析返回的JSON数据,并根据数据中是否包含国家名字字段来判断代理服务器的地理位置。如果有国家名字字段,则将其打印出来并返回代理服务器的相关信息。如果没有国家名字字段,但有错误信息字段,则返回其他错误提示信息。
|
45 |
-
|
46 |
-
在程序执行前,程序会先设置环境变量no_proxy,并使用toolbox模块中的get_conf函数从配置文件中读取代理参数。
|
47 |
-
|
48 |
-
最后,检测程序会输出检查结果并返回对应的结果字符串。
|
49 |
-
|
50 |
-
## [6/18] 程序摘要: config_private.py
|
51 |
-
|
52 |
-
本程序文件名为`config_private.py`,其功能为配置私有信息以便在主程序中使用。主要功能包括:
|
53 |
-
|
54 |
-
- 配置OpenAI API的密钥和API URL
|
55 |
-
- 配置是否使用代理,如果使用代理配置代理地址和端口
|
56 |
-
- 配置发送请求的超时时间和失败重试次数的限制
|
57 |
-
- 配置并行使用线程数和用户名密码
|
58 |
-
- 提供检查功能以确保API密钥已经正确设置
|
59 |
-
|
60 |
-
其中,需要特别注意的是:最后一个检查功能要求在运行之前必须将API密钥正确设置,否则程序会直接退出。
|
61 |
-
|
62 |
-
## [7/18] 程序摘要: config.py
|
63 |
-
|
64 |
-
该程序文件是一个配置文件,用于配置OpenAI的API参数和优化体验的相关参数,具体包括以下几个步骤:
|
65 |
-
|
66 |
-
1.设置OpenAI的API密钥。
|
67 |
-
|
68 |
-
2.选择是否使用代理,如果使用则需要设置代理地址和端口等参数。
|
69 |
-
|
70 |
-
3.设置请求OpenAI后的超时时间、网页的端口、重试次数、选择的OpenAI模型、API的网址等。
|
71 |
-
|
72 |
-
4.设置并行使用的线程数和用户名密码。
|
73 |
-
|
74 |
-
该程序文件的作用为在使用OpenAI API时进行相关参数的配置,以保证请求的正确性和速度,并且优化使用体验。
|
75 |
-
|
76 |
-
## [8/18] 程序摘要: theme.py
|
77 |
-
|
78 |
-
该程序是一个自定义Gradio主题的Python模块。主题文件名为"./theme.py"。程序引入了Gradio模块,并定义了一个名为"adjust_theme()"的函数。该函数根据输入值调整Gradio的默认主题,返回一个包含所需自定义属性的主题对象。主题属性包括颜色、字体、过渡、阴影、按钮边框和渐变等。主题颜色列表包括石板色、灰色、锌色、中性色、石头色、红色、橙色、琥珀色、黄色、酸橙色、绿色、祖母绿、青蓝色、青色、天蓝色、蓝色、靛蓝色、紫罗兰色、紫色、洋红色、粉红色和玫瑰色。如果Gradio版本较旧,则不能自定义字体和颜色。
|
79 |
-
|
80 |
-
## [9/18] 程序摘要: toolbox.py
|
81 |
-
|
82 |
-
该程序文件包含了一系列函数,用于实现聊天程序所需的各种功能,如预测对话、将对话记录写入文件、将普通文本转换为Markdown格式文本、装饰器函数CatchException和HotReload等。其中一些函数用到了第三方库,如Python-Markdown、mdtex2html、zipfile、tarfile、rarfile和py7zr。除此之外,还有一些辅助函数,如get_conf、clear_line_break和extract_archive等。主要功能包括:
|
83 |
-
|
84 |
-
1. 导入markdown、mdtex2html、threading、functools等模块。
|
85 |
-
2. 定义函数predict_no_ui_but_counting_down,用于生成对话。
|
86 |
-
3. 定义函数write_results_to_file,用于将对话记录生成Markdown文件。
|
87 |
-
4. 定义函数regular_txt_to_markdown,将普通文本转换为Markdown格式的文本。
|
88 |
-
5. 定义装饰器函数CatchException,用于捕获函数执行异常并返回生成器。
|
89 |
-
6. 定义函数report_execption,用于向chatbot中添加错误信息。
|
90 |
-
7. 定义函数text_divide_paragraph,用于将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
|
91 |
-
8. 定义函数markdown_convertion,用于将Markdown格式的文本转换为HTML格式。
|
92 |
-
9. 定义函数format_io,用于将输入和输出解析为HTML格式。
|
93 |
-
10. 定义函数find_free_port,用于返回当前系统中可用的未使用端口。
|
94 |
-
11. 定义函数extract_archive,用于解压归档文件。
|
95 |
-
12. 定义函数find_recent_files,用于查找最近创建的文件。
|
96 |
-
13. 定义函数on_file_uploaded,用于处理上传文件的操作。
|
97 |
-
14. 定义函数on_report_generated,用于处理生成报告文件的操作。
|
98 |
-
|
99 |
-
|
100 |
-
## [10/18] 程序摘要: crazy_functions/生成函数注释.py
|
101 |
-
|
102 |
-
该程序文件是一个Python脚本,文件名为“生成函数注释.py”,位于“./crazy_functions/”目录下。该程序实现了一个批量生成函数注释的功能,可以对指定文件夹下的所有Python和C++源代码文件中的所有函数进行注释,使用Markdown表格输出注释结果。
|
103 |
-
|
104 |
-
该程序引用了predict.py和toolbox.py两个模块,其中predict.py实现了一个基于GPT模型的文本生成功能,用于生成函数注释,而toolbox.py实现了一些工具函数,包括异常处理函数、文本写入函数等。另外,该程序还定义了两个函数,一个是“生成函数注释”函数,用于处理单个文件的注释生成;另一个是“批量生成函数注释”函数,用于批量处理多个文件的注释生成。
|
105 |
-
|
106 |
-
## [11/18] 程序摘要: crazy_functions/读文章写摘要.py
|
107 |
-
|
108 |
-
这个程序文件是一个名为“读文章写摘要”的函数。该函数的输入包括文章的文本内容、top_p(生成文本时选择最可能的词语的概率阈值)、temperature(控制生成文本的随机性的因子)、对话历史等参数,以及一个聊天机器人和一个系统提示的文本。该函数的主要工作是解析一组.tex文件,���后生成一段学术性语言的中文和英文摘要。在解析过程中,该函数使用一个名为“toolbox”的模块中的辅助函数和一个名为“predict”的模块中的函数来执行GPT-2模型的推理工作,然后将结果返回给聊天机器人。另外,该程序还包括一个名为“fast_debug”的bool型变量,用于调试和测试。
|
109 |
-
|
110 |
-
## [12/18] 程序摘要: crazy_functions/代码重写为全英文_多线程.py
|
111 |
-
|
112 |
-
该程序文件实现了一个多线程操作,用于将指定目录下的所有 Python 文件中的中文转化为英文,并将转化后的文件存入另一个目录中。具体实现过程如下:
|
113 |
-
|
114 |
-
1. 集合目标文件路径并清空历史记录。
|
115 |
-
2. 循环目标文件,对每个文件启动一个线程进行任务操作。
|
116 |
-
3. 各个线程同时开始执行任务函数,并在任务完成后将转化后的文件写入指定目录,最终生成一份任务执行报告。
|
117 |
-
|
118 |
-
## [13/18] 程序摘要: crazy_functions/高级功能函数模板.py
|
119 |
-
|
120 |
-
该程序文件名为高级功能函数模板.py,它包含了一个名为“高阶功能模板函数”的函数,这个函数可以作为开发新功能函数的模板。该函数引用了predict.py和toolbox.py文件中的函数。在该函数内部,它首先清空了历史记录,然后对于今天和今天以后的四天,它问用户历史中哪些事件发生在这些日期,并列举两条事件并发送相关的图片。在向用户询问问题时,使用了GPT进行响应。由于请求GPT需要一定的时间,所以函数会在重新显示状态之前等待一段时间。在每次与用户的互动中,使用yield关键字生成器函数来输出聊天机器人的当前状态,包括聊天消息、历史记录和状态('正常')。最后,程序调用write_results_to_file函数将聊天的结果写入文件,以供后续的评估和分析。
|
121 |
-
|
122 |
-
## [14/18] 程序摘要: crazy_functions/总结word文档.py
|
123 |
-
|
124 |
-
该程序文件名为总结word文档.py,主要功能是批量总结Word文档。具体实现过程是解析docx格式和doc格式文件,生成文件内容,然后使用自然语言处理工具对文章内容做中英文概述,最后给出建议。该程序需要依赖python-docx和pywin32,如果没有安装,会给出安装建议。
|
125 |
-
|
126 |
-
## [15/18] 程序摘要: crazy_functions/批量总结PDF文档pdfminer.py
|
127 |
-
|
128 |
-
该程序文件名为pdfminer.py,位于./crazy_functions/目录下。程序实现了批量读取PDF文件,并使用pdfminer解析PDF文件内容。此外,程序还根据解析得到的文本内容,调用机器学习模型生成对每篇文章的概述,最终生成全文摘要。程序中还对模块依赖进行了导入检查,若缺少依赖,则会提供安装建议。
|
129 |
-
|
130 |
-
## [16/18] 程序摘要: crazy_functions/解析项目源代码.py
|
131 |
-
|
132 |
-
这个程序文件中包含了几个函数,分别是:
|
133 |
-
|
134 |
-
1. `解析源代码(file_manifest, project_folder, top_p, api_key, temperature, chatbot, history, systemPromptTxt)`:通过输入文件路径列表对程序文件进行逐文件分析,根据分析结果做出整体功能和构架的概括,并生成包括每个文件功能的markdown表格。
|
135 |
-
2. `解析项目本身(txt, top_p, api_key, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对当前文件夹下的所有Python文件及其子文件夹进行逐文件分析,并生成markdown表格。
|
136 |
-
3. `解析一个Python项目(txt, top_p, api_key, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有Python文件及其子文件夹进行逐文件分析,并生成markdown表格。
|
137 |
-
4. `解析一个C项目的头文件(txt, top_p, api_key, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有头文件进行逐文件分析,并生成markdown表格。
|
138 |
-
5. `解析一个C项目(txt, top_p, api_key, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有.h、.cpp、.c文件及其子文件夹进行逐文件分析,并生成markdown表格。
|
139 |
-
|
140 |
-
程序中还包含了一些辅助函数和变量,如CatchException装饰器函数,report_execption函数、write_results_to_file函数等。在执行过程中还会调用其他模块中的函数,如toolbox模块的函数和predict模块的函数。
|
141 |
-
|
142 |
-
## [17/18] 程序摘要: crazy_functions/批量总结PDF文档.py
|
143 |
-
|
144 |
-
这个程序文件是一个名为“批量总结PDF文档”的函数插件。它导入了predict和toolbox模块,并定义了一些函数,包括is_paragraph_break,normalize_text和clean_text。这些函数是对输入文本进行预处理和清洗的功能函数。主要的功能函数是解析PDF,它打开每个PDF文件并将其内容存储在file_content变量中,然后传递给聊天机器人,以产生一句话的概括。在解析PDF文件之后,该函数连接了所有文件的摘要,以产生一段学术语言和英文摘要。最后,函数批量处理目标文件夹中的所有PDF文件,并输出结果。
|
145 |
-
|
146 |
-
## 根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能。
|
147 |
-
|
148 |
-
该程序是一个聊天机器人,使用了OpenAI的GPT语言模型以及一些特殊的辅助功能去处理各种学术写作和科研润色任务。整个程序由一些函数组成,每个函数都代表了不同的学术润色/翻译/其他服务。
|
149 |
-
|
150 |
-
下面是程序中每个文件的功能列表:
|
151 |
-
|
152 |
-
| 文件名 | 功能 |
|
153 |
-
|--------|--------|
|
154 |
-
| functional_crazy.py | 实现高级功能函数模板和其他一些辅助功能函数 |
|
155 |
-
| main.py | 程序的主要入口,负责程序的启动和UI的展示 |
|
156 |
-
| functional.py | 定义各种功能按钮的颜色和响应函数 |
|
157 |
-
| show_math.py | 解析LaTeX文本,将其转换为Markdown格式 |
|
158 |
-
| predict.py | 基础的对话功能,用于与chatGPT进行交互 |
|
159 |
-
| check_proxy.py | 检查代理设置的正确性 |
|
160 |
-
| config_private.py | 配置程序的API密钥和其他私有信息 |
|
161 |
-
| config.py | 配置OpenAI的API参数和程序的其他属性 |
|
162 |
-
| theme.py | 设置程序主题样式 |
|
163 |
-
| toolbox.py | 存放一些辅助函数供程序使用 |
|
164 |
-
| crazy_functions/生成函数注释.py | 生成Python文件中所有函数的注释 |
|
165 |
-
| crazy_functions/读文章写摘要.py | 解析文章文本,生成中英文摘要 |
|
166 |
-
| crazy_functions/代码重写为全英文_多线程.py | 将中文代码内容转化为英文 |
|
167 |
-
| crazy_functions/高级功能函数模板.py | 实现高级功能函数模板 |
|
168 |
-
| crazy_functions/总结word文档.py | 解析Word文件,生成文章内容的概要 |
|
169 |
-
| crazy_functions/批量总结PDF文档pdfminer.py | 解析PDF文件,生成文章内容的概要(使用pdfminer库) |
|
170 |
-
| crazy_functions/批量总结PDF文档.py | 解析PDF文件,生成文章内容的概要(使用PyMuPDF库) |
|
171 |
-
| crazy_functions/解析项目源代码.py | 解析C/C++源代码,生成markdown表格 |
|
172 |
-
| crazy_functions/批量总结PDF文档.py | 对PDF文件进行批量摘要生成 |
|
173 |
-
|
174 |
-
总的来说,该程序提供了一系列的学术润色和翻译的工具,支持对各种类型的文件进行分析和处理。同时也提供了对话式用户界面,便于用户使用和交互。
|
175 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
show_math.py
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
# This program is written by: https://github.com/polarwinkel/mdtex2html
|
2 |
-
|
3 |
-
from latex2mathml.converter import convert as tex2mathml
|
4 |
-
import re
|
5 |
-
|
6 |
-
incomplete = '<font style="color:orange;" class="tooltip">⚠<span class="tooltiptext">formula incomplete</span></font>'
|
7 |
-
convError = '<font style="color:red" class="tooltip">⚠<span class="tooltiptext">LaTeX-convert-error</span></font>'
|
8 |
-
|
9 |
-
def convert(mdtex, extensions=None, splitParagraphs=True):
    """Recursively convert a Markdown-LaTeX mixture to HTML with MathML.

    Finds the first ``$$..$$``, ``$..$``, ``\\[..\\]`` or ``\\(..\\)`` formula,
    converts it with latex2mathml, and recurses on the text around it.
    Unterminated formulas are replaced by the module-level ``incomplete``
    marker; conversion failures by ``convError``.

    mdtex: the Markdown/LaTeX source text.
    extensions: extension list threaded through recursive calls
        (was a mutable default ``[]``; now created fresh per call).
    splitParagraphs: when True, process each blank-line-separated paragraph
        independently to prevent formula state leaking across paragraphs.
    """
    extensions = [] if extensions is None else extensions  # avoid shared mutable default
    found = False
    # handle all paragraphs separately (prevents aftereffects)
    if splitParagraphs:
        parts = re.split("\n\n", mdtex)
        result = ''
        for part in parts:
            result += convert(part, extensions, splitParagraphs=False)
        return result
    # find first $$-formula:
    parts = re.split(r'\${2}', mdtex, maxsplit=2)
    if len(parts) > 1:
        found = True
        result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
        try:
            result += '<div class="blockformula">' + tex2mathml(parts[1]) + '</div>\n'
        except Exception:
            result += '<div class="blockformula">' + convError + '</div>'
        if len(parts) == 3:
            result += convert(parts[2], extensions, splitParagraphs=False)
        else:
            result += '<div class="blockformula">' + incomplete + '</div>'
    # else find first $-formulas:
    else:
        parts = re.split(r'\${1}', mdtex, maxsplit=2)
        if len(parts) > 1 and not found:
            found = True
            try:
                mathml = tex2mathml(parts[1])
            except Exception:
                mathml = convError
            if parts[0].endswith('\n\n') or parts[0] == '':  # make sure textblock starts before formula!
                parts[0] = parts[0] + '&#x200b;'
            if len(parts) == 3:
                result = convert(parts[0] + mathml + parts[2], extensions, splitParagraphs=False)
            else:
                result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
        # else find first \[..\]-equation:
        else:
            parts = re.split(r'\\\[', mdtex, maxsplit=1)
            if len(parts) > 1 and not found:
                found = True
                result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
                parts = re.split(r'\\\]', parts[1], maxsplit=1)
                try:
                    result += '<div class="blockformula">' + tex2mathml(parts[0]) + '</div>\n'
                except Exception:
                    result += '<div class="blockformula">' + convError + '</div>'
                if len(parts) == 2:
                    result += convert(parts[1], extensions, splitParagraphs=False)
                else:
                    result += '<div class="blockformula">' + incomplete + '</div>'
            # else find first \(..\)-equation:
            else:
                parts = re.split(r'\\\(', mdtex, maxsplit=1)
                if len(parts) > 1 and not found:
                    found = True
                    subp = re.split(r'\\\)', parts[1], maxsplit=1)
                    try:
                        mathml = tex2mathml(subp[0])
                    except Exception:
                        mathml = convError
                    if parts[0].endswith('\n\n') or parts[0] == '':  # make sure textblock starts before formula!
                        parts[0] = parts[0] + '&#x200b;'
                    if len(subp) == 2:
                        result = convert(parts[0] + mathml + subp[1], extensions, splitParagraphs=False)
                    else:
                        result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
    if not found:
        result = mdtex
    return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
theme.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
|
|
3 |
# gradio可用颜色列表
|
4 |
# gr.themes.utils.colors.slate (石板色)
|
5 |
# gr.themes.utils.colors.gray (灰色)
|
@@ -24,14 +25,16 @@ import gradio as gr
|
|
24 |
# gr.themes.utils.colors.pink (粉红色)
|
25 |
# gr.themes.utils.colors.rose (玫瑰色)
|
26 |
|
|
|
27 |
def adjust_theme():
|
28 |
-
try:
|
29 |
-
color_er = gr.themes.utils.colors.
|
30 |
-
set_theme = gr.themes.Default(
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
|
|
35 |
set_theme.set(
|
36 |
# Colors
|
37 |
input_background_fill_dark="*neutral_800",
|
@@ -77,10 +80,12 @@ def adjust_theme():
|
|
77 |
button_cancel_text_color=color_er.c600,
|
78 |
button_cancel_text_color_dark="white",
|
79 |
)
|
80 |
-
except:
|
81 |
-
set_theme = None
|
|
|
82 |
return set_theme
|
83 |
|
|
|
84 |
advanced_css = """
|
85 |
/* 设置表格的外边距为1em,内部单元格之间边框合并,空单元格显示. */
|
86 |
.markdown-body table {
|
@@ -149,4 +154,78 @@ advanced_css = """
|
|
149 |
padding: 1em;
|
150 |
margin: 1em 2em 1em 0.5em;
|
151 |
}
|
152 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from toolbox import get_conf
|
3 |
+
CODE_HIGHLIGHT, = get_conf('CODE_HIGHLIGHT')
|
4 |
# gradio可用颜色列表
|
5 |
# gr.themes.utils.colors.slate (石板色)
|
6 |
# gr.themes.utils.colors.gray (灰色)
|
|
|
25 |
# gr.themes.utils.colors.pink (粉红色)
|
26 |
# gr.themes.utils.colors.rose (玫瑰色)
|
27 |
|
28 |
+
|
29 |
def adjust_theme():
|
30 |
+
try:
|
31 |
+
color_er = gr.themes.utils.colors.fuchsia
|
32 |
+
set_theme = gr.themes.Default(
|
33 |
+
primary_hue=gr.themes.utils.colors.orange,
|
34 |
+
neutral_hue=gr.themes.utils.colors.gray,
|
35 |
+
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
|
36 |
+
"sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
|
37 |
+
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
|
38 |
set_theme.set(
|
39 |
# Colors
|
40 |
input_background_fill_dark="*neutral_800",
|
|
|
80 |
button_cancel_text_color=color_er.c600,
|
81 |
button_cancel_text_color_dark="white",
|
82 |
)
|
83 |
+
except:
|
84 |
+
set_theme = None
|
85 |
+
print('gradio版本较旧, 不能自定义字体和颜色')
|
86 |
return set_theme
|
87 |
|
88 |
+
|
89 |
advanced_css = """
|
90 |
/* 设置表格的外边距为1em,内部单元格之间边框合并,空单元格显示. */
|
91 |
.markdown-body table {
|
|
|
154 |
padding: 1em;
|
155 |
margin: 1em 2em 1em 0.5em;
|
156 |
}
|
157 |
+
|
158 |
+
"""
|
159 |
+
|
160 |
+
if CODE_HIGHLIGHT:
|
161 |
+
advanced_css += """
|
162 |
+
|
163 |
+
.hll { background-color: #ffffcc }
|
164 |
+
.c { color: #3D7B7B; font-style: italic } /* Comment */
|
165 |
+
.err { border: 1px solid #FF0000 } /* Error */
|
166 |
+
.k { color: hsl(197, 94%, 51%); font-weight: bold } /* Keyword */
|
167 |
+
.o { color: #666666 } /* Operator */
|
168 |
+
.ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
|
169 |
+
.cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
|
170 |
+
.cp { color: #9C6500 } /* Comment.Preproc */
|
171 |
+
.cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
|
172 |
+
.c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
|
173 |
+
.cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
|
174 |
+
.gd { color: #A00000 } /* Generic.Deleted */
|
175 |
+
.ge { font-style: italic } /* Generic.Emph */
|
176 |
+
.gr { color: #E40000 } /* Generic.Error */
|
177 |
+
.gh { color: #000080; font-weight: bold } /* Generic.Heading */
|
178 |
+
.gi { color: #008400 } /* Generic.Inserted */
|
179 |
+
.go { color: #717171 } /* Generic.Output */
|
180 |
+
.gp { color: #000080; font-weight: bold } /* Generic.Prompt */
|
181 |
+
.gs { font-weight: bold } /* Generic.Strong */
|
182 |
+
.gu { color: #800080; font-weight: bold } /* Generic.Subheading */
|
183 |
+
.gt { color: #a9dd00 } /* Generic.Traceback */
|
184 |
+
.kc { color: #008000; font-weight: bold } /* Keyword.Constant */
|
185 |
+
.kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
|
186 |
+
.kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
|
187 |
+
.kp { color: #008000 } /* Keyword.Pseudo */
|
188 |
+
.kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
|
189 |
+
.kt { color: #B00040 } /* Keyword.Type */
|
190 |
+
.m { color: #666666 } /* Literal.Number */
|
191 |
+
.s { color: #BA2121 } /* Literal.String */
|
192 |
+
.na { color: #687822 } /* Name.Attribute */
|
193 |
+
.nb { color: #e5f8c3 } /* Name.Builtin */
|
194 |
+
.nc { color: #ffad65; font-weight: bold } /* Name.Class */
|
195 |
+
.no { color: #880000 } /* Name.Constant */
|
196 |
+
.nd { color: #AA22FF } /* Name.Decorator */
|
197 |
+
.ni { color: #717171; font-weight: bold } /* Name.Entity */
|
198 |
+
.ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
|
199 |
+
.nf { color: #f9f978 } /* Name.Function */
|
200 |
+
.nl { color: #767600 } /* Name.Label */
|
201 |
+
.nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
|
202 |
+
.nt { color: #008000; font-weight: bold } /* Name.Tag */
|
203 |
+
.nv { color: #19177C } /* Name.Variable */
|
204 |
+
.ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
|
205 |
+
.w { color: #bbbbbb } /* Text.Whitespace */
|
206 |
+
.mb { color: #666666 } /* Literal.Number.Bin */
|
207 |
+
.mf { color: #666666 } /* Literal.Number.Float */
|
208 |
+
.mh { color: #666666 } /* Literal.Number.Hex */
|
209 |
+
.mi { color: #666666 } /* Literal.Number.Integer */
|
210 |
+
.mo { color: #666666 } /* Literal.Number.Oct */
|
211 |
+
.sa { color: #BA2121 } /* Literal.String.Affix */
|
212 |
+
.sb { color: #BA2121 } /* Literal.String.Backtick */
|
213 |
+
.sc { color: #BA2121 } /* Literal.String.Char */
|
214 |
+
.dl { color: #BA2121 } /* Literal.String.Delimiter */
|
215 |
+
.sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
|
216 |
+
.s2 { color: #2bf840 } /* Literal.String.Double */
|
217 |
+
.se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
|
218 |
+
.sh { color: #BA2121 } /* Literal.String.Heredoc */
|
219 |
+
.si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
|
220 |
+
.sx { color: #008000 } /* Literal.String.Other */
|
221 |
+
.sr { color: #A45A77 } /* Literal.String.Regex */
|
222 |
+
.s1 { color: #BA2121 } /* Literal.String.Single */
|
223 |
+
.ss { color: #19177C } /* Literal.String.Symbol */
|
224 |
+
.bp { color: #008000 } /* Name.Builtin.Pseudo */
|
225 |
+
.fm { color: #0000FF } /* Name.Function.Magic */
|
226 |
+
.vc { color: #19177C } /* Name.Variable.Class */
|
227 |
+
.vg { color: #19177C } /* Name.Variable.Global */
|
228 |
+
.vi { color: #19177C } /* Name.Variable.Instance */
|
229 |
+
.vm { color: #19177C } /* Name.Variable.Magic */
|
230 |
+
.il { color: #666666 } /* Literal.Number.Integer.Long */
|
231 |
+
"""
|
toolbox.py
CHANGED
@@ -1,13 +1,72 @@
|
|
1 |
-
import markdown
|
2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
from functools import wraps, lru_cache
|
4 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
def get_reduce_token_percent(text):
|
|
|
|
|
|
|
6 |
try:
|
7 |
# text = "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
|
8 |
pattern = r"(\d+)\s+tokens\b"
|
9 |
match = re.findall(pattern, text)
|
10 |
-
EXCEED_ALLO = 500
|
11 |
max_limit = float(match[0]) - EXCEED_ALLO
|
12 |
current_tokens = float(match[1])
|
13 |
ratio = max_limit/current_tokens
|
@@ -16,40 +75,43 @@ def get_reduce_token_percent(text):
|
|
16 |
except:
|
17 |
return 0.5, '不详'
|
18 |
|
19 |
-
def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot,
|
20 |
"""
|
|
|
|
|
21 |
调用简单的predict_no_ui接口,但是依然保留了些许界面心跳功能,当对话太长时,会自动采用二分法截断
|
22 |
i_say: 当前输入
|
23 |
i_say_show_user: 显示到对话界面上的当前输入,例如,输入整个文件时,你绝对不想把文件的内容都糊到对话界面上
|
24 |
chatbot: 对话界面句柄
|
25 |
-
top_p,
|
26 |
history: gpt参数 对话历史
|
27 |
sys_prompt: gpt参数 sys_prompt
|
28 |
-
long_connection:
|
29 |
"""
|
30 |
import time
|
31 |
-
from
|
32 |
from toolbox import get_conf
|
33 |
TIMEOUT_SECONDS, MAX_RETRY = get_conf('TIMEOUT_SECONDS', 'MAX_RETRY')
|
34 |
# 多线程的时候,需要一个mutable结构在不同线程之间传递信息
|
35 |
# list就是最简单的mutable结构,我们第一个位置放gpt输出,第二个位置传递报错信息
|
36 |
mutable = [None, '']
|
37 |
# multi-threading worker
|
|
|
38 |
def mt(i_say, history):
|
39 |
while True:
|
40 |
try:
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, api_key=api_key, temperature=temperature, history=history, sys_prompt=sys_prompt)
|
45 |
-
break
|
46 |
except ConnectionAbortedError as token_exceeded_error:
|
47 |
# 尝试计算比例,尽可能多地保留文本
|
48 |
-
p_ratio, n_exceed = get_reduce_token_percent(
|
|
|
49 |
if len(history) > 0:
|
50 |
-
history = [his[
|
|
|
51 |
else:
|
52 |
-
i_say = i_say[: int(len(i_say)
|
53 |
mutable[1] = f'警告,文本过长将进行截断,Token溢出数:{n_exceed},截断比例:{(1-p_ratio):.0%}。'
|
54 |
except TimeoutError as e:
|
55 |
mutable[0] = '[Local Message] 请求超时。'
|
@@ -58,42 +120,51 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, api_
|
|
58 |
mutable[0] = f'[Local Message] 异常:{str(e)}.'
|
59 |
raise RuntimeError(f'[Local Message] 异常:{str(e)}.')
|
60 |
# 创建新线程发出http请求
|
61 |
-
thread_name = threading.Thread(target=mt, args=(i_say, history))
|
|
|
62 |
# 原来的线程则负责持续更新UI,实现一个超时倒计时,并等待新线程的任务完成
|
63 |
cnt = 0
|
64 |
while thread_name.is_alive():
|
65 |
cnt += 1
|
66 |
-
chatbot[-1] = (i_say_show_user,
|
67 |
-
|
|
|
68 |
time.sleep(1)
|
69 |
# 把gpt的输出从mutable中取出来
|
70 |
gpt_say = mutable[0]
|
71 |
-
if gpt_say=='[Local Message] Failed with timeout.':
|
|
|
72 |
return gpt_say
|
73 |
|
|
|
74 |
def write_results_to_file(history, file_name=None):
|
75 |
"""
|
76 |
将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
|
77 |
"""
|
78 |
-
import os
|
|
|
79 |
if file_name is None:
|
80 |
# file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
|
81 |
-
file_name = 'chatGPT分析报告' +
|
|
|
82 |
os.makedirs('./gpt_log/', exist_ok=True)
|
83 |
-
with open(f'./gpt_log/{file_name}', 'w', encoding
|
84 |
f.write('# chatGPT 分析报告\n')
|
85 |
for i, content in enumerate(history):
|
86 |
try: # 这个bug没找到触发条件,暂时先这样顶一下
|
87 |
-
if type(content) != str:
|
|
|
88 |
except:
|
89 |
continue
|
90 |
-
if i%2==0:
|
|
|
91 |
f.write(content)
|
92 |
f.write('\n\n')
|
93 |
res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
|
94 |
print(res)
|
95 |
return res
|
96 |
|
|
|
97 |
def regular_txt_to_markdown(text):
|
98 |
"""
|
99 |
将普通文本转换为Markdown格式的文本。
|
@@ -103,27 +174,37 @@ def regular_txt_to_markdown(text):
|
|
103 |
text = text.replace('\n\n\n', '\n\n')
|
104 |
return text
|
105 |
|
|
|
106 |
def CatchException(f):
|
107 |
"""
|
108 |
装饰器函数,捕捉函数f中的异常并封装到一个生成器中返回,并显示到聊天当中。
|
109 |
"""
|
110 |
@wraps(f)
|
111 |
-
def decorated(txt, top_p,
|
112 |
try:
|
113 |
-
yield from f(txt, top_p,
|
114 |
except Exception as e:
|
115 |
from check_proxy import check_proxy
|
116 |
from toolbox import get_conf
|
117 |
proxies, = get_conf('proxies')
|
118 |
tb_str = '```\n' + traceback.format_exc() + '```'
|
119 |
-
if len(chatbot) == 0:
|
120 |
-
|
121 |
-
|
|
|
|
|
122 |
return decorated
|
123 |
|
|
|
124 |
def HotReload(f):
|
125 |
"""
|
126 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
127 |
"""
|
128 |
@wraps(f)
|
129 |
def decorated(*args, **kwargs):
|
@@ -132,12 +213,15 @@ def HotReload(f):
|
|
132 |
yield from f_hot_reload(*args, **kwargs)
|
133 |
return decorated
|
134 |
|
|
|
135 |
def report_execption(chatbot, history, a, b):
|
136 |
"""
|
137 |
向chatbot中添加错误信息
|
138 |
"""
|
139 |
chatbot.append((a, b))
|
140 |
-
history.append(a)
|
|
|
|
|
141 |
|
142 |
def text_divide_paragraph(text):
|
143 |
"""
|
@@ -154,23 +238,88 @@ def text_divide_paragraph(text):
|
|
154 |
text = "</br>".join(lines)
|
155 |
return text
|
156 |
|
|
|
157 |
def markdown_convertion(txt):
|
158 |
"""
|
159 |
将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
|
160 |
"""
|
161 |
pre = '<div class="markdown-body">'
|
162 |
suf = '</div>'
|
163 |
-
|
164 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
165 |
else:
|
166 |
-
return pre + markdown.markdown(txt,extensions=['fenced_code','tables']) + suf
|
|
|
167 |
|
168 |
def close_up_code_segment_during_stream(gpt_reply):
|
169 |
"""
|
170 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
171 |
"""
|
172 |
-
if '```' not in gpt_reply:
|
173 |
-
|
|
|
|
|
174 |
|
175 |
# 排除了以上两个情况,我们
|
176 |
segments = gpt_reply.split('```')
|
@@ -180,19 +329,19 @@ def close_up_code_segment_during_stream(gpt_reply):
|
|
180 |
return gpt_reply+'\n```'
|
181 |
else:
|
182 |
return gpt_reply
|
183 |
-
|
184 |
|
185 |
|
186 |
def format_io(self, y):
|
187 |
"""
|
188 |
将输入和输出解析为HTML格式。将y中最后一项的输入部分段落化,并将输出部分的Markdown和数学公式转换为HTML格式。
|
189 |
"""
|
190 |
-
if y is None or y == []:
|
|
|
191 |
i_ask, gpt_reply = y[-1]
|
192 |
-
i_ask = text_divide_paragraph(i_ask)
|
193 |
gpt_reply = close_up_code_segment_during_stream(gpt_reply) # 当代码输出半截的时候,试着补上后个```
|
194 |
y[-1] = (
|
195 |
-
None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code','tables']),
|
196 |
None if gpt_reply is None else markdown_convertion(gpt_reply)
|
197 |
)
|
198 |
return y
|
@@ -254,6 +403,7 @@ def extract_archive(file_path, dest_dir):
|
|
254 |
return ''
|
255 |
return ''
|
256 |
|
|
|
257 |
def find_recent_files(directory):
|
258 |
"""
|
259 |
me: find files that is created with in one minutes under a directory with python, write a function
|
@@ -267,21 +417,29 @@ def find_recent_files(directory):
|
|
267 |
|
268 |
for filename in os.listdir(directory):
|
269 |
file_path = os.path.join(directory, filename)
|
270 |
-
if file_path.endswith('.log'):
|
271 |
-
|
|
|
272 |
if created_time >= one_minute_ago:
|
273 |
-
if os.path.isdir(file_path):
|
|
|
274 |
recent_files.append(file_path)
|
275 |
|
276 |
return recent_files
|
277 |
|
278 |
|
279 |
def on_file_uploaded(files, chatbot, txt):
|
280 |
-
if len(files) == 0:
|
281 |
-
|
|
|
|
|
|
|
|
|
282 |
from toolbox import extract_archive
|
283 |
-
try:
|
284 |
-
|
|
|
|
|
285 |
time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
|
286 |
os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
|
287 |
err_msg = ''
|
@@ -289,13 +447,14 @@ def on_file_uploaded(files, chatbot, txt):
|
|
289 |
file_origin_name = os.path.basename(file.orig_name)
|
290 |
shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
|
291 |
err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
|
292 |
-
|
293 |
-
moved_files = [fp for fp in glob.glob(
|
|
|
294 |
txt = f'private_upload/{time_tag}'
|
295 |
moved_files_str = '\t\n\n'.join(moved_files)
|
296 |
chatbot.append(['我上传了文件,请查收',
|
297 |
-
f'[Local Message] 收到以下文件: \n\n{moved_files_str}'+
|
298 |
-
f'\n\n
|
299 |
f'\n\n现在您点击任意实验功能时,以上文件将被作为输入参数'+err_msg])
|
300 |
return chatbot, txt
|
301 |
|
@@ -303,32 +462,40 @@ def on_file_uploaded(files, chatbot, txt):
|
|
303 |
def on_report_generated(files, chatbot):
|
304 |
from toolbox import find_recent_files
|
305 |
report_files = find_recent_files('gpt_log')
|
306 |
-
if len(report_files) == 0:
|
|
|
307 |
# files.extend(report_files)
|
308 |
chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
|
309 |
return report_files, chatbot
|
310 |
|
|
|
|
|
|
|
|
|
|
|
311 |
@lru_cache(maxsize=128)
|
312 |
def read_single_conf_with_lru_cache(arg):
|
313 |
-
|
314 |
-
|
|
|
|
|
|
|
315 |
# 在读取API_KEY时,检查一下是不是忘了改config
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
|
322 |
-
|
323 |
-
|
324 |
-
|
325 |
-
|
326 |
-
print('[PROXY]
|
327 |
-
else:
|
328 |
-
print('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
|
329 |
assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
|
330 |
return r
|
331 |
|
|
|
332 |
def get_conf(*args):
|
333 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
334 |
res = []
|
@@ -337,8 +504,26 @@ def get_conf(*args):
|
|
337 |
res.append(r)
|
338 |
return res
|
339 |
|
|
|
340 |
def clear_line_break(txt):
|
341 |
txt = txt.replace('\n', ' ')
|
342 |
txt = txt.replace(' ', ' ')
|
343 |
txt = txt.replace(' ', ' ')
|
344 |
return txt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import markdown
|
2 |
+
import mdtex2html
|
3 |
+
import threading
|
4 |
+
import importlib
|
5 |
+
import traceback
|
6 |
+
import inspect
|
7 |
+
import re
|
8 |
+
from latex2mathml.converter import convert as tex2mathml
|
9 |
from functools import wraps, lru_cache
|
10 |
|
11 |
+
############################### 插件输入输出接驳区 #######################################
|
12 |
+
class ChatBotWithCookies(list):
|
13 |
+
def __init__(self, cookie):
|
14 |
+
self._cookies = cookie
|
15 |
+
|
16 |
+
def write_list(self, list):
|
17 |
+
for t in list:
|
18 |
+
self.append(t)
|
19 |
+
|
20 |
+
def get_list(self):
|
21 |
+
return [t for t in self]
|
22 |
+
|
23 |
+
def get_cookies(self):
|
24 |
+
return self._cookies
|
25 |
+
|
26 |
+
def ArgsGeneralWrapper(f):
|
27 |
+
"""
|
28 |
+
装饰器函数,用于重组输入参数,改变输入参数的顺序与结构。
|
29 |
+
"""
|
30 |
+
def decorated(cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt, *args):
|
31 |
+
txt_passon = txt
|
32 |
+
if txt == "" and txt2 != "": txt_passon = txt2
|
33 |
+
# 引入一个有cookie的chatbot
|
34 |
+
cookies.update({
|
35 |
+
'top_p':top_p,
|
36 |
+
'temperature':temperature,
|
37 |
+
})
|
38 |
+
llm_kwargs = {
|
39 |
+
'api_key': cookies['api_key'],
|
40 |
+
'llm_model': cookies['llm_model'],
|
41 |
+
'top_p':top_p,
|
42 |
+
'temperature':temperature,
|
43 |
+
}
|
44 |
+
plugin_kwargs = {
|
45 |
+
# 目前还没有
|
46 |
+
}
|
47 |
+
chatbot_with_cookie = ChatBotWithCookies(cookies)
|
48 |
+
chatbot_with_cookie.write_list(chatbot)
|
49 |
+
yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
|
50 |
+
return decorated
|
51 |
+
|
52 |
+
def update_ui(chatbot, history, msg='正常', **kwargs): # 刷新界面
|
53 |
+
"""
|
54 |
+
刷新用户界面
|
55 |
+
"""
|
56 |
+
assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。"
|
57 |
+
yield chatbot.get_cookies(), chatbot, history, msg
|
58 |
+
############################### ################## #######################################
|
59 |
+
##########################################################################################
|
60 |
+
|
61 |
def get_reduce_token_percent(text):
|
62 |
+
"""
|
63 |
+
* 此函数未来将被弃用
|
64 |
+
"""
|
65 |
try:
|
66 |
# text = "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
|
67 |
pattern = r"(\d+)\s+tokens\b"
|
68 |
match = re.findall(pattern, text)
|
69 |
+
EXCEED_ALLO = 500 # 稍微留一点余地,否则在回复时会因余量太少出问题
|
70 |
max_limit = float(match[0]) - EXCEED_ALLO
|
71 |
current_tokens = float(match[1])
|
72 |
ratio = max_limit/current_tokens
|
|
|
75 |
except:
|
76 |
return 0.5, '不详'
|
77 |
|
78 |
+
def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, llm_kwargs, history=[], sys_prompt='', long_connection=True):
|
79 |
"""
|
80 |
+
* 此函数未来将被弃用(替代函数 request_gpt_model_in_new_thread_with_ui_alive 文件 chatgpt_academic/crazy_functions/crazy_utils)
|
81 |
+
|
82 |
调用简单的predict_no_ui接口,但是依然保留了些许界面心跳功能,当对话太长时,会自动采用二分法截断
|
83 |
i_say: 当前输入
|
84 |
i_say_show_user: 显示到对话界面上的当前输入,例如,输入整个文件时,你绝对不想把文件的内容都糊到对话界面上
|
85 |
chatbot: 对话界面句柄
|
86 |
+
top_p, temperature: gpt参数
|
87 |
history: gpt参数 对话历史
|
88 |
sys_prompt: gpt参数 sys_prompt
|
89 |
+
long_connection: 是否采用更稳定的连接方式(推荐)(已弃用)
|
90 |
"""
|
91 |
import time
|
92 |
+
from request_llm.bridge_chatgpt import predict_no_ui_long_connection
|
93 |
from toolbox import get_conf
|
94 |
TIMEOUT_SECONDS, MAX_RETRY = get_conf('TIMEOUT_SECONDS', 'MAX_RETRY')
|
95 |
# 多线程的时候,需要一个mutable结构在不同线程之间传递信息
|
96 |
# list就是最简单的mutable结构,我们第一个位置放gpt输出,第二个位置传递报错信息
|
97 |
mutable = [None, '']
|
98 |
# multi-threading worker
|
99 |
+
|
100 |
def mt(i_say, history):
|
101 |
while True:
|
102 |
try:
|
103 |
+
mutable[0] = predict_no_ui_long_connection(
|
104 |
+
inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt)
|
105 |
+
|
|
|
|
|
106 |
except ConnectionAbortedError as token_exceeded_error:
|
107 |
# 尝试计算比例,尽可能多地保留文本
|
108 |
+
p_ratio, n_exceed = get_reduce_token_percent(
|
109 |
+
str(token_exceeded_error))
|
110 |
if len(history) > 0:
|
111 |
+
history = [his[int(len(his) * p_ratio):]
|
112 |
+
for his in history if his is not None]
|
113 |
else:
|
114 |
+
i_say = i_say[: int(len(i_say) * p_ratio)]
|
115 |
mutable[1] = f'警告,文本过长将进行截断,Token溢出数:{n_exceed},截断比例:{(1-p_ratio):.0%}。'
|
116 |
except TimeoutError as e:
|
117 |
mutable[0] = '[Local Message] 请求超时。'
|
|
|
120 |
mutable[0] = f'[Local Message] 异常:{str(e)}.'
|
121 |
raise RuntimeError(f'[Local Message] 异常:{str(e)}.')
|
122 |
# 创建新线程发出http请求
|
123 |
+
thread_name = threading.Thread(target=mt, args=(i_say, history))
|
124 |
+
thread_name.start()
|
125 |
# 原来的线程则负责持续更新UI,实现一个超时倒计时,并等待新线程的任务完成
|
126 |
cnt = 0
|
127 |
while thread_name.is_alive():
|
128 |
cnt += 1
|
129 |
+
chatbot[-1] = (i_say_show_user,
|
130 |
+
f"[Local Message] {mutable[1]}waiting gpt response {cnt}/{TIMEOUT_SECONDS*2*(MAX_RETRY+1)}"+''.join(['.']*(cnt % 4)))
|
131 |
+
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
132 |
time.sleep(1)
|
133 |
# 把gpt的输出从mutable中取出来
|
134 |
gpt_say = mutable[0]
|
135 |
+
if gpt_say == '[Local Message] Failed with timeout.':
|
136 |
+
raise TimeoutError
|
137 |
return gpt_say
|
138 |
|
139 |
+
|
140 |
def write_results_to_file(history, file_name=None):
|
141 |
"""
|
142 |
将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
|
143 |
"""
|
144 |
+
import os
|
145 |
+
import time
|
146 |
if file_name is None:
|
147 |
# file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
|
148 |
+
file_name = 'chatGPT分析报告' + \
|
149 |
+
time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
|
150 |
os.makedirs('./gpt_log/', exist_ok=True)
|
151 |
+
with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
|
152 |
f.write('# chatGPT 分析报告\n')
|
153 |
for i, content in enumerate(history):
|
154 |
try: # 这个bug没找到触发条件,暂时先这样顶一下
|
155 |
+
if type(content) != str:
|
156 |
+
content = str(content)
|
157 |
except:
|
158 |
continue
|
159 |
+
if i % 2 == 0:
|
160 |
+
f.write('## ')
|
161 |
f.write(content)
|
162 |
f.write('\n\n')
|
163 |
res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
|
164 |
print(res)
|
165 |
return res
|
166 |
|
167 |
+
|
168 |
def regular_txt_to_markdown(text):
|
169 |
"""
|
170 |
将普通文本转换为Markdown格式的文本。
|
|
|
174 |
text = text.replace('\n\n\n', '\n\n')
|
175 |
return text
|
176 |
|
177 |
+
|
178 |
def CatchException(f):
|
179 |
"""
|
180 |
装饰器函数,捕捉函数f中的异常并封装到一个生成器中返回,并显示到聊天当中。
|
181 |
"""
|
182 |
@wraps(f)
|
183 |
+
def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
|
184 |
try:
|
185 |
+
yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
|
186 |
except Exception as e:
|
187 |
from check_proxy import check_proxy
|
188 |
from toolbox import get_conf
|
189 |
proxies, = get_conf('proxies')
|
190 |
tb_str = '```\n' + traceback.format_exc() + '```'
|
191 |
+
if chatbot is None or len(chatbot) == 0:
|
192 |
+
chatbot = [["插件调度异常", "异常原因"]]
|
193 |
+
chatbot[-1] = (chatbot[-1][0],
|
194 |
+
f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
|
195 |
+
yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # 刷新界面
|
196 |
return decorated
|
197 |
|
198 |
+
|
199 |
def HotReload(f):
|
200 |
"""
|
201 |
+
HotReload的装饰器函数,用于实现Python函数插件的热更新。
|
202 |
+
函数热更新是指在不停止程序运行的情况下,更新函数代码,从而达到实时更新功能。
|
203 |
+
在装饰器内部,使用wraps(f)来保留函数的元信息,并定义了一个名为decorated的内部函数。
|
204 |
+
内部函数通过使用importlib模块的reload函数和inspect模块的getmodule函数来重新加载并获取函数模块,
|
205 |
+
然后通过getattr函数获取函数名,并在新模块中重新加载函数。
|
206 |
+
最后,使用yield from语句返回重新加载过的函数,并在被装饰的函数上执行。
|
207 |
+
最终,装饰器函数返回内部函数。这个内部函数可以将函数的原始定义更新为最新版本,并执行函数的新版本。
|
208 |
"""
|
209 |
@wraps(f)
|
210 |
def decorated(*args, **kwargs):
|
|
|
213 |
yield from f_hot_reload(*args, **kwargs)
|
214 |
return decorated
|
215 |
|
216 |
+
|
217 |
def report_execption(chatbot, history, a, b):
|
218 |
"""
|
219 |
向chatbot中添加错误信息
|
220 |
"""
|
221 |
chatbot.append((a, b))
|
222 |
+
history.append(a)
|
223 |
+
history.append(b)
|
224 |
+
|
225 |
|
226 |
def text_divide_paragraph(text):
|
227 |
"""
|
|
|
238 |
text = "</br>".join(lines)
|
239 |
return text
|
240 |
|
241 |
+
|
242 |
def markdown_convertion(txt):
|
243 |
"""
|
244 |
将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
|
245 |
"""
|
246 |
pre = '<div class="markdown-body">'
|
247 |
suf = '</div>'
|
248 |
+
markdown_extension_configs = {
|
249 |
+
'mdx_math': {
|
250 |
+
'enable_dollar_delimiter': True,
|
251 |
+
'use_gitlab_delimiters': False,
|
252 |
+
},
|
253 |
+
}
|
254 |
+
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
|
255 |
+
|
256 |
+
def tex2mathml_catch_exception(content, *args, **kwargs):
|
257 |
+
try:
|
258 |
+
content = tex2mathml(content, *args, **kwargs)
|
259 |
+
except:
|
260 |
+
content = content
|
261 |
+
return content
|
262 |
+
|
263 |
+
def replace_math_no_render(match):
|
264 |
+
content = match.group(1)
|
265 |
+
if 'mode=display' in match.group(0):
|
266 |
+
content = content.replace('\n', '</br>')
|
267 |
+
return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
|
268 |
+
else:
|
269 |
+
return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
|
270 |
+
|
271 |
+
def replace_math_render(match):
|
272 |
+
content = match.group(1)
|
273 |
+
if 'mode=display' in match.group(0):
|
274 |
+
if '\\begin{aligned}' in content:
|
275 |
+
content = content.replace('\\begin{aligned}', '\\begin{array}')
|
276 |
+
content = content.replace('\\end{aligned}', '\\end{array}')
|
277 |
+
content = content.replace('&', ' ')
|
278 |
+
content = tex2mathml_catch_exception(content, display="block")
|
279 |
+
return content
|
280 |
+
else:
|
281 |
+
return tex2mathml_catch_exception(content)
|
282 |
+
|
283 |
+
def markdown_bug_hunt(content):
|
284 |
+
"""
|
285 |
+
解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
|
286 |
+
"""
|
287 |
+
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
|
288 |
+
content = content.replace('</script>\n</script>', '</script>')
|
289 |
+
return content
|
290 |
+
|
291 |
+
|
292 |
+
if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识
|
293 |
+
# convert everything to html format
|
294 |
+
split = markdown.markdown(text='---')
|
295 |
+
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
|
296 |
+
convert_stage_1 = markdown_bug_hunt(convert_stage_1)
|
297 |
+
# re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
|
298 |
+
# 1. convert to easy-to-copy tex (do not render math)
|
299 |
+
convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
|
300 |
+
# 2. convert to rendered equation
|
301 |
+
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
|
302 |
+
# cat them together
|
303 |
+
return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
|
304 |
else:
|
305 |
+
return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
|
306 |
+
|
307 |
|
308 |
def close_up_code_segment_during_stream(gpt_reply):
|
309 |
"""
|
310 |
+
在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的```
|
311 |
+
|
312 |
+
Args:
|
313 |
+
gpt_reply (str): GPT模型返回的回复字符串。
|
314 |
+
|
315 |
+
Returns:
|
316 |
+
str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
|
317 |
+
|
318 |
"""
|
319 |
+
if '```' not in gpt_reply:
|
320 |
+
return gpt_reply
|
321 |
+
if gpt_reply.endswith('```'):
|
322 |
+
return gpt_reply
|
323 |
|
324 |
# 排除了以上两个情况,我们
|
325 |
segments = gpt_reply.split('```')
|
|
|
329 |
return gpt_reply+'\n```'
|
330 |
else:
|
331 |
return gpt_reply
|
|
|
332 |
|
333 |
|
334 |
def format_io(self, y):
|
335 |
"""
|
336 |
将输入和输出解析为HTML格式。将y中最后一项的输入部分段落化,并将输出部分的Markdown和数学公式转换为HTML格式。
|
337 |
"""
|
338 |
+
if y is None or y == []:
|
339 |
+
return []
|
340 |
i_ask, gpt_reply = y[-1]
|
341 |
+
i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波
|
342 |
gpt_reply = close_up_code_segment_during_stream(gpt_reply) # 当代码输出半截的时候,试着补上后个```
|
343 |
y[-1] = (
|
344 |
+
None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
|
345 |
None if gpt_reply is None else markdown_convertion(gpt_reply)
|
346 |
)
|
347 |
return y
|
|
|
403 |
return ''
|
404 |
return ''
|
405 |
|
406 |
+
|
407 |
def find_recent_files(directory):
|
408 |
"""
|
409 |
me: find files that is created with in one minutes under a directory with python, write a function
|
|
|
417 |
|
418 |
for filename in os.listdir(directory):
|
419 |
file_path = os.path.join(directory, filename)
|
420 |
+
if file_path.endswith('.log'):
|
421 |
+
continue
|
422 |
+
created_time = os.path.getmtime(file_path)
|
423 |
if created_time >= one_minute_ago:
|
424 |
+
if os.path.isdir(file_path):
|
425 |
+
continue
|
426 |
recent_files.append(file_path)
|
427 |
|
428 |
return recent_files
|
429 |
|
430 |
|
431 |
def on_file_uploaded(files, chatbot, txt):
|
432 |
+
if len(files) == 0:
|
433 |
+
return chatbot, txt
|
434 |
+
import shutil
|
435 |
+
import os
|
436 |
+
import time
|
437 |
+
import glob
|
438 |
from toolbox import extract_archive
|
439 |
+
try:
|
440 |
+
shutil.rmtree('./private_upload/')
|
441 |
+
except:
|
442 |
+
pass
|
443 |
time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
|
444 |
os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
|
445 |
err_msg = ''
|
|
|
447 |
file_origin_name = os.path.basename(file.orig_name)
|
448 |
shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
|
449 |
err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
|
450 |
+
dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
|
451 |
+
moved_files = [fp for fp in glob.glob(
|
452 |
+
'private_upload/**/*', recursive=True)]
|
453 |
txt = f'private_upload/{time_tag}'
|
454 |
moved_files_str = '\t\n\n'.join(moved_files)
|
455 |
chatbot.append(['我上传了文件,请查收',
|
456 |
+
f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
|
457 |
+
f'\n\n���用路径参数已自动修正到: \n\n{txt}' +
|
458 |
f'\n\n现在您点击任意实验功能时,以上文件将被作为输入参数'+err_msg])
|
459 |
return chatbot, txt
|
460 |
|
|
|
462 |
def on_report_generated(files, chatbot):
|
463 |
from toolbox import find_recent_files
|
464 |
report_files = find_recent_files('gpt_log')
|
465 |
+
if len(report_files) == 0:
|
466 |
+
return None, chatbot
|
467 |
# files.extend(report_files)
|
468 |
chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
|
469 |
return report_files, chatbot
|
470 |
|
471 |
+
def is_openai_api_key(key):
|
472 |
+
# 正确的 API_KEY 是 "sk-" + 48 位大小写字母数字的组合
|
473 |
+
API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
|
474 |
+
return API_MATCH
|
475 |
+
|
476 |
@lru_cache(maxsize=128)
|
477 |
def read_single_conf_with_lru_cache(arg):
|
478 |
+
from colorful import print亮红, print亮绿
|
479 |
+
try:
|
480 |
+
r = getattr(importlib.import_module('config_private'), arg)
|
481 |
+
except:
|
482 |
+
r = getattr(importlib.import_module('config'), arg)
|
483 |
# 在读取API_KEY时,检查一下是不是忘了改config
|
484 |
+
if arg == 'API_KEY':
|
485 |
+
if is_openai_api_key(r):
|
486 |
+
print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
|
487 |
+
else:
|
488 |
+
print亮红( "[API_KEY] 正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
|
489 |
+
"(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)")
|
490 |
+
if arg == 'proxies':
|
491 |
+
if r is None:
|
492 |
+
print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。')
|
493 |
+
else:
|
494 |
+
print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
|
|
|
|
|
495 |
assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
|
496 |
return r
|
497 |
|
498 |
+
|
499 |
def get_conf(*args):
|
500 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
501 |
res = []
|
|
|
504 |
res.append(r)
|
505 |
return res
|
506 |
|
507 |
+
|
508 |
def clear_line_break(txt):
|
509 |
txt = txt.replace('\n', ' ')
|
510 |
txt = txt.replace(' ', ' ')
|
511 |
txt = txt.replace(' ', ' ')
|
512 |
return txt
|
513 |
+
|
514 |
+
|
515 |
+
class DummyWith():
|
516 |
+
"""
|
517 |
+
这段代码定义了一个名为DummyWith的空上下文管理器,
|
518 |
+
它的作用是……额……没用,即在代码结构不变得情况下取代其他的上下文管理器。
|
519 |
+
上下文管理器是一种Python对象,用于与with语句一起使用,
|
520 |
+
以确保一些资源在代码块执行期间得到正确的初始化和清理。
|
521 |
+
上下文管理器必须实现两个方法,分别为 __enter__()和 __exit__()。
|
522 |
+
在上下文执行开始的情况下,__enter__()方法会在代码块被执行前被调用,
|
523 |
+
而在上下文执行结束时,__exit__()方法则会被调用。
|
524 |
+
"""
|
525 |
+
def __enter__(self):
|
526 |
+
return self
|
527 |
+
|
528 |
+
def __exit__(self, exc_type, exc_value, traceback):
|
529 |
+
return
|
version
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"version": 2.67,
|
3 |
+
"show_feature": true,
|
4 |
+
"new_feature": "很抱歉2.6有bug,现已经紧急修复<->现可通过输入区更新临时api-key <-> 增强多线程稳定性(涉及代码解析、PDF翻译、自译解等) <-> 修复Token计数错误(解决PDF翻译的分割不合理的问题) <-> 如果一键更新失败,可前往github手动更新"
|
5 |
+
}
|
步骤
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
top_p, temperature
|
2 |
-
top_p, api_key, temperature
|
3 |
-
|
4 |
-
top_p=top_p, api_key=api_key, temperature=temperature
|
5 |
-
top_p=top_p, api_key=api_key, temperature=temperature
|
6 |
-
|
7 |
-
|
8 |
-
with gr.Column(scale=1):
|
9 |
-
with gr.Row():
|
10 |
-
api_key = gr.Textbox(show_label=False, placeholder="输入API_KEY,输入后自动生效.").style(container=False)
|
11 |
-
with gr.Row():
|
12 |
-
txt = gr.Textbox(show_label=False, placeholder="输入问题.").style(container=False)
|
13 |
-
|
14 |
-
|
15 |
-
# 在读取API_KEY时,检查一下是不是忘了改config
|
16 |
-
# if arg=='API_KEY':
|
17 |
-
# # 正确的 API_KEY 是 "sk-" + 48 位大小写字母数字的组合
|
18 |
-
# API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", r)
|
19 |
-
# if API_MATCH:
|
20 |
-
# print(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
|
21 |
-
# else:
|
22 |
-
# assert False, "正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
|
23 |
-
# "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)"
|
24 |
-
|
25 |
-
|
26 |
-
f"Bearer {api_key}"
|
27 |
-
f"Bearer {api_key}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|