Commit: c1

Changed files:
- config.py +0 -3
- crazy_functions/生成函数注释.py +2 -2
- crazy_functions/解析项目源代码.py +11 -11
- crazy_functions/读文章写摘要.py +3 -3
- crazy_functions/高级功能函数模板.py +2 -2
- main.py +7 -4
- predict.py +5 -5
- toolbox.py +4 -4
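
In sum: main.py gains an `api` textbox for a user-supplied OpenAI key, and that key is threaded as a new first argument through `predict`, `predict_no_ui`, `generate_payload`, the `predict_no_ui_but_counting_down` helper, the `CatchException` wrapper, and every plugin entry point under crazy_functions/; config.py correspondingly drops the startup assertion on the hard-coded `API_KEY`.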
config.py

```diff
@@ -24,6 +24,3 @@ MAX_RETRY = 2
 # 选择的OpenAI模型是(gpt4现在只对申请成功的人开放)
 LLM_MODEL = "gpt-3.5-turbo"
 
-# 检查一下是不是忘了改config
-if API_KEY == "sk-此处填API秘钥":
-    assert False, "请在config文件中修改API密钥, 添加海外代理之后再运行"
```
crazy_functions/生成函数注释.py

```diff
@@ -19,7 +19,7 @@ def 生成函数注释(file_manifest, project_folder, top_p, temperature, chatbo
     if not fast_debug:
         msg = '正常'
         # ** gpt request **
-        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[])   # 带超时倒计时
+        gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[])   # 带超时倒计时
 
         print('[2] end gpt req')
         chatbot[-1] = (i_say_show_user, gpt_say)
@@ -37,7 +37,7 @@ def 生成函数注释(file_manifest, project_folder, top_p, temperature, chatbo
 
 
 @CatchException
-def 批量生成函数注释(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+def 批量生成函数注释(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
```
crazy_functions/解析项目源代码.py

```diff
@@ -2,7 +2,7 @@ from predict import predict_no_ui
 from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
 fast_debug = False
 
-def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
+def 解析源代码(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
     import time, glob, os
     print('begin analysis on:', file_manifest)
     for index, fp in enumerate(file_manifest):
@@ -19,7 +19,7 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot,
         msg = '正常'
 
         # ** gpt request **
-        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
+        gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
 
         chatbot[-1] = (i_say_show_user, gpt_say)
         history.append(i_say_show_user); history.append(gpt_say)
@@ -34,7 +34,7 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot,
     if not fast_debug:
         msg = '正常'
         # ** gpt request **
-        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
+        gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
 
         chatbot[-1] = (i_say, gpt_say)
         history.append(i_say); history.append(gpt_say)
@@ -47,7 +47,7 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot,
 
 
 @CatchException
-def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+def 解析项目本身(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
     history = []    # 清空历史,以免输入溢出
     import time, glob, os
     file_manifest = [f for f in glob.glob('*.py')]
@@ -64,8 +64,8 @@ def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTx
 
     if not fast_debug:
         # ** gpt request **
-        # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
-        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
+        # gpt_say = predict_no_ui(api, inputs=i_say, top_p=top_p, temperature=temperature)
+        gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
 
         chatbot[-1] = (i_say_show_user, gpt_say)
         history.append(i_say_show_user); history.append(gpt_say)
@@ -78,8 +78,8 @@ def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTx
 
     if not fast_debug:
         # ** gpt request **
-        # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
-        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
+        # gpt_say = predict_no_ui(api, inputs=i_say, top_p=top_p, temperature=temperature, history=history)
+        gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
 
         chatbot[-1] = (i_say, gpt_say)
         history.append(i_say); history.append(gpt_say)
@@ -89,7 +89,7 @@ def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTx
     yield chatbot, history, '正常'
 
 @CatchException
-def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+def 解析一个Python项目(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -108,7 +108,7 @@ def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPr
 
 
 @CatchException
-def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+def 解析一个C项目的头文件(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -128,7 +128,7 @@ def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, s
     yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
 
 @CatchException
-def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+def 解析一个C项目(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
```
crazy_functions/读文章写摘要.py

```diff
@@ -20,7 +20,7 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist
     if not fast_debug:
         msg = '正常'
         # ** gpt request **
-        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[])   # 带超时倒计时
+        gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[])   # 带超时倒计时
 
         print('[2] end gpt req')
         chatbot[-1] = (i_say_show_user, gpt_say)
@@ -38,7 +38,7 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist
     if not fast_debug:
         msg = '正常'
         # ** gpt request **
-        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history)   # 带超时倒计时
+        gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say, chatbot, top_p, temperature, history=history)   # 带超时倒计时
 
         chatbot[-1] = (i_say, gpt_say)
         history.append(i_say); history.append(gpt_say)
@@ -50,7 +50,7 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist
 
 
 @CatchException
-def 读文章写摘要(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+def 读文章写摘要(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
```
crazy_functions/高级功能函数模板.py

```diff
@@ -3,14 +3,14 @@ from toolbox import CatchException, report_execption, write_results_to_file, pre
 fast_debug = False
 
 @CatchException
-def 高阶功能模板函数(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+def 高阶功能模板函数(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
     history = []    # 清空历史,以免输入溢出
     for i in range(5):
         i_say = f'我给出一个数字,你给出该数字的平方。我给出数字:{i}'
         chatbot.append((i_say, "[Local Message] waiting gpt response."))
         yield chatbot, history, '正常' # 由于请求gpt需要一段时间,我们先及时地做一次状态显示
 
-        gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature) # 请求gpt,需要一段时间
+        gpt_say = predict_no_ui(api, inputs=i_say, top_p=top_p, temperature=temperature) # 请求gpt,需要一段时间
 
         chatbot[-1] = (i_say, gpt_say)
         history.append(i_say);history.append(gpt_say)
```
main.py

```diff
@@ -49,6 +49,9 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
             TRUE = gr.State(True)
             FALSE = gr.State(False)
         with gr.Column(scale=1):
+            with gr.Row():
+                with gr.Column(scale=12):
+                    api = gr.Textbox(show_label=False, placeholder="Input OpenAI Key.").style(container=False)
             with gr.Row():
                 with gr.Column(scale=12):
                     txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
@@ -77,15 +80,15 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
     temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
 
-    txt.submit(predict, [txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
-    submitBtn.click(predict, [txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
+    txt.submit(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
+    submitBtn.click(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
     for k in functional:
         functional[k]["Button"].click(predict,
-            [txt, top_p, temperature, chatbot, history, systemPromptTxt, TRUE, gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
+            [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, TRUE, gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
     file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
     for k in crazy_functional:
         click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"],
-            [txt, top_p, temperature, chatbot, history, systemPromptTxt, gr.State(PORT)], [chatbot, history, statusDisplay]
+            [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, gr.State(PORT)], [chatbot, history, statusDisplay]
         )
         try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
         except: pass
```
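Every `inputs` list above gains `api` in the first position because Gradio maps the `inputs` list positionally onto the callback's parameters; if the textbox were placed anywhere else, every argument after it would shift by one. A minimal, self-contained sketch of the pattern (illustrative only; `echo` and its components are hypothetical, not from this repo):

```python
import gradio as gr

def echo(api, txt):
    # `api` arrives first because it is first in the `inputs` list below,
    # exactly as `predict` now receives the key as its first parameter.
    return f"key={api[:8]}..., question={txt}"

with gr.Blocks() as demo:
    api = gr.Textbox(show_label=False, placeholder="Input OpenAI Key.")
    txt = gr.Textbox(show_label=False, placeholder="Input question here.")
    out = gr.Textbox()
    txt.submit(echo, [api, txt], [out])  # inputs order == parameter order

# demo.launch()  # uncomment to serve locally
```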
predict.py

```diff
@@ -25,7 +25,7 @@ def get_full_error(chunk, stream_response):
             break
     return chunk
 
-def predict_no_ui(inputs, top_p, temperature, history=[]):
+def predict_no_ui(api, inputs, top_p, temperature, history=[]):
     """
         发送至chatGPT,等待回复,一次性完成,不显示中间过程。
         predict函数的简化版。
@@ -36,7 +36,7 @@ def predict_no_ui(inputs, top_p, temperature, history=[]):
         history 是之前的对话列表
         (注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误,然后raise ConnectionAbortedError)
     """
-    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt="", stream=False)
+    headers, payload = generate_payload(api, inputs, top_p, temperature, history, system_prompt="", stream=False)
 
     retry = 0
     while True:
@@ -58,7 +58,7 @@ def predict_no_ui(inputs, top_p, temperature, history=[]):
         raise ConnectionAbortedError("Json解析不合常规,可能是文本过长" + response.text)
 
 
-def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='',
+def predict(api, inputs, top_p, temperature, chatbot=[], history=[], system_prompt='',
             stream = True, additional_fn=None):
     """
         发送至chatGPT,流式获取输出。
@@ -81,7 +81,7 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
     chatbot.append((inputs, ""))
    yield chatbot, history, "等待响应"
 
-    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt, stream)
+    headers, payload = generate_payload(api, inputs, top_p, temperature, history, system_prompt, stream)
     history.append(inputs); history.append(" ")
 
     retry = 0
@@ -135,7 +135,7 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
         yield chatbot, history, "Json解析不合常规,很可能是文本过长" + error_msg
         return
 
-def generate_payload(inputs, top_p, temperature, history, system_prompt, stream):
+def generate_payload(api, inputs, top_p, temperature, history, system_prompt, stream):
     """
         整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
     """
```
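Note that only the signature of `generate_payload` changes in the last hunk; the body that actually consumes the key sits outside the diff context. For orientation, a minimal sketch of what a payload builder of this shape typically does with `api`, assuming the standard OpenAI chat-completions wire format (the real body in predict.py may differ in details such as proxy handling and model selection):

```python
def generate_payload_sketch(api, inputs, top_p, temperature, history, system_prompt, stream):
    # Sketch only. `api` is the user-supplied key from the new textbox; it
    # replaces the hard-coded API_KEY that config.py used to assert on.
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api}",
    }
    messages = [{"role": "system", "content": system_prompt}]
    # history alternates user/assistant turns; fold it into the message list
    for i, content in enumerate(history):
        messages.append({"role": "user" if i % 2 == 0 else "assistant",
                         "content": content})
    messages.append({"role": "user", "content": inputs})
    payload = {
        "model": "gpt-3.5-turbo",  # LLM_MODEL in config.py
        "messages": messages,
        "top_p": top_p,
        "temperature": temperature,
        "stream": stream,
    }
    return headers, payload
```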
toolbox.py

```diff
@@ -2,7 +2,7 @@ import markdown, mdtex2html, threading
 from show_math import convert as convert_math
 from functools import wraps
 
-def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]):
+def predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[]):
     """
         调用简单的predict_no_ui接口,但是依然保留了些许界面心跳功能,当对话太长时,会自动采用二分法截断
     """
@@ -17,7 +17,7 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
     def mt(i_say, history):
         while True:
             try:
-                mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
+                mutable[0] = predict_no_ui(api, inputs=i_say, top_p=top_p, temperature=temperature, history=history)
                 break
             except ConnectionAbortedError as e:
                 if len(history) > 0:
@@ -73,9 +73,9 @@ def CatchException(f):
         装饰器函数,捕捉函数f中的异常并封装到一个生成器中返回,并显示到聊天当中。
     """
     @wraps(f)
-    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    def decorated(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
         try:
-            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
+            yield from f(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
         except Exception as e:
             import traceback
             from check_proxy import check_proxy
```
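Since `@CatchException` substitutes `decorated` for each plugin, the wrapper's parameter list must mirror the plugins' new one, or the positional values Gradio sends (key first) would arrive shifted by one slot. A hypothetical smoke test of the new calling convention, driving a plugin generator the way a click handler would (not part of the commit; the key literal is a placeholder, and running it requires a valid key plus network access):

```python
from crazy_functions.高级功能函数模板 import 高阶功能模板函数

api = "sk-..."  # value the user would type into the new textbox
chatbot, history = [], []
# The decorated plugin is a generator yielding (chatbot, history, status).
for chatbot, history, status in 高阶功能模板函数(
        api, "", top_p=1.0, temperature=1.0, chatbot=chatbot,
        history=history, systemPromptTxt="", WEB_PORT=-1):
    print(status, chatbot[-1] if chatbot else None)
```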