qingxu99 committed
Commit 6aba339
1 Parent(s): 294ac33

Switch ChatGLM to multi-process execution

crazy_functions/crazy_utils.py CHANGED
@@ -66,7 +66,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
     chatbot.append([inputs_show_user, ""])
     yield from update_ui(chatbot=chatbot, history=[]) # refresh the UI
     executor = ThreadPoolExecutor(max_workers=16)
-    mutable = ["", time.time()]
+    mutable = ["", time.time(), ""]
     def _req_gpt(inputs, history, sys_prompt):
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
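Note on the change above: `mutable` is the shared "observe window" handed to the request thread. Slot 0 carries the partial reply, slot 1 the last watchdog-feed timestamp, and this commit appends a third slot so the window matches the three-element layout now asserted in request_llm/bridge_all.py. A minimal sketch of the convention (the names `worker` and `window` are illustrative, not from the repo):

    import time
    from concurrent.futures import ThreadPoolExecutor

    def worker(window):
        # window[0]: partial output; window[1]: last watchdog feed; window[2]: spare slot
        for chunk in ["Hello", ", ", "world"]:
            time.sleep(0.1)
            window[0] += chunk                    # publish partial output for the UI thread
            if time.time() - window[1] > 5:       # watchdog starved for 5 seconds
                raise RuntimeError("watchdog timeout, terminating worker")
        return window[0]

    window = ["", time.time(), ""]                # the same 3-slot shape as `mutable`
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(worker, window)
        while not future.done():
            window[1] = time.time()               # the UI loop feeds the watchdog
            time.sleep(0.05)
    print(future.result())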
crazy_functions/询问多个大语言模型.py CHANGED
@@ -20,7 +20,8 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
-        sys_prompt=system_prompt
+        sys_prompt=system_prompt,
+        retry_times_at_unknown_error=0
     )
 
     history.append(txt)
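Note on the change above: passing `retry_times_at_unknown_error=0` switches off the retry loop for this plugin, so when several models are queried side by side, one backend's failure surfaces immediately instead of silently re-running the whole request. The countdown pattern behind that parameter looks roughly like this (a sketch under assumed names, not the repo's exact `_req_gpt` code):

    import time

    def request_with_retry(do_request, retry_times_at_unknown_error=2):
        retry_op = retry_times_at_unknown_error   # retries remaining
        while True:
            try:
                return do_request()
            except Exception:
                retry_op -= 1
                if retry_op < 0:
                    raise                         # out of retries: propagate the error
                time.sleep(1)                     # brief pause before retrying

    def always_fails():
        raise ConnectionError("backend unavailable")

    try:
        request_with_retry(always_fails, retry_times_at_unknown_error=0)
    except ConnectionError:
        print("failed once and gave up; no retries were attempted")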
docs/Dockerfile+ChatGLM CHANGED
@@ -24,7 +24,7 @@ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
 
 # download the branch
 WORKDIR /gpt
-RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.0
+RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.1
 WORKDIR /gpt/chatgpt_academic
 RUN $useProxyNetwork python3 -m pip install -r requirements.txt
 RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
main.py CHANGED
@@ -1,177 +1,182 @@
 import os; os.environ['no_proxy'] = '*' # avoid unexpected pollution from proxy networks
-import gradio as gr
-from request_llm.bridge_all import predict
-from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
 
-# It is recommended to copy your secrets (API key, proxy URL) into a config_private.py, so they are not accidentally pushed to GitHub
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
-    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+def main():
+    import gradio as gr
+    from request_llm.bridge_all import predict
+    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
+    # It is recommended to copy your secrets (API key, proxy URL) into a config_private.py, so they are not accidentally pushed to GitHub
+    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
 
-# if WEB_PORT is -1, choose a random web port
-PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
-if not AUTHENTICATION: AUTHENTICATION = None
+    # if WEB_PORT is -1, choose a random web port
+    PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
+    if not AUTHENTICATION: AUTHENTICATION = None
 
-from check_proxy import get_current_version
-initial_prompt = "Serve me as a writing and programming assistant."
-title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
-description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
+    from check_proxy import get_current_version
+    initial_prompt = "Serve me as a writing and programming assistant."
+    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
+    description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
 
-# query logging; Python 3.9+ recommended (the newer the better)
-import logging
-os.makedirs("gpt_log", exist_ok=True)
-try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
-except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
-print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
+    # query logging; Python 3.9+ recommended (the newer the better)
+    import logging
+    os.makedirs("gpt_log", exist_ok=True)
+    try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
+    except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
+    print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
 
-# basic function modules
-from core_functional import get_core_functions
-functional = get_core_functions()
+    # basic function modules
+    from core_functional import get_core_functions
+    functional = get_core_functions()
 
-# advanced function plugins
-from crazy_functional import get_crazy_functions
-crazy_fns = get_crazy_functions()
+    # advanced function plugins
+    from crazy_functional import get_crazy_functions
+    crazy_fns = get_crazy_functions()
 
-# handle the conversion of markdown formatting
-gr.Chatbot.postprocess = format_io
+    # handle the conversion of markdown formatting
+    gr.Chatbot.postprocess = format_io
 
-# some appearance and color adjustments
-from theme import adjust_theme, advanced_css
-set_theme = adjust_theme()
+    # some appearance and color adjustments
+    from theme import adjust_theme, advanced_css
+    set_theme = adjust_theme()
 
-# proxy and auto-update
-from check_proxy import check_proxy, auto_update
-proxy_info = check_proxy(proxies)
+    # proxy and auto-update
+    from check_proxy import check_proxy, auto_update
+    proxy_info = check_proxy(proxies)
 
-gr_L1 = lambda: gr.Row().style()
-gr_L2 = lambda scale: gr.Column(scale=scale)
-if LAYOUT == "TOP-DOWN":
-    gr_L1 = lambda: DummyWith()
-    gr_L2 = lambda scale: gr.Row()
-    CHATBOT_HEIGHT /= 2
+    gr_L1 = lambda: gr.Row().style()
+    gr_L2 = lambda scale: gr.Column(scale=scale)
+    if LAYOUT == "TOP-DOWN":
+        gr_L1 = lambda: DummyWith()
+        gr_L2 = lambda scale: gr.Row()
+        CHATBOT_HEIGHT /= 2
 
-cancel_handles = []
-with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
-    gr.HTML(title_html)
-    cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
-    with gr_L1():
-        with gr_L2(scale=2):
-            chatbot = gr.Chatbot()
-            chatbot.style(height=CHATBOT_HEIGHT)
-            history = gr.State([])
-        with gr_L2(scale=1):
-            with gr.Accordion("输入区", open=True) as area_input_primary:
-                with gr.Row():
-                    txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
-                with gr.Row():
-                    submitBtn = gr.Button("提交", variant="primary")
-                with gr.Row():
-                    resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
-                    stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
-                with gr.Row():
-                    status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
-            with gr.Accordion("基础功能区", open=True) as area_basic_fn:
-                with gr.Row():
-                    for k in functional:
-                        variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
-                        functional[k]["Button"] = gr.Button(k, variant=variant)
-            with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
-                with gr.Row():
-                    gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
-                with gr.Row():
-                    for k in crazy_fns:
-                        if not crazy_fns[k].get("AsButton", True): continue
-                        variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
-                        crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
-                        crazy_fns[k]["Button"].style(size="sm")
-                with gr.Row():
-                    with gr.Accordion("更多函数插件", open=True):
-                        dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
-                        with gr.Column(scale=1):
-                            dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
-                        with gr.Column(scale=1):
-                            switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
-                with gr.Row():
-                    with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
-                        file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
-            with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
-                system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
-                top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
-                temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
-                max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
-                checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
-                md_dropdown = gr.Dropdown(["gpt-3.5-turbo", "chatglm"], value=LLM_MODEL, label="").style(container=False)
+    cancel_handles = []
+    with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
+        gr.HTML(title_html)
+        cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
+        with gr_L1():
+            with gr_L2(scale=2):
+                chatbot = gr.Chatbot()
+                chatbot.style(height=CHATBOT_HEIGHT)
+                history = gr.State([])
+            with gr_L2(scale=1):
+                with gr.Accordion("输入区", open=True) as area_input_primary:
+                    with gr.Row():
+                        txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
+                    with gr.Row():
+                        submitBtn = gr.Button("提交", variant="primary")
+                    with gr.Row():
+                        resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
+                        stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
+                    with gr.Row():
+                        status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
+                with gr.Accordion("基础功能区", open=True) as area_basic_fn:
+                    with gr.Row():
+                        for k in functional:
+                            variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
+                            functional[k]["Button"] = gr.Button(k, variant=variant)
+                with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
+                    with gr.Row():
+                        gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
+                    with gr.Row():
+                        for k in crazy_fns:
+                            if not crazy_fns[k].get("AsButton", True): continue
+                            variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
+                            crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
+                            crazy_fns[k]["Button"].style(size="sm")
+                    with gr.Row():
+                        with gr.Accordion("更多函数插件", open=True):
+                            dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
+                            with gr.Column(scale=1):
+                                dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
+                            with gr.Column(scale=1):
+                                switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
+                    with gr.Row():
+                        with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
+                            file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
+                with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
+                    system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
+                    top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
+                    temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
+                    max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
+                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
+                    md_dropdown = gr.Dropdown(["gpt-3.5-turbo", "chatglm"], value=LLM_MODEL, label="").style(container=False)
 
-    gr.Markdown(description)
-    with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
-        with gr.Row():
-            txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
-        with gr.Row():
-            submitBtn2 = gr.Button("提交", variant="primary")
-        with gr.Row():
-            resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
-            stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
-    # interaction between the show/hide checkboxes and the functional areas
-    def fn_area_visibility(a):
-        ret = {}
-        ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
-        ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
-        ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
-        ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
-        if "底部输入区" in a: ret.update({txt: gr.update(value="")})
-        return ret
-    checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
-    # collect the widget-handle combinations that recur throughout
-    input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
-    output_combo = [cookies, chatbot, history, status]
-    predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
-    # submit and reset buttons
-    cancel_handles.append(txt.submit(**predict_args))
-    cancel_handles.append(txt2.submit(**predict_args))
-    cancel_handles.append(submitBtn.click(**predict_args))
-    cancel_handles.append(submitBtn2.click(**predict_args))
-    resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
-    resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
-    # register callbacks for the basic function area
-    for k in functional:
-        click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
-        cancel_handles.append(click_handle)
-    # file-upload area: interaction with the chatbot after files are received
-    file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
-    # function plugins: fixed-button area
-    for k in crazy_fns:
-        if not crazy_fns[k].get("AsButton", True): continue
-        click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
+        gr.Markdown(description)
+        with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
+            with gr.Row():
+                txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
+            with gr.Row():
+                submitBtn2 = gr.Button("提交", variant="primary")
+            with gr.Row():
+                resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
+                stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
+        # interaction between the show/hide checkboxes and the functional areas
+        def fn_area_visibility(a):
+            ret = {}
+            ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
+            ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
+            ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
+            ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
+            if "底部输入区" in a: ret.update({txt: gr.update(value="")})
+            return ret
+        checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
+        # collect the widget-handle combinations that recur throughout
+        input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
+        output_combo = [cookies, chatbot, history, status]
+        predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
+        # submit and reset buttons
+        cancel_handles.append(txt.submit(**predict_args))
+        cancel_handles.append(txt2.submit(**predict_args))
+        cancel_handles.append(submitBtn.click(**predict_args))
+        cancel_handles.append(submitBtn2.click(**predict_args))
+        resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
+        resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
+        # register callbacks for the basic function area
+        for k in functional:
+            click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
+            cancel_handles.append(click_handle)
+        # file-upload area: interaction with the chatbot after files are received
+        file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
+        # function plugins: fixed-button area
+        for k in crazy_fns:
+            if not crazy_fns[k].get("AsButton", True): continue
+            click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
+            click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
+            cancel_handles.append(click_handle)
+        # function plugins: interaction between the dropdown menu and the adaptive button
+        def on_dropdown_changed(k):
+            variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
+            return {switchy_bt: gr.update(value=k, variant=variant)}
+        dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
+        # register the callback of the adaptive button
+        def route(k, *args, **kwargs):
+            if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
+            yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
+        click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
         click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
+        # def expand_file_area(file_upload, area_file_up):
+        #     if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
+        # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
         cancel_handles.append(click_handle)
-    # function plugins: interaction between the dropdown menu and the adaptive button
-    def on_dropdown_changed(k):
-        variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
-        return {switchy_bt: gr.update(value=k, variant=variant)}
-    dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
-    # register the callback of the adaptive button
-    def route(k, *args, **kwargs):
-        if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
-        yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
-    click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
-    click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
-    # def expand_file_area(file_upload, area_file_up):
-    #     if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
-    # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
-    cancel_handles.append(click_handle)
-    # register callbacks for the stop buttons
-    stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-    stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-# gradio's inbrowser trigger is unstable; roll back to the original browser-opening routine
-def auto_opentab_delay():
-    import threading, webbrowser, time
-    print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
-    print(f"\t(亮色主题): http://localhost:{PORT}")
-    print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
-    def open():
-        time.sleep(2) # open the browser
-        webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
-    threading.Thread(target=open, name="open-browser", daemon=True).start()
-    threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
+-    # function plugins: interaction between the dropdown menu and the adaptive button
+-    def on_dropdown_changed(k):
+-        variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
+-        return {switchy_bt: gr.update(value=k, variant=variant)}
+-    dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
+-    # register the callback of the adaptive button
+-    def route(k, *args, **kwargs):
+-        if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
+-        yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
+-    click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
+-    click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
+-    # def expand_file_area(file_upload, area_file_up):
+-    #     if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
+-    # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
+-    cancel_handles.append(click_handle)
+-    # register callbacks for the stop buttons
+-    stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
+-    stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
+-# gradio's inbrowser trigger is unstable; roll back to the original browser-opening routine
+-def auto_opentab_delay():
+-    import threading, webbrowser, time
+-    print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
+-    print(f"\t(亮色主题): http://localhost:{PORT}")
+-    print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
+-    def open():
+-        time.sleep(2) # open the browser
+-        webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
+-    threading.Thread(target=open, name="open-browser", daemon=True).start()
+-    threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
+&#43;        # register callbacks for the stop buttons
++        stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
++        stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
++
++    # gradio's inbrowser trigger is unstable; roll back to the original browser-opening routine
++    def auto_opentab_delay():
++        import threading, webbrowser, time
++        print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
++        print(f"\t(亮色主题): http://localhost:{PORT}")
++        print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
++        def open():
++            time.sleep(2) # open the browser
++            webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
++        threading.Thread(target=open, name="open-browser", daemon=True).start()
++        threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
++
++    auto_opentab_delay()
++    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
 
-auto_opentab_delay()
-demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
+if __name__ == "__main__":
+    main()
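Note on the change above: wrapping the whole script in main() behind an import guard is what makes the new multi-process ChatGLM backend safe to use. Under the "spawn" start method (the default on Windows and macOS), every child process re-imports the parent's __main__ module, so any module-level code would run again in each child. A minimal sketch of the failure mode the guard prevents, with a stub job standing in for the real model loader:

    from multiprocessing import Process, set_start_method

    def child_job():
        print("child: the model would be loaded here")

    def main():
        # everything that must run exactly once lives here
        print("parent: building the UI and launching the server")
        p = Process(target=child_job, daemon=True)
        p.start()
        p.join()

    if __name__ == "__main__":        # False inside the re-imported child module
        set_start_method("spawn")     # force spawn to reproduce Windows/macOS behaviour
        main()

Without the guard, the UI-construction and launch code at module level would execute once more inside every spawned worker process.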
request_llm/bridge_all.py CHANGED
@@ -31,6 +31,24 @@ methods = {
     "tgui-ui": tgui_ui,
 }
 
+def LLM_CATCH_EXCEPTION(f):
+    """
+    Decorator that surfaces worker errors in the observation window.
+    """
+    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
+        try:
+            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
+        except Exception as e:
+            from toolbox import get_conf
+            import traceback
+            proxies, = get_conf('proxies')
+            tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
+            observe_window[0] = tb_str
+            return tb_str
+    return decorated
+
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
+
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
     Send to the LLM and wait for the reply in one pass, without showing intermediate output; internally, streaming is still used so the connection is not cut off midway.
@@ -62,17 +80,13 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
        # when querying multiple large language models at the same time:
-        executor = ThreadPoolExecutor(max_workers=16)
+        executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)
 
         window_len = len(observe_window)
-        if window_len==0:
-            window_mutex = [[] for _ in range(n_model)] + [True]
-        elif window_len==1:
-            window_mutex = [[""] for _ in range(n_model)] + [True]
-        elif window_len==2:
-            window_mutex = [["", time.time()] for _ in range(n_model)] + [True]
+        assert window_len==3
+        window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
 
         futures = []
         for i in range(n_model):
@@ -85,12 +99,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                 method = methods['tgui-no-ui']
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
-            future = executor.submit(method, inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
+            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
             futures.append(future)
 
         def mutex_manager(window_mutex, observe_window):
             while True:
-                time.sleep(0.2)
+                time.sleep(0.5)
                 if not window_mutex[-1]: break
                 # watchdog
                 for i in range(n_model):
@@ -98,8 +112,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                 # observation window
                 chat_string = []
                 for i in range(n_model):
-                    chat_string.append( f"[{str(models[i])} ]: {window_mutex[i][0]}" )
-                res = '\n\n---\n\n'.join(chat_string)
+                    chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
+                res = '<br/><br/>\n\n---\n\n'.join(chat_string)
                 # # # # # # # # # # #
                 observe_window[0] = res
 
@@ -107,10 +121,18 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
         t_model.start()
 
         return_string_collect = []
+        while True:
+            worker_done = [h.done() for h in futures]
+            if all(worker_done):
+                executor.shutdown()
+                break
+            time.sleep(1)
+
         for i, future in enumerate(futures): # wait and get
-            return_string_collect.append( f"[{str(models[i])} ]: {future.result()}" )
+            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )
+
         window_mutex[-1] = False # stop mutex thread
-        res = '\n\n---\n\n'.join(return_string_collect)
+        res = '<br/>\n\n---\n\n'.join(return_string_collect)
         return res
 
 
request_llm/bridge_chatglm.py CHANGED
@@ -3,35 +3,69 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import importlib
 from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe
 
-
-global chatglm_model, chatglm_tokenizer
-
-chatglm_model = None
-chatglm_tokenizer = None
-
-def model_loader():
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_tokenizer is None:
-        chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-    if chatglm_model is None: # not loaded yet
-        device, = get_conf('LOCAL_MODEL_DEVICE')
-        if device=='cpu':
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
-        else:
-            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
-        chatglm_model = chatglm_model.eval()
-    chatglm_model = chatglm_model.eval()
+#################################################################################
+class GetGLMHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.chatglm_model = None
+        self.chatglm_tokenizer = None
+        self.start()
+        print('初始化')
+
+    def ready(self):
+        return self.chatglm_model is not None
 
+    def run(self):
+        while True:
+            try:
+                if self.chatglm_model is None:
+                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+                    device, = get_conf('LOCAL_MODEL_DEVICE')
+                    if device=='cpu':
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+                    else:
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+                    self.chatglm_model = self.chatglm_model.eval()
+                    break
+                else:
+                    break
+            except:
+                pass
+        while True:
+            kwargs = self.child.recv()
+            try:
+                for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
+                    self.child.send(response)
+            except:
+                self.child.send('[Local Message] Call ChatGLM fail.')
+            self.child.send('[Finish]')
 
+    def stream_chat(self, **kwargs):
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        return
+
+global glm_handle
+glm_handle = None
+#################################################################################
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
+    Multi-threaded method.
     See request_llm/bridge_all.py for this function's documentation.
     """
-    global chatglm_model, chatglm_tokenizer
-    if chatglm_model is None:
-        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间 ……"
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        observe_window[0] = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
 
-    model_loader()
     # chatglm has no sys_prompt interface, so the prompt is folded into history
     history_feedin = []
     for i in range(len(history)//2):
@@ -40,29 +74,27 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 
     watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
     response = ""
-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        # observation window: publish the data received so far
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         observe_window[0] = response
-        # watchdog: terminate if it has not been fed before the deadline
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("程序终止。")
-        # if not console_slience:
-        #     print(response)
     return response
 
 
+
 def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
+    Single-threaded method.
     See request_llm/bridge_all.py for this function's documentation.
     """
-    global chatglm_model, chatglm_tokenizer
     chatbot.append((inputs, ""))
-    if chatglm_model is None:
-        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间 ……")
+
+    global glm_handle
+    if glm_handle is None:
+        glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……")
     yield from update_ui(chatbot=chatbot, history=[])
-    model_loader()
 
     if additional_fn is not None:
         import core_functional
@@ -71,13 +103,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # fetch the preprocessing function (if any)
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
 
-
     history_feedin = []
     for i in range(len(history)//2):
         history_feedin.append(["What can I do?", system_prompt] )
         history_feedin.append([history[2*i], history[2*i+1]] )
 
-    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
-                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
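Note on the changes above: this is the heart of the commit. The ChatGLM weights now live in a daemon child process, and the Gradio side talks to it through a multiprocessing Pipe, with '[Finish]' as the end-of-stream sentinel. A self-contained sketch of the same pattern, with a stub generator standing in for the real model:

    from multiprocessing import Process, Pipe

    class ModelHandle(Process):
        def __init__(self):
            super().__init__(daemon=True)       # the child dies with the parent
            self.parent, self.child = Pipe()
            self.start()

        def run(self):                          # executes inside the child process
            while True:
                kwargs = self.child.recv()      # block until a request arrives
                for i, word in enumerate(kwargs["query"].split()):
                    self.child.send(f"partial {i}: {word}")   # stream results back
                self.child.send('[Finish]')     # sentinel: this request is complete

        def stream_chat(self, **kwargs):        # runs in the parent as a generator
            self.parent.send(kwargs)
            while True:
                res = self.parent.recv()
                if res == '[Finish]':
                    break
                yield res

    if __name__ == "__main__":                  # required under the spawn start method
        handle = ModelHandle()
        for piece in handle.stream_chat(query="hello multi process world"):
            print(piece)

Because the child always sends the sentinel, even when the model call fails and only an error string precedes it, the parent generator is guaranteed to terminate; GetGLMHandle relies on the same property to keep the UI responsive.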
version CHANGED
@@ -1,5 +1,5 @@
 {
-    "version": 3.0,
+    "version": 3.1,
     "show_feature": true,
     "new_feature": "支持ChatGLM <-> 支持多LLM模型同时对话"
 }