JohnSmith9982 committed on
Commit a51e754
1 Parent(s): 85095bb

GitHub 94adb4f

Files changed (8):
  1. app.py +339 -58
  2. chat_func.py +423 -0
  3. custom.css +188 -0
  4. llama_func.py +192 -0
  5. overwrites.py +40 -0
  6. presets.py +59 -69
  7. requirements.txt +3 -0
  8. utils.py +100 -261
app.py CHANGED
@@ -1,18 +1,24 @@
  # -*- coding:utf-8 -*-
- import gradio as gr
  import os
  import logging
  import sys
- import argparse
  from utils import *
  from presets import *

- logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")

- my_api_key = ""  # enter your API key here

- #if we are running in Docker
- if os.environ.get('dockerrun') == 'yes':
      dockerflag = True
  else:
      dockerflag = False
@@ -20,17 +26,21 @@ else:
  authflag = False

  if dockerflag:
-     my_api_key = os.environ.get('my_api_key')
      if my_api_key == "empty":
          logging.error("Please give an api key!")
          sys.exit(1)
-     #auth
-     username = os.environ.get('USERNAME')
-     password = os.environ.get('PASSWORD')
      if not (isinstance(username, type(None)) or isinstance(password, type(None))):
          authflag = True
  else:
-     if not my_api_key and os.path.exists("api_key.txt") and os.path.getsize("api_key.txt"):
          with open("api_key.txt", "r") as f:
              my_api_key = f.read().strip()
      if os.path.exists("auth.json"):
@@ -42,11 +52,78 @@ else:
          authflag = True

  gr.Chatbot.postprocess = postprocess

- with gr.Blocks(css=customCSS) as demo:
      history = gr.State([])
      token_count = gr.State([])
      promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
      TRUECOMSTANT = gr.State(True)
      FALSECONSTANT = gr.State(False)
      topic = gr.State("未命名对话历史记录")
@@ -58,114 +135,318 @@ with gr.Blocks(css=customCSS) as demo:
      with gr.Row(scale=1).style(equal_height=True):
          with gr.Column(scale=5):
              with gr.Row(scale=1):
-                 chatbot = gr.Chatbot().style(height=600)  # .style(color_map=("#1D51EE", "#585A5B"))
              with gr.Row(scale=1):
                  with gr.Column(scale=12):
-                     user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style(
-                         container=False)
-                 with gr.Column(min_width=50, scale=1):
-                     submitBtn = gr.Button("🚀", variant="primary")
              with gr.Row(scale=1):
-                 emptyBtn = gr.Button("🧹 新的对话",)
                  retryBtn = gr.Button("🔄 重新生成")
                  delLastBtn = gr.Button("🗑️ 删除一条对话")
                  reduceTokenBtn = gr.Button("♻️ 总结对话")

          with gr.Column():
-             with gr.Column(min_width=50,scale=1):
                  with gr.Tab(label="ChatGPT"):
-                     keyTxt = gr.Textbox(show_label=True, placeholder=f"OpenAI API-key...",value=my_api_key, type="password", visible=not HIDE_MY_KEY, label="API-Key")
-                     model_select_dropdown = gr.Dropdown(label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0])
-                     with gr.Accordion("参数", open=False):
-                         top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05,
-                                           interactive=True, label="Top-p (nucleus sampling)",)
-                         temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0,
-                                                 step=0.1, interactive=True, label="Temperature",)
-                     use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
                      use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)

                  with gr.Tab(label="Prompt"):
-                     systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...", label="System prompt", value=initial_prompt).style(container=True)
                      with gr.Accordion(label="加载Prompt模板", open=True):
                          with gr.Column():
                              with gr.Row():
                                  with gr.Column(scale=6):
-                                     templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件", choices=get_template_names(plain=True), multiselect=False, value=get_template_names(plain=True)[0])
                                  with gr.Column(scale=1):
                                      templateRefreshBtn = gr.Button("🔄 刷新")
                              with gr.Row():
                                  with gr.Column():
-                                     templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False, value=load_template(get_template_names(plain=True)[0], mode=1)[0])

                  with gr.Tab(label="保存/加载"):
                      with gr.Accordion(label="保存/加载对话历史记录", open=True):
-                         gr.Markdown("对话历史默认保存在history文件夹中。")
                          with gr.Column():
                              with gr.Row():
                                  with gr.Column(scale=6):
-                                     saveFileName = gr.Textbox(
-                                         show_label=True, placeholder=f"设置文件名: 默认为.json,可选为.md", label="设置保存文件名", value="对话历史记录").style(container=True)
                                  with gr.Column(scale=1):
-                                     saveHistoryBtn = gr.Button("💾 保存对话")
-                                     exportMarkdownBtn = gr.Button("📝 导出为Markdown")
                              with gr.Row():
                                  with gr.Column(scale=6):
-                                     historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0])
                                  with gr.Column(scale=1):
-                                     historyRefreshBtn = gr.Button("🔄 刷新")
                              with gr.Row():
                                  with gr.Column():
                                      downloadFile = gr.File(interactive=True)

      gr.Markdown(description)

      # Chatbot
-     user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown, use_websearch_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
      user_input.submit(reset_textbox, [], [user_input])

-     submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown, use_websearch_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
      submitBtn.click(reset_textbox, [], [user_input])

-     emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True)

-     retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)

-     delLastBtn.click(delete_last_conversation, [chatbot, history, token_count], [
-         chatbot, history, token_count, status_display], show_progress=True)

-     reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)

      # Template
      templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
-     templateFileSelectDropdown.change(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
-     templateSelectDropdown.change(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)

      # S&L
-     saveHistoryBtn.click(save_chat_history, [saveFileName, systemPromptTxt, history, chatbot], downloadFile, show_progress=True)
      saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])
-     exportMarkdownBtn.click(export_markdown, [saveFileName, systemPromptTxt, history, chatbot], downloadFile, show_progress=True)
      historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
-     historyFileSelectDropdown.change(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
-     downloadFile.change(load_chat_history, [downloadFile, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot])

- logging.info(colorama.Back.GREEN + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" + colorama.Style.RESET_ALL)
  # local server enabled by default; reachable from the LAN IP by default; no public share link created by default
  demo.title = "川虎ChatGPT 🚀"

  if __name__ == "__main__":
-     #if running in Docker
      if dockerflag:
          if authflag:
-             demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password))
          else:
              demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
-     #if not running in Docker
      else:
          if authflag:
              demo.queue().launch(share=False, auth=(username, password))
          else:
-             demo.queue().launch(share=False)  # change to share=True to create a public share link
-     #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)  # port can be customized
-     #demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=("username here", "password here"))  # a username and password can be set
-     #demo.queue().launch(auth=("username here", "password here"))  # suitable behind an Nginx reverse proxy
  # -*- coding:utf-8 -*-
  import os
  import logging
  import sys
+
+ import gradio as gr
+
  from utils import *
  from presets import *
+ from overwrites import *
+ from chat_func import *

+ logging.basicConfig(
+     level=logging.DEBUG,
+     format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
+ )

+ my_api_key = ""  # enter your API key here

+ # if we are running in Docker
+ if os.environ.get("dockerrun") == "yes":
      dockerflag = True
  else:
      dockerflag = False

  authflag = False

  if dockerflag:
+     my_api_key = os.environ.get("my_api_key")
      if my_api_key == "empty":
          logging.error("Please give an api key!")
          sys.exit(1)
+     # auth
+     username = os.environ.get("USERNAME")
+     password = os.environ.get("PASSWORD")
      if not (isinstance(username, type(None)) or isinstance(password, type(None))):
          authflag = True
  else:
+     if (
+         not my_api_key
+         and os.path.exists("api_key.txt")
+         and os.path.getsize("api_key.txt")
+     ):
          with open("api_key.txt", "r") as f:
              my_api_key = f.read().strip()
      if os.path.exists("auth.json"):
          authflag = True

  gr.Chatbot.postprocess = postprocess
+ PromptHelper.compact_text_chunks = compact_text_chunks
+
+ with open("custom.css", "r", encoding="utf-8") as f:
+     customCSS = f.read()

+ with gr.Blocks(
+     css=customCSS,
+     theme=gr.themes.Soft(
+         primary_hue=gr.themes.Color(
+             c50="#02C160",
+             c100="rgba(2, 193, 96, 0.2)",
+             c200="#02C160",
+             c300="rgba(2, 193, 96, 0.32)",
+             c400="rgba(2, 193, 96, 0.32)",
+             c500="rgba(2, 193, 96, 1.0)",
+             c600="rgba(2, 193, 96, 1.0)",
+             c700="rgba(2, 193, 96, 0.32)",
+             c800="rgba(2, 193, 96, 0.32)",
+             c900="#02C160",
+             c950="#02C160",
+         ),
+         secondary_hue=gr.themes.Color(
+             c50="#576b95",
+             c100="#576b95",
+             c200="#576b95",
+             c300="#576b95",
+             c400="#576b95",
+             c500="#576b95",
+             c600="#576b95",
+             c700="#576b95",
+             c800="#576b95",
+             c900="#576b95",
+             c950="#576b95",
+         ),
+         neutral_hue=gr.themes.Color(
+             name="gray",
+             c50="#f9fafb",
+             c100="#f3f4f6",
+             c200="#e5e7eb",
+             c300="#d1d5db",
+             c400="#B2B2B2",
+             c500="#808080",
+             c600="#636363",
+             c700="#515151",
+             c800="#393939",
+             c900="#272727",
+             c950="#171717",
+         ),
+         radius_size=gr.themes.sizes.radius_sm,
+     ).set(
+         button_primary_background_fill="#06AE56",
+         button_primary_background_fill_dark="#06AE56",
+         button_primary_background_fill_hover="#07C863",
+         button_primary_border_color="#06AE56",
+         button_primary_border_color_dark="#06AE56",
+         button_primary_text_color="#FFFFFF",
+         button_primary_text_color_dark="#FFFFFF",
+         button_secondary_background_fill="#F2F2F2",
+         button_secondary_background_fill_dark="#2B2B2B",
+         button_secondary_text_color="#393939",
+         button_secondary_text_color_dark="#FFFFFF",
+         # background_fill_primary="#F7F7F7",
+         # background_fill_primary_dark="#1F1F1F",
+         block_title_text_color="*primary_500",
+         block_title_background_fill="*primary_100",
+         input_background_fill="#F6F6F6",
+     ),
+ ) as demo:
      history = gr.State([])
      token_count = gr.State([])
      promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
+     user_api_key = gr.State(my_api_key)
      TRUECOMSTANT = gr.State(True)
      FALSECONSTANT = gr.State(False)
      topic = gr.State("未命名对话历史记录")

      with gr.Row(scale=1).style(equal_height=True):
          with gr.Column(scale=5):
              with gr.Row(scale=1):
+                 chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%")
              with gr.Row(scale=1):
                  with gr.Column(scale=12):
+                     user_input = gr.Textbox(
+                         show_label=False, placeholder="在这里输入"
+                     ).style(container=False)
+                 with gr.Column(min_width=70, scale=1):
+                     submitBtn = gr.Button("发送", variant="primary")
              with gr.Row(scale=1):
+                 emptyBtn = gr.Button(
+                     "🧹 新的对话",
+                 )
                  retryBtn = gr.Button("🔄 重新生成")
                  delLastBtn = gr.Button("🗑️ 删除一条对话")
                  reduceTokenBtn = gr.Button("♻️ 总结对话")

          with gr.Column():
+             with gr.Column(min_width=50, scale=1):
                  with gr.Tab(label="ChatGPT"):
+                     keyTxt = gr.Textbox(
+                         show_label=True,
+                         placeholder=f"OpenAI API-key...",
+                         value=hide_middle_chars(my_api_key),
+                         type="password",
+                         visible=not HIDE_MY_KEY,
+                         label="API-Key(按Enter提交)",
+                     )
+                     model_select_dropdown = gr.Dropdown(
+                         label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0]
+                     )
+                     use_streaming_checkbox = gr.Checkbox(
+                         label="实时传输回答", value=True, visible=enable_streaming_option
+                     )
                      use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)
+                     index_files = gr.Files(label="上传索引文件", type="file", multiple=True)

                  with gr.Tab(label="Prompt"):
+                     systemPromptTxt = gr.Textbox(
+                         show_label=True,
+                         placeholder=f"在这里输入System Prompt...",
+                         label="System prompt",
+                         value=initial_prompt,
+                         lines=10,
+                     ).style(container=True)
                      with gr.Accordion(label="加载Prompt模板", open=True):
                          with gr.Column():
                              with gr.Row():
                                  with gr.Column(scale=6):
+                                     templateFileSelectDropdown = gr.Dropdown(
+                                         label="选择Prompt模板集合文件",
+                                         choices=get_template_names(plain=True),
+                                         multiselect=False,
+                                         value=get_template_names(plain=True)[0],
+                                     )
                                  with gr.Column(scale=1):
                                      templateRefreshBtn = gr.Button("🔄 刷新")
                              with gr.Row():
                                  with gr.Column():
+                                     templateSelectDropdown = gr.Dropdown(
+                                         label="从Prompt模板中加载",
+                                         choices=load_template(
+                                             get_template_names(plain=True)[0], mode=1
+                                         ),
+                                         multiselect=False,
+                                         value=load_template(
+                                             get_template_names(plain=True)[0], mode=1
+                                         )[0],
+                                     )

                  with gr.Tab(label="保存/加载"):
                      with gr.Accordion(label="保存/加载对话历史记录", open=True):
                          with gr.Column():
                              with gr.Row():
                                  with gr.Column(scale=6):
+                                     historyFileSelectDropdown = gr.Dropdown(
+                                         label="从列表中加载对话",
+                                         choices=get_history_names(plain=True),
+                                         multiselect=False,
+                                         value=get_history_names(plain=True)[0],
+                                     )
                                  with gr.Column(scale=1):
+                                     historyRefreshBtn = gr.Button("🔄 刷新")
                              with gr.Row():
                                  with gr.Column(scale=6):
+                                     saveFileName = gr.Textbox(
+                                         show_label=True,
+                                         placeholder=f"设置文件名: 默认为.json,可选为.md",
+                                         label="设置保存文件名",
+                                         value="对话历史记录",
+                                     ).style(container=True)
                                  with gr.Column(scale=1):
+                                     saveHistoryBtn = gr.Button("💾 保存对话")
+                                     exportMarkdownBtn = gr.Button("📝 导出为Markdown")
+                                     gr.Markdown("默认保存于history文件夹")
                              with gr.Row():
                                  with gr.Column():
                                      downloadFile = gr.File(interactive=True)

+                 with gr.Tab(label="高级"):
+                     default_btn = gr.Button("🔙 恢复默认设置")
+                     gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")
+
+                     with gr.Accordion("参数", open=False):
+                         top_p = gr.Slider(
+                             minimum=-0,
+                             maximum=1.0,
+                             value=1.0,
+                             step=0.05,
+                             interactive=True,
+                             label="Top-p (nucleus sampling)",
+                         )
+                         temperature = gr.Slider(
+                             minimum=-0,
+                             maximum=2.0,
+                             value=1.0,
+                             step=0.1,
+                             interactive=True,
+                             label="Temperature",
+                         )
+
+                     apiurlTxt = gr.Textbox(
+                         show_label=True,
+                         placeholder=f"在这里输入API地址...",
+                         label="API地址",
+                         value="https://api.openai.com/v1/chat/completions",
+                         lines=2,
+                     )
+                     changeAPIURLBtn = gr.Button("🔄 切换API地址")
+                     proxyTxt = gr.Textbox(
+                         show_label=True,
+                         placeholder=f"在这里输入代理地址...",
+                         label="代理地址(示例:http://127.0.0.1:10809)",
+                         value="",
+                         lines=2,
+                     )
+                     changeProxyBtn = gr.Button("🔄 设置代理地址")

      gr.Markdown(description)

+     keyTxt.submit(submit_key, keyTxt, [user_api_key, status_display])
      # Chatbot
+     user_input.submit(
+         predict,
+         [
+             user_api_key,
+             systemPromptTxt,
+             history,
+             user_input,
+             chatbot,
+             token_count,
+             top_p,
+             temperature,
+             use_streaming_checkbox,
+             model_select_dropdown,
+             use_websearch_checkbox,
+             index_files,
+         ],
+         [chatbot, history, status_display, token_count],
+         show_progress=True,
+     )
      user_input.submit(reset_textbox, [], [user_input])

+     submitBtn.click(
+         predict,
+         [
+             user_api_key,
+             systemPromptTxt,
+             history,
+             user_input,
+             chatbot,
+             token_count,
+             top_p,
+             temperature,
+             use_streaming_checkbox,
+             model_select_dropdown,
+             use_websearch_checkbox,
+             index_files,
+         ],
+         [chatbot, history, status_display, token_count],
+         show_progress=True,
+     )
      submitBtn.click(reset_textbox, [], [user_input])

+     emptyBtn.click(
+         reset_state,
+         outputs=[chatbot, history, token_count, status_display],
+         show_progress=True,
+     )

+     retryBtn.click(
+         retry,
+         [
+             user_api_key,
+             systemPromptTxt,
+             history,
+             chatbot,
+             token_count,
+             top_p,
+             temperature,
+             use_streaming_checkbox,
+             model_select_dropdown,
+         ],
+         [chatbot, history, status_display, token_count],
+         show_progress=True,
+     )

+     delLastBtn.click(
+         delete_last_conversation,
+         [chatbot, history, token_count],
+         [chatbot, history, token_count, status_display],
+         show_progress=True,
+     )

+     reduceTokenBtn.click(
+         reduce_token_size,
+         [
+             user_api_key,
+             systemPromptTxt,
+             history,
+             chatbot,
+             token_count,
+             top_p,
+             temperature,
+             use_streaming_checkbox,
+             model_select_dropdown,
+         ],
+         [chatbot, history, status_display, token_count],
+         show_progress=True,
+     )

      # Template
      templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
+     templateFileSelectDropdown.change(
+         load_template,
+         [templateFileSelectDropdown],
+         [promptTemplates, templateSelectDropdown],
+         show_progress=True,
+     )
+     templateSelectDropdown.change(
+         get_template_content,
+         [promptTemplates, templateSelectDropdown, systemPromptTxt],
+         [systemPromptTxt],
+         show_progress=True,
+     )

      # S&L
+     saveHistoryBtn.click(
+         save_chat_history,
+         [saveFileName, systemPromptTxt, history, chatbot],
+         downloadFile,
+         show_progress=True,
+     )
      saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])
+     exportMarkdownBtn.click(
+         export_markdown,
+         [saveFileName, systemPromptTxt, history, chatbot],
+         downloadFile,
+         show_progress=True,
+     )
      historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
+     historyFileSelectDropdown.change(
+         load_chat_history,
+         [historyFileSelectDropdown, systemPromptTxt, history, chatbot],
+         [saveFileName, systemPromptTxt, history, chatbot],
+         show_progress=True,
+     )
+     downloadFile.change(
+         load_chat_history,
+         [downloadFile, systemPromptTxt, history, chatbot],
+         [saveFileName, systemPromptTxt, history, chatbot],
+     )

+     # Advanced
+     default_btn.click(
+         reset_default, [], [apiurlTxt, proxyTxt, status_display], show_progress=True
+     )
+     changeAPIURLBtn.click(
+         change_api_url,
+         [apiurlTxt],
+         [status_display],
+         show_progress=True,
+     )
+     changeProxyBtn.click(
+         change_proxy,
+         [proxyTxt],
+         [status_display],
+         show_progress=True,
+     )

+ logging.info(
+     colorama.Back.GREEN
+     + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面"
+     + colorama.Style.RESET_ALL
+ )
  # local server enabled by default; reachable from the LAN IP by default; no public share link created by default
  demo.title = "川虎ChatGPT 🚀"

  if __name__ == "__main__":
+     # if running in Docker
      if dockerflag:
          if authflag:
+             demo.queue().launch(
+                 server_name="0.0.0.0", server_port=7860, auth=(username, password)
+             )
          else:
              demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
+     # if not running in Docker
      else:
          if authflag:
              demo.queue().launch(share=False, auth=(username, password))
          else:
+             demo.queue().launch(share=False)  # change to share=True to create a public share link
+     # demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)  # port can be customized
+     # demo.queue().launch(server_name="0.0.0.0", server_port=7860, auth=("username here", "password here"))  # a username and password can be set
+     # demo.queue().launch(auth=("username here", "password here"))  # suitable behind an Nginx reverse proxy
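Note on the theming block added above: it is the commit's main visual change, building a custom gr.themes.Soft palette and passing it to gr.Blocks. A minimal, self-contained sketch of the same pattern, assuming gradio >= 3.21 (where gr.themes first appeared); the hue values below are placeholders, not the app's exact palette:

    import gradio as gr

    # gr.themes.Color takes an 11-step ramp (c50..c950) plus an optional name;
    # Soft then maps buttons, sliders, etc. onto the chosen hues.
    green = gr.themes.Color(
        c50="#e6f7ee", c100="#c3eed8", c200="#8bdcb1", c300="#54cb8b",
        c400="#27c06f", c500="#02C160", c600="#02a553", c700="#028946",
        c800="#016d39", c900="#01512c", c950="#01351f", name="chuanhu_green",
    )

    with gr.Blocks(theme=gr.themes.Soft(primary_hue=green)) as demo:
        gr.Button("发送", variant="primary")  # inherits the button_primary_* colors

    if __name__ == "__main__":
        demo.launch()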
chat_func.py ADDED
@@ -0,0 +1,423 @@
+ # -*- coding:utf-8 -*-
+ from __future__ import annotations
+ from typing import TYPE_CHECKING, List
+
+ import logging
+ import json
+ import os
+ import requests
+
+ from tqdm import tqdm
+ import colorama
+ from duckduckgo_search import ddg
+
+ from presets import *
+ from llama_func import *
+ from utils import *
+
+ # logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
+
+ if TYPE_CHECKING:
+     from typing import TypedDict
+
+     class DataframeData(TypedDict):
+         headers: List[str]
+         data: List[List[str | int | bool]]
+
+
+ initial_prompt = "You are a helpful assistant."
+ API_URL = "https://api.openai.com/v1/chat/completions"
+ HISTORY_DIR = "history"
+ TEMPLATES_DIR = "templates"
+
+
+ def get_response(
+     openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model
+ ):
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {openai_api_key}",
+     }
+
+     history = [construct_system(system_prompt), *history]
+
+     payload = {
+         "model": selected_model,
+         "messages": history,  # [{"role": "user", "content": f"{inputs}"}],
+         "temperature": temperature,  # 1.0,
+         "top_p": top_p,  # 1.0,
+         "n": 1,
+         "stream": stream,
+         "presence_penalty": 0,
+         "frequency_penalty": 0,
+     }
+     if stream:
+         timeout = timeout_streaming
+     else:
+         timeout = timeout_all
+
+     # read proxy settings from the environment variables
+     http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy")
+     https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy")
+
+     # use the proxy settings if they exist
+     proxies = {}
+     if http_proxy:
+         logging.info(f"Using HTTP proxy: {http_proxy}")
+         proxies["http"] = http_proxy
+     if https_proxy:
+         logging.info(f"Using HTTPS proxy: {https_proxy}")
+         proxies["https"] = https_proxy
+
+     # send the request through the proxy if one is configured, otherwise with default settings
+     if proxies:
+         response = requests.post(
+             API_URL,
+             headers=headers,
+             json=payload,
+             stream=True,
+             timeout=timeout,
+             proxies=proxies,
+         )
+     else:
+         response = requests.post(
+             API_URL,
+             headers=headers,
+             json=payload,
+             stream=True,
+             timeout=timeout,
+         )
+     return response
+
+
+ def stream_predict(
+     openai_api_key,
+     system_prompt,
+     history,
+     inputs,
+     chatbot,
+     all_token_counts,
+     top_p,
+     temperature,
+     selected_model,
+ ):
+     def get_return_value():
+         return chatbot, history, status_text, all_token_counts
+
+     logging.info("实时回答模式")
+     partial_words = ""
+     counter = 0
+     status_text = "开始实时传输回答……"
+     history.append(construct_user(inputs))
+     history.append(construct_assistant(""))
+     chatbot.append((parse_text(inputs), ""))
+     user_token_count = 0
+     if len(all_token_counts) == 0:
+         system_prompt_token_count = count_token(construct_system(system_prompt))
+         user_token_count = (
+             count_token(construct_user(inputs)) + system_prompt_token_count
+         )
+     else:
+         user_token_count = count_token(construct_user(inputs))
+     all_token_counts.append(user_token_count)
+     logging.info(f"输入token计数: {user_token_count}")
+     yield get_return_value()
+     try:
+         response = get_response(
+             openai_api_key,
+             system_prompt,
+             history,
+             temperature,
+             top_p,
+             True,
+             selected_model,
+         )
+     except requests.exceptions.ConnectTimeout:
+         status_text = (
+             standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
+         )
+         yield get_return_value()
+         return
+     except requests.exceptions.ReadTimeout:
+         status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
+         yield get_return_value()
+         return
+
+     yield get_return_value()
+     error_json_str = ""
+
+     for chunk in tqdm(response.iter_lines()):
+         if counter == 0:
+             counter += 1
+             continue
+         counter += 1
+         # check whether each line is non-empty
+         if chunk:
+             chunk = chunk.decode()
+             chunklength = len(chunk)
+             try:
+                 chunk = json.loads(chunk[6:])
+             except json.JSONDecodeError:
+                 logging.info(chunk)
+                 error_json_str += chunk
+                 status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}"
+                 yield get_return_value()
+                 continue
+             # decode each line as response data is in bytes
+             if chunklength > 6 and "delta" in chunk["choices"][0]:
+                 finish_reason = chunk["choices"][0]["finish_reason"]
+                 status_text = construct_token_message(
+                     sum(all_token_counts), stream=True
+                 )
+                 if finish_reason == "stop":
+                     yield get_return_value()
+                     break
+                 try:
+                     partial_words = (
+                         partial_words + chunk["choices"][0]["delta"]["content"]
+                     )
+                 except KeyError:
+                     status_text = (
+                         standard_error_msg
+                         + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: "
+                         + str(sum(all_token_counts))
+                     )
+                     yield get_return_value()
+                     break
+                 history[-1] = construct_assistant(partial_words)
+                 chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
+                 all_token_counts[-1] += 1
+                 yield get_return_value()
+
+
+ def predict_all(
+     openai_api_key,
+     system_prompt,
+     history,
+     inputs,
+     chatbot,
+     all_token_counts,
+     top_p,
+     temperature,
+     selected_model,
+ ):
+     logging.info("一次性回答模式")
+     history.append(construct_user(inputs))
+     history.append(construct_assistant(""))
+     chatbot.append((parse_text(inputs), ""))
+     all_token_counts.append(count_token(construct_user(inputs)))
+     try:
+         response = get_response(
+             openai_api_key,
+             system_prompt,
+             history,
+             temperature,
+             top_p,
+             False,
+             selected_model,
+         )
+     except requests.exceptions.ConnectTimeout:
+         status_text = (
+             standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
+         )
+         return chatbot, history, status_text, all_token_counts
+     except requests.exceptions.ProxyError:
+         status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
+         return chatbot, history, status_text, all_token_counts
+     except requests.exceptions.SSLError:
+         status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
+         return chatbot, history, status_text, all_token_counts
+     response = json.loads(response.text)
+     content = response["choices"][0]["message"]["content"]
+     history[-1] = construct_assistant(content)
+     chatbot[-1] = (parse_text(inputs), parse_text(content))
+     total_token_count = response["usage"]["total_tokens"]
+     all_token_counts[-1] = total_token_count - sum(all_token_counts)
+     status_text = construct_token_message(total_token_count)
+     return chatbot, history, status_text, all_token_counts
+
+
+ def predict(
+     openai_api_key,
+     system_prompt,
+     history,
+     inputs,
+     chatbot,
+     all_token_counts,
+     top_p,
+     temperature,
+     stream=False,
+     selected_model=MODELS[0],
+     use_websearch_checkbox=False,
+     files=None,
+     should_check_token_count=True,
+ ):  # repetition_penalty, top_k
+     logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
+     if files:
+         msg = "构建索引中……(这可能需要比较久的时间)"
+         logging.info(msg)
+         yield chatbot, history, msg, all_token_counts
+         index = construct_index(openai_api_key, file_src=files)
+         msg = "索引构建完成,获取回答中……"
+         yield chatbot, history, msg, all_token_counts
+         history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot)
+         yield chatbot, history, status_text, all_token_counts
+         return
+     if use_websearch_checkbox:
+         results = ddg(inputs, max_results=3)
+         web_results = []
+         for idx, result in enumerate(results):
+             logging.info(f"搜索结果{idx + 1}:{result}")
+             web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
+         web_results = "\n\n".join(web_results)
+         inputs = (
+             replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
+             .replace("{query}", inputs)
+             .replace("{web_results}", web_results)
+         )
+     if len(openai_api_key) != 51:
+         status_text = standard_error_msg + no_apikey_msg
+         logging.info(status_text)
+         chatbot.append((parse_text(inputs), ""))
+         if len(history) == 0:
+             history.append(construct_user(inputs))
+             history.append("")
+             all_token_counts.append(0)
+         else:
+             history[-2] = construct_user(inputs)
+         yield chatbot, history, status_text, all_token_counts
+         return
+     if stream:
+         yield chatbot, history, "开始生成回答……", all_token_counts
+     if stream:
+         logging.info("使用流式传输")
+         iter = stream_predict(
+             openai_api_key,
+             system_prompt,
+             history,
+             inputs,
+             chatbot,
+             all_token_counts,
+             top_p,
+             temperature,
+             selected_model,
+         )
+         for chatbot, history, status_text, all_token_counts in iter:
+             yield chatbot, history, status_text, all_token_counts
+     else:
+         logging.info("不使用流式传输")
+         chatbot, history, status_text, all_token_counts = predict_all(
+             openai_api_key,
+             system_prompt,
+             history,
+             inputs,
+             chatbot,
+             all_token_counts,
+             top_p,
+             temperature,
+             selected_model,
+         )
+         yield chatbot, history, status_text, all_token_counts
+     logging.info(f"传输完毕。当前token计数为{all_token_counts}")
+     if len(history) > 1 and history[-1]["content"] != inputs:
+         logging.info(
+             "回答为:"
+             + colorama.Fore.BLUE
+             + f"{history[-1]['content']}"
+             + colorama.Style.RESET_ALL
+         )
+     if stream:
+         max_token = max_token_streaming
+     else:
+         max_token = max_token_all
+     if sum(all_token_counts) > max_token and should_check_token_count:
+         status_text = f"精简token中{all_token_counts}/{max_token}"
+         logging.info(status_text)
+         yield chatbot, history, status_text, all_token_counts
+         iter = reduce_token_size(
+             openai_api_key,
+             system_prompt,
+             history,
+             chatbot,
+             all_token_counts,
+             top_p,
+             temperature,
+             stream=False,
+             selected_model=selected_model,
+             hidden=True,
+         )
+         for chatbot, history, status_text, all_token_counts in iter:
+             status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
+             yield chatbot, history, status_text, all_token_counts
+
+
+ def retry(
+     openai_api_key,
+     system_prompt,
+     history,
+     chatbot,
+     token_count,
+     top_p,
+     temperature,
+     stream=False,
+     selected_model=MODELS[0],
+ ):
+     logging.info("重试中……")
+     if len(history) == 0:
+         yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
+         return
+     history.pop()
+     inputs = history.pop()["content"]
+     token_count.pop()
+     iter = predict(
+         openai_api_key,
+         system_prompt,
+         history,
+         inputs,
+         chatbot,
+         token_count,
+         top_p,
+         temperature,
+         stream=stream,
+         selected_model=selected_model,
+     )
+     logging.info("重试完毕")
+     for x in iter:
+         yield x
+
+
+ def reduce_token_size(
+     openai_api_key,
+     system_prompt,
+     history,
+     chatbot,
+     token_count,
+     top_p,
+     temperature,
+     stream=False,
+     selected_model=MODELS[0],
+     hidden=False,
+ ):
+     logging.info("开始减少token数量……")
+     iter = predict(
+         openai_api_key,
+         system_prompt,
+         history,
+         summarize_prompt,
+         chatbot,
+         token_count,
+         top_p,
+         temperature,
+         stream=stream,
+         selected_model=selected_model,
+         should_check_token_count=False,
+     )
+     logging.info(f"chatbot: {chatbot}")
+     for chatbot, history, status_text, previous_token_count in iter:
+         history = history[-2:]
+         token_count = previous_token_count[-1:]
+         if hidden:
+             chatbot.pop()
+         yield chatbot, history, construct_token_message(
+             sum(token_count), stream=stream
+         ), token_count
+     logging.info("减少token数量完毕")
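A note on the streaming code above: stream_predict consumes the chat completions endpoint as a server-sent-event stream, which is why it skips the first line, strips a 6-character prefix, and JSON-decodes the rest. A small sketch of that parsing step in isolation (raw_line below is a hand-written example of the wire format, not captured output):

    import json

    # Each streamed line looks like b'data: {...}'; chunk[6:] drops the "data: " prefix.
    raw_line = b'data: {"choices": [{"delta": {"content": "Hi"}, "finish_reason": null}]}'
    chunk = raw_line.decode()
    payload = json.loads(chunk[6:])
    delta = payload["choices"][0]["delta"].get("content", "")
    print(delta)  # -> Hi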
custom.css ADDED
@@ -0,0 +1,188 @@
+ /* status_display */
+ #status_display {
+     display: flex;
+     min-height: 2.5em;
+     align-items: flex-end;
+     justify-content: flex-end;
+ }
+ #status_display p {
+     font-size: .85em;
+     font-family: monospace;
+     color: var(--text-color-subdued) !important;
+ }
+ /* chatbot */
+ :root {
+     --bg-color-light: #F3F3F3;
+     --bg-color-dark: #121111;
+ }
+
+ @media (prefers-color-scheme: light) {
+     #chuanhu_chatbot {
+         background-color: var(--bg-color-light) !important;
+     }
+     [data-testid = "bot"] {
+         background-color: #FFFFFF !important;
+     }
+     [data-testid = "user"] {
+         background-color: #95EC69 !important;
+     }
+ }
+
+ @media (prefers-color-scheme: dark) {
+     #chuanhu_chatbot {
+         background-color: var(--bg-color-dark) !important;
+     }
+     [data-testid = "bot"] {
+         background-color: #2C2C2C !important;
+     }
+     [data-testid = "user"] {
+         background-color: #26B561 !important;
+     }
+ }
+
+ /* devices at least 500px wide */
+ @media (min-width: 500px) {
+     #chuanhu_chatbot {
+         height: calc(100vh - 200px);
+     }
+     #chuanhu_chatbot .wrap {
+         max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
+     }
+ }
+ /* devices narrower than 500px */
+ @media (max-width: 499px) {
+     #chuanhu_chatbot {
+         height: calc(100vh - 140px);
+     }
+     #chuanhu_chatbot .wrap {
+         max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
+     }
+ }
+ /* chat bubbles */
+ [class *= "message"] {
+     border-radius: var(--radius-xl) !important;
+     border: none;
+     padding: var(--spacing-xl) !important;
+     font-size: var(--text-md) !important;
+     line-height: var(--line-md) !important;
+ }
+ [data-testid = "bot"] {
+     max-width: 85%;
+     border-bottom-left-radius: 0 !important;
+ }
+ [data-testid = "user"] {
+     max-width: 85%;
+     width: auto !important;
+     border-bottom-right-radius: 0 !important;
+ }
+ /* tables */
+ table {
+     margin: 1em 0;
+     border-collapse: collapse;
+     empty-cells: show;
+ }
+ td,th {
+     border: 1.2px solid var(--color-border-primary) !important;
+     padding: 0.2em;
+ }
+ thead {
+     background-color: rgba(175,184,193,0.2);
+ }
+ thead th {
+     padding: .5em .2em;
+ }
+ /* inline code */
+ code {
+     display: inline;
+     white-space: break-spaces;
+     border-radius: 6px;
+     margin: 0 2px 0 2px;
+     padding: .2em .4em .1em .4em;
+     background-color: rgba(175,184,193,0.2);
+ }
+ /* code blocks */
+ pre code {
+     display: block;
+     white-space: pre;
+     background-color: hsla(0, 0%, 0%, 80%)!important;
+     border-radius: 10px;
+     padding: 1rem 1.2rem 1rem;
+     margin: 1.2em 2em 1.2em 0.5em;
+     color: #FFF;
+     box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
+ }
+ /* syntax highlighting */
+ .codehilite .hll { background-color: #49483e }
+ .codehilite .c { color: #75715e } /* Comment */
+ .codehilite .err { color: #960050; background-color: #1e0010 } /* Error */
+ .codehilite .k { color: #66d9ef } /* Keyword */
+ .codehilite .l { color: #ae81ff } /* Literal */
+ .codehilite .n { color: #f8f8f2 } /* Name */
+ .codehilite .o { color: #f92672 } /* Operator */
+ .codehilite .p { color: #f8f8f2 } /* Punctuation */
+ .codehilite .ch { color: #75715e } /* Comment.Hashbang */
+ .codehilite .cm { color: #75715e } /* Comment.Multiline */
+ .codehilite .cp { color: #75715e } /* Comment.Preproc */
+ .codehilite .cpf { color: #75715e } /* Comment.PreprocFile */
+ .codehilite .c1 { color: #75715e } /* Comment.Single */
+ .codehilite .cs { color: #75715e } /* Comment.Special */
+ .codehilite .gd { color: #f92672 } /* Generic.Deleted */
+ .codehilite .ge { font-style: italic } /* Generic.Emph */
+ .codehilite .gi { color: #a6e22e } /* Generic.Inserted */
+ .codehilite .gs { font-weight: bold } /* Generic.Strong */
+ .codehilite .gu { color: #75715e } /* Generic.Subheading */
+ .codehilite .kc { color: #66d9ef } /* Keyword.Constant */
+ .codehilite .kd { color: #66d9ef } /* Keyword.Declaration */
+ .codehilite .kn { color: #f92672 } /* Keyword.Namespace */
+ .codehilite .kp { color: #66d9ef } /* Keyword.Pseudo */
+ .codehilite .kr { color: #66d9ef } /* Keyword.Reserved */
+ .codehilite .kt { color: #66d9ef } /* Keyword.Type */
+ .codehilite .ld { color: #e6db74 } /* Literal.Date */
+ .codehilite .m { color: #ae81ff } /* Literal.Number */
+ .codehilite .s { color: #e6db74 } /* Literal.String */
+ .codehilite .na { color: #a6e22e } /* Name.Attribute */
+ .codehilite .nb { color: #f8f8f2 } /* Name.Builtin */
+ .codehilite .nc { color: #a6e22e } /* Name.Class */
+ .codehilite .no { color: #66d9ef } /* Name.Constant */
+ .codehilite .nd { color: #a6e22e } /* Name.Decorator */
+ .codehilite .ni { color: #f8f8f2 } /* Name.Entity */
+ .codehilite .ne { color: #a6e22e } /* Name.Exception */
+ .codehilite .nf { color: #a6e22e } /* Name.Function */
+ .codehilite .nl { color: #f8f8f2 } /* Name.Label */
+ .codehilite .nn { color: #f8f8f2 } /* Name.Namespace */
+ .codehilite .nx { color: #a6e22e } /* Name.Other */
+ .codehilite .py { color: #f8f8f2 } /* Name.Property */
+ .codehilite .nt { color: #f92672 } /* Name.Tag */
+ .codehilite .nv { color: #f8f8f2 } /* Name.Variable */
+ .codehilite .ow { color: #f92672 } /* Operator.Word */
+ .codehilite .w { color: #f8f8f2 } /* Text.Whitespace */
+ .codehilite .mb { color: #ae81ff } /* Literal.Number.Bin */
+ .codehilite .mf { color: #ae81ff } /* Literal.Number.Float */
+ .codehilite .mh { color: #ae81ff } /* Literal.Number.Hex */
+ .codehilite .mi { color: #ae81ff } /* Literal.Number.Integer */
+ .codehilite .mo { color: #ae81ff } /* Literal.Number.Oct */
+ .codehilite .sa { color: #e6db74 } /* Literal.String.Affix */
+ .codehilite .sb { color: #e6db74 } /* Literal.String.Backtick */
+ .codehilite .sc { color: #e6db74 } /* Literal.String.Char */
+ .codehilite .dl { color: #e6db74 } /* Literal.String.Delimiter */
+ .codehilite .sd { color: #e6db74 } /* Literal.String.Doc */
+ .codehilite .s2 { color: #e6db74 } /* Literal.String.Double */
+ .codehilite .se { color: #ae81ff } /* Literal.String.Escape */
+ .codehilite .sh { color: #e6db74 } /* Literal.String.Heredoc */
+ .codehilite .si { color: #e6db74 } /* Literal.String.Interpol */
+ .codehilite .sx { color: #e6db74 } /* Literal.String.Other */
+ .codehilite .sr { color: #e6db74 } /* Literal.String.Regex */
+ .codehilite .s1 { color: #e6db74 } /* Literal.String.Single */
+ .codehilite .ss { color: #e6db74 } /* Literal.String.Symbol */
+ .codehilite .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
+ .codehilite .fm { color: #a6e22e } /* Name.Function.Magic */
+ .codehilite .vc { color: #f8f8f2 } /* Name.Variable.Class */
+ .codehilite .vg { color: #f8f8f2 } /* Name.Variable.Global */
+ .codehilite .vi { color: #f8f8f2 } /* Name.Variable.Instance */
+ .codehilite .vm { color: #f8f8f2 } /* Name.Variable.Magic */
+ .codehilite .il { color: #ae81ff } /* Literal.Number.Integer.Long */
+
+ /* global elements */
+ * {
+     transition: all 0.6s;
+ }
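The #chuanhu_chatbot rules above only take effect because app.py creates the component with a matching elem_id, which Gradio renders as the element's HTML id. A minimal sketch of that wiring (assuming a custom.css like the one above sits next to the script):

    import gradio as gr

    with open("custom.css", "r", encoding="utf-8") as f:
        customCSS = f.read()

    # elem_id="chuanhu_chatbot" is what the "#chuanhu_chatbot { ... }"
    # selectors above attach to.
    with gr.Blocks(css=customCSS) as demo:
        chatbot = gr.Chatbot(elem_id="chuanhu_chatbot")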
llama_func.py ADDED
@@ -0,0 +1,192 @@
+ import os
+ import logging
+
+ from llama_index import GPTSimpleVectorIndex
+ from llama_index import download_loader
+ from llama_index import (
+     Document,
+     LLMPredictor,
+     PromptHelper,
+     QuestionAnswerPrompt,
+     RefinePrompt,
+ )
+ from langchain.llms import OpenAI
+ import colorama
+
+
+ from presets import *
+ from utils import *
+
+
+ def get_documents(file_src):
+     documents = []
+     index_name = ""
+     logging.debug("Loading documents...")
+     logging.debug(f"file_src: {file_src}")
+     for file in file_src:
+         logging.debug(f"file: {file.name}")
+         index_name += file.name
+         if os.path.splitext(file.name)[1] == ".pdf":
+             logging.debug("Loading PDF...")
+             CJKPDFReader = download_loader("CJKPDFReader")
+             loader = CJKPDFReader()
+             documents += loader.load_data(file=file.name)
+         elif os.path.splitext(file.name)[1] == ".docx":
+             logging.debug("Loading DOCX...")
+             DocxReader = download_loader("DocxReader")
+             loader = DocxReader()
+             documents += loader.load_data(file=file.name)
+         elif os.path.splitext(file.name)[1] == ".epub":
+             logging.debug("Loading EPUB...")
+             EpubReader = download_loader("EpubReader")
+             loader = EpubReader()
+             documents += loader.load_data(file=file.name)
+         else:
+             logging.debug("Loading text file...")
+             with open(file.name, "r", encoding="utf-8") as f:
+                 text = add_space(f.read())
+                 documents += [Document(text)]
+     index_name = sha1sum(index_name)
+     return documents, index_name
+
+
+ def construct_index(
+     api_key,
+     file_src,
+     max_input_size=4096,
+     num_outputs=1,
+     max_chunk_overlap=20,
+     chunk_size_limit=600,
+     embedding_limit=None,
+     separator=" ",
+     num_children=10,
+     max_keywords_per_chunk=10,
+ ):
+     os.environ["OPENAI_API_KEY"] = api_key
+     chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
+     embedding_limit = None if embedding_limit == 0 else embedding_limit
+     separator = " " if separator == "" else separator
+
+     llm_predictor = LLMPredictor(
+         llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
+     )
+     prompt_helper = PromptHelper(
+         max_input_size,
+         num_outputs,
+         max_chunk_overlap,
+         embedding_limit,
+         chunk_size_limit,
+         separator=separator,
+     )
+     documents, index_name = get_documents(file_src)
+     if os.path.exists(f"./index/{index_name}.json"):
+         logging.info("找到了缓存的索引文件,加载中……")
+         return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
+     else:
+         try:
+             logging.debug("构建索引中……")
+             index = GPTSimpleVectorIndex(
+                 documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
+             )
+             os.makedirs("./index", exist_ok=True)
+             index.save_to_disk(f"./index/{index_name}.json")
+             return index
+         except Exception as e:
+             print(e)
+             return None
+
+
+ def chat_ai(
+     api_key,
+     index,
+     question,
+     context,
+     chatbot,
+ ):
+     os.environ["OPENAI_API_KEY"] = api_key
+
+     logging.info(f"Question: {question}")
+
+     response, chatbot_display, status_text = ask_ai(
+         api_key,
+         index,
+         question,
+         replace_today(PROMPT_TEMPLATE),
+         REFINE_TEMPLATE,
+         SIM_K,
+         INDEX_QUERY_TEMPRATURE,
+         context,
+     )
+     if response is None:
+         status_text = "查询失败,请换个问法试试"
+         return context, chatbot, status_text
+
+     context.append({"role": "user", "content": question})
+     context.append({"role": "assistant", "content": response})
+     chatbot.append((question, chatbot_display))
+
+     os.environ["OPENAI_API_KEY"] = ""
+     return context, chatbot, status_text
+
+
+ def ask_ai(
+     api_key,
+     index,
+     question,
+     prompt_tmpl,
+     refine_tmpl,
+     sim_k=1,
+     temprature=0,
+     prefix_messages=[],
+ ):
+     os.environ["OPENAI_API_KEY"] = api_key
+
+     logging.debug("Index file found")
+     logging.debug("Querying index...")
+     llm_predictor = LLMPredictor(
+         llm=OpenAI(
+             temperature=temprature,
+             model_name="gpt-3.5-turbo-0301",
+             prefix_messages=prefix_messages,
+         )
+     )
+
+     response = None  # initialize response variable to avoid UnboundLocalError
+     qa_prompt = QuestionAnswerPrompt(prompt_tmpl)
+     rf_prompt = RefinePrompt(refine_tmpl)
+     response = index.query(
+         question,
+         llm_predictor=llm_predictor,
+         similarity_top_k=sim_k,
+         text_qa_template=qa_prompt,
+         refine_template=rf_prompt,
+         response_mode="compact",
+     )
+
+     if response is not None:
+         logging.info(f"Response: {response}")
+         ret_text = response.response
+         nodes = []
+         for index, node in enumerate(response.source_nodes):
+             brief = node.source_text[:25].replace("\n", "")
+             nodes.append(
+                 f"<details><summary>[{index+1}]\t{brief}...</summary><p>{node.source_text}</p></details>"
+             )
+         new_response = ret_text + "\n----------\n" + "\n\n".join(nodes)
+         logging.info(
+             f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}"
+         )
+         os.environ["OPENAI_API_KEY"] = ""
+         return ret_text, new_response, f"查询消耗了{llm_predictor.last_token_usage} tokens"
+     else:
+         logging.warning("No response found, returning None")
+         os.environ["OPENAI_API_KEY"] = ""
+         return None, None, ""
+
+
+ def add_space(text):
+     punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "}
+     for cn_punc, en_punc in punctuations.items():
+         text = text.replace(cn_punc, en_punc)
+     return text
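Because get_documents only reads the .name attribute of each uploaded file, the indexing pipeline can be exercised without Gradio at all. A hypothetical usage sketch (the file names and API key are placeholders; construct_index caches the result under ./index/<sha1>.json as shown above):

    from types import SimpleNamespace
    from llama_func import construct_index, chat_ai

    # Mimic Gradio file objects, which expose a .name path.
    files = [SimpleNamespace(name="notes.pdf"), SimpleNamespace(name="spec.txt")]

    index = construct_index("sk-...", file_src=files)
    if index is not None:
        context, chatbot, status = chat_ai("sk-...", index, "总结这两份文件", [], [])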
overwrites.py ADDED
@@ -0,0 +1,40 @@
+ from __future__ import annotations
+ import logging
+
+ from llama_index import Prompt
+ from typing import List, Tuple
+ import mdtex2html
+
+ from presets import *
+ from llama_func import *
+
+
+ def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
+     logging.debug("Compacting text chunks...🚀🚀🚀")
+     combined_str = [c.strip() for c in text_chunks if c.strip()]
+     combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
+     combined_str = "\n\n".join(combined_str)
+     # resplit based on self.max_chunk_overlap
+     text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
+     return text_splitter.split_text(combined_str)
+
+
+ def postprocess(
+     self, y: List[Tuple[str | None, str | None]]
+ ) -> List[Tuple[str | None, str | None]]:
+     """
+     Parameters:
+         y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
+     Returns:
+         List of tuples representing the message and response. Each message and response will be a string of HTML.
+     """
+     if y is None:
+         return []
+     for i, (message, response) in enumerate(y):
+         y[i] = (
+             # None if message is None else markdown.markdown(message),
+             # None if response is None else markdown.markdown(response),
+             None if message is None else message,
+             None if response is None else mdtex2html.convert(response, extensions=['fenced_code','codehilite','tables']),
+         )
+     return y
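Both functions above are deliberately written as unbound methods: app.py assigns them onto the classes (gr.Chatbot.postprocess = postprocess, PromptHelper.compact_text_chunks = compact_text_chunks), so self binds at call time. The monkey-patching pattern in miniature:

    class Greeter:
        def greet(self):
            return "hello"

    # Same shape as postprocess/compact_text_chunks above: a plain function
    # whose first parameter is self, assigned onto the class afterwards.
    def shout(self):
        return "HELLO!"

    Greeter.greet = shout
    print(Greeter().greet())  # -> HELLO!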
presets.py CHANGED
@@ -1,6 +1,26 @@
  # -*- coding:utf-8 -*-
  title = """<h1 align="left" style="min-width:200px; margin-top:0;">川虎ChatGPT 🚀</h1>"""
- description = """<div align="center" style="margin:16px 0">

  由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
@@ -9,62 +29,21 @@ description = """<div align="center" style="margin:16px 0">
  此App使用 `gpt-3.5-turbo` 大语言模型
  </div>
  """
- customCSS = """
- #status_display {
-     display: flex;
-     min-height: 2.5em;
-     align-items: flex-end;
-     justify-content: flex-end;
- }
- #status_display p {
-     font-size: .85em;
-     font-family: monospace;
-     color: var(--text-color-subdued) !important;
- }
- [class *= "message"] {
-     border-radius: var(--radius-xl) !important;
-     border: none;
-     padding: var(--spacing-xl) !important;
-     font-size: var(--text-md) !important;
-     line-height: var(--line-md) !important;
- }
- [data-testid = "bot"] {
-     max-width: 85%;
-     border-bottom-left-radius: 0 !important;
- }
- [data-testid = "user"] {
-     max-width: 85%;
-     width: auto !important;
-     border-bottom-right-radius: 0 !important;
- }
- code {
-     display: inline;
-     white-space: break-spaces;
-     border-radius: 6px;
-     margin: 0 2px 0 2px;
-     padding: .2em .4em .1em .4em;
-     background-color: rgba(175,184,193,0.2);
- }
- pre code {
-     display: block;
-     white-space: pre;
-     background-color: hsla(0, 0%, 0%, 72%);
-     border: solid 5px var(--color-border-primary) !important;
-     border-radius: 10px;
-     padding: 0 1.2rem 1.2rem;
-     margin-top: 1em !important;
-     color: #FFF;
-     box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
- }

- * {
-     transition: all 0.6s;
- }
- """

- summarize_prompt = "你是谁?我们刚才聊了什么?"  # prompt used when summarizing the conversation
- MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]  # selectable models
- websearch_prompt = """Web search results:

  {web_results}
  Current date: {current_date}
@@ -73,18 +52,29 @@ Instructions: Using the provided web search results, write a comprehensive reply
  Query: {query}
  Reply in 中文"""

- # error messages
- standard_error_msg = "☹️发生了错误:"  # standard prefix for error messages
- error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。"  # error while fetching a reply
- connection_timeout_prompt = "连接超时,无法获取对话。"  # connection timeout
- read_timeout_prompt = "读取超时,无法获取对话。"  # read timeout
- proxy_error_prompt = "代理错误,无法获取对话。"  # proxy error
- ssl_error_prompt = "SSL错误,无法获取对话。"  # SSL error
- no_apikey_msg = "API key长度不是51位,请检查是否输入正确。"  # API key is not 51 characters long

- max_token_streaming = 3500  # max token count in streaming mode
- timeout_streaming = 30  # timeout in streaming mode
- max_token_all = 3500  # max token count in non-streaming mode
- timeout_all = 200  # timeout in non-streaming mode
- enable_streaming_option = True  # whether to show the checkbox for streaming replies
- HIDE_MY_KEY = False  # set this to True to hide your API key in the UI
  # -*- coding:utf-8 -*-
+ # error messages
+ standard_error_msg = "☹️发生了错误:"  # standard prefix for error messages
+ error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。"  # error while fetching a reply
+ connection_timeout_prompt = "连接超时,无法获取对话。"  # connection timeout
+ read_timeout_prompt = "读取超时,无法获取对话。"  # read timeout
+ proxy_error_prompt = "代理错误,无法获取对话。"  # proxy error
+ ssl_error_prompt = "SSL错误,无法获取对话。"  # SSL error
+ no_apikey_msg = "API key长度不是51位,请检查是否输入正确。"  # API key is not 51 characters long
+
+ max_token_streaming = 3500  # max token count in streaming mode
+ timeout_streaming = 30  # timeout in streaming mode
+ max_token_all = 3500  # max token count in non-streaming mode
+ timeout_all = 200  # timeout in non-streaming mode
+ enable_streaming_option = True  # whether to show the checkbox for streaming replies
+ HIDE_MY_KEY = False  # set this to True to hide your API key in the UI
+
+ SIM_K = 5
+ INDEX_QUERY_TEMPRATURE = 1.0
+
  title = """<h1 align="left" style="min-width:200px; margin-top:0;">川虎ChatGPT 🚀</h1>"""
+ description = """\
+ <div align="center" style="margin:16px 0">

  由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发

  此App使用 `gpt-3.5-turbo` 大语言模型
  </div>
  """

+ summarize_prompt = "你是谁?我们刚才聊了什么?"  # prompt used when summarizing the conversation

+ MODELS = [
+     "gpt-3.5-turbo",
+     "gpt-3.5-turbo-0301",
+     "gpt-4",
+     "gpt-4-0314",
+     "gpt-4-32k",
+     "gpt-4-32k-0314",
+ ]  # selectable models
+
+
+ WEBSEARCH_PTOMPT_TEMPLATE = """\
+ Web search results:

  {web_results}
  Current date: {current_date}

  Query: {query}
  Reply in 中文"""

+ PROMPT_TEMPLATE = """\
+ Context information is below.
+ ---------------------
+ {context_str}
+ ---------------------
+ Current date: {current_date}.
+ Using the provided context information, write a comprehensive reply to the given query.
+ Make sure to cite results using [number] notation after the reference.
+ If the provided context information refers to multiple subjects with the same name, write separate answers for each subject.
+ Use prior knowledge only if the given context didn't provide enough information.
+ Answer the question: {query_str}
+ Reply in 中文
+ """

+ REFINE_TEMPLATE = """\
+ The original question is as follows: {query_str}
+ We have provided an existing answer: {existing_answer}
+ We have the opportunity to refine the existing answer
+ (only if needed) with some more context below.
+ ------------
+ {context_msg}
+ ------------
+ Given the new context, refine the original answer to better
+ Answer in the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch.
+ If the context isn't useful, return the original answer.
+ """
requirements.txt CHANGED
@@ -6,3 +6,6 @@ socksio
  tqdm
  colorama
  duckduckgo_search
+ Pygments
+ llama_index
+ langchain
utils.py CHANGED
@@ -3,21 +3,16 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
 import logging
 import json
-import gradio as gr
-# import openai
 import os
-import traceback
-import requests
-# import markdown
+import datetime
+import hashlib
 import csv
-import mdtex2html
+
+import gradio as gr
 from pypinyin import lazy_pinyin
-from presets import *
 import tiktoken
-from tqdm import tqdm
-import colorama
-from duckduckgo_search import ddg
-import datetime
+
+from presets import *
 
 # logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
 
@@ -28,30 +23,12 @@ if TYPE_CHECKING:
         headers: List[str]
         data: List[List[str | int | bool]]
 
+
 initial_prompt = "You are a helpful assistant."
 API_URL = "https://api.openai.com/v1/chat/completions"
 HISTORY_DIR = "history"
 TEMPLATES_DIR = "templates"
 
-def postprocess(
-    self, y: List[Tuple[str | None, str | None]]
-) -> List[Tuple[str | None, str | None]]:
-    """
-    Parameters:
-        y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
-    Returns:
-        List of tuples representing the message and response. Each message and response will be a string of HTML.
-    """
-    if y is None:
-        return []
-    for i, (message, response) in enumerate(y):
-        y[i] = (
-            # None if message is None else markdown.markdown(message),
-            # None if response is None else markdown.markdown(response),
-            None if message is None else message,
-            None if response is None else mdtex2html.convert(response),
-        )
-    return y
 
 def count_token(message):
     encoding = tiktoken.get_encoding("cl100k_base")
@@ -59,251 +36,43 @@ def count_token(message):
     length = len(encoding.encode(input_str))
     return length
 
+
 def parse_text(text):
-    lines = text.split("\n")
-    lines = [line for line in lines if line != ""]
-    count = 0
-    for i, line in enumerate(lines):
-        if "```" in line:
-            count += 1
-            items = line.split('`')
-            if count % 2 == 1:
-                lines[i] = f'<pre><code class="language-{items[-1]}">'
-            else:
-                lines[i] = f'<br></code></pre>'
+    in_code_block = False
+    new_lines = []
+    for line in text.split("\n"):
+        if line.strip().startswith("```"):
+            in_code_block = not in_code_block
+        if in_code_block:
+            if line.strip() != "":
+                new_lines.append(line)
         else:
-            if i > 0:
-                if count % 2 == 1:
-                    line = line.replace("`", "\`")
-                    line = line.replace("<", "&lt;")
-                    line = line.replace(">", "&gt;")
-                    line = line.replace(" ", "&nbsp;")
-                    line = line.replace("*", "&ast;")
-                    line = line.replace("_", "&lowbar;")
-                    line = line.replace("-", "&#45;")
-                    line = line.replace(".", "&#46;")
-                    line = line.replace("!", "&#33;")
-                    line = line.replace("(", "&#40;")
-                    line = line.replace(")", "&#41;")
-                    line = line.replace("$", "&#36;")
-                lines[i] = "<br>"+line
-    text = "".join(lines)
+            new_lines.append(line)
+    if in_code_block:
+        new_lines.append("```")
+    text = "\n".join(new_lines)
     return text
 
+
 def construct_text(role, text):
     return {"role": role, "content": text}
 
+
 def construct_user(text):
    return construct_text("user", text)
 
+
 def construct_system(text):
    return construct_text("system", text)
 
+
 def construct_assistant(text):
    return construct_text("assistant", text)
 
+
 def construct_token_message(token, stream=False):
     return f"Token 计数: {token}"
 
-def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model):
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {openai_api_key}"
-    }
-
-    history = [construct_system(system_prompt), *history]
-
-    payload = {
-        "model": selected_model,
-        "messages": history,  # [{"role": "user", "content": f"{inputs}"}],
-        "temperature": temperature,  # 1.0,
-        "top_p": top_p,  # 1.0,
-        "n": 1,
-        "stream": stream,
-        "presence_penalty": 0,
-        "frequency_penalty": 0,
-    }
-    if stream:
-        timeout = timeout_streaming
-    else:
-        timeout = timeout_all
-    response = requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=timeout)
-    return response
-
-def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
-    def get_return_value():
-        return chatbot, history, status_text, all_token_counts
-
-    logging.info("实时回答模式")
-    partial_words = ""
-    counter = 0
-    status_text = "开始实时传输回答……"
-    history.append(construct_user(inputs))
-    history.append(construct_assistant(""))
-    chatbot.append((parse_text(inputs), ""))
-    user_token_count = 0
-    if len(all_token_counts) == 0:
-        system_prompt_token_count = count_token(construct_system(system_prompt))
-        user_token_count = count_token(construct_user(inputs)) + system_prompt_token_count
-    else:
-        user_token_count = count_token(construct_user(inputs))
-    all_token_counts.append(user_token_count)
-    logging.info(f"输入token计数: {user_token_count}")
-    yield get_return_value()
-    try:
-        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True, selected_model)
-    except requests.exceptions.ConnectTimeout:
-        status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-        yield get_return_value()
-        return
-    except requests.exceptions.ReadTimeout:
-        status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
-        yield get_return_value()
-        return
-
-    yield get_return_value()
-    error_json_str = ""
-
-    for chunk in tqdm(response.iter_lines()):
-        if counter == 0:
-            counter += 1
-            continue
-        counter += 1
-        # check whether each line is non-empty
-        if chunk:
-            chunk = chunk.decode()
-            chunklength = len(chunk)
-            try:
-                chunk = json.loads(chunk[6:])
-            except json.JSONDecodeError:
-                logging.info(chunk)
-                error_json_str += chunk
-                status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}"
-                yield get_return_value()
-                continue
-            # decode each line as response data is in bytes
-            if chunklength > 6 and "delta" in chunk['choices'][0]:
-                finish_reason = chunk['choices'][0]['finish_reason']
-                status_text = construct_token_message(sum(all_token_counts), stream=True)
-                if finish_reason == "stop":
-                    yield get_return_value()
-                    break
-                try:
-                    partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
-                except KeyError:
-                    status_text = standard_error_msg + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " + str(sum(all_token_counts))
-                    yield get_return_value()
-                    break
-                history[-1] = construct_assistant(partial_words)
-                chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
-                all_token_counts[-1] += 1
-                yield get_return_value()
-
-
-def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
-    logging.info("一次性回答模式")
-    history.append(construct_user(inputs))
-    history.append(construct_assistant(""))
-    chatbot.append((parse_text(inputs), ""))
-    all_token_counts.append(count_token(construct_user(inputs)))
-    try:
-        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False, selected_model)
-    except requests.exceptions.ConnectTimeout:
-        status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-        return chatbot, history, status_text, all_token_counts
-    except requests.exceptions.ProxyError:
-        status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
-        return chatbot, history, status_text, all_token_counts
-    except requests.exceptions.SSLError:
-        status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
-        return chatbot, history, status_text, all_token_counts
-    response = json.loads(response.text)
-    content = response["choices"][0]["message"]["content"]
-    history[-1] = construct_assistant(content)
-    chatbot[-1] = (parse_text(inputs), parse_text(content))
-    total_token_count = response["usage"]["total_tokens"]
-    all_token_counts[-1] = total_token_count - sum(all_token_counts)
-    status_text = construct_token_message(total_token_count)
-    return chatbot, history, status_text, all_token_counts
-
-
-def predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model=MODELS[0], use_websearch_checkbox=False, should_check_token_count=True):  # repetition_penalty, top_k
-    logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
-    if use_websearch_checkbox:
-        results = ddg(inputs, max_results=3)
-        web_results = []
-        for idx, result in enumerate(results):
-            logging.info(f"搜索结果{idx + 1}:{result}")
-            web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
-        web_results = "\n\n".join(web_results)
-        today = datetime.datetime.today().strftime("%Y-%m-%d")
-        inputs = websearch_prompt.replace("{current_date}", today).replace("{query}", inputs).replace("{web_results}", web_results)
-    if len(openai_api_key) != 51:
-        status_text = standard_error_msg + no_apikey_msg
-        logging.info(status_text)
-        chatbot.append((parse_text(inputs), ""))
-        if len(history) == 0:
-            history.append(construct_user(inputs))
-            history.append("")
-            all_token_counts.append(0)
-        else:
-            history[-2] = construct_user(inputs)
-        yield chatbot, history, status_text, all_token_counts
-        return
-    if stream:
-        yield chatbot, history, "开始生成回答……", all_token_counts
-    if stream:
-        logging.info("使用流式传输")
-        iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
-        for chatbot, history, status_text, all_token_counts in iter:
-            yield chatbot, history, status_text, all_token_counts
-    else:
-        logging.info("不使用流式传输")
-        chatbot, history, status_text, all_token_counts = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
-        yield chatbot, history, status_text, all_token_counts
-    logging.info(f"传输完毕。当前token计数为{all_token_counts}")
-    if len(history) > 1 and history[-1]['content'] != inputs:
-        logging.info("回答为:" + colorama.Fore.BLUE + f"{history[-1]['content']}" + colorama.Style.RESET_ALL)
-    if stream:
-        max_token = max_token_streaming
-    else:
-        max_token = max_token_all
-    if sum(all_token_counts) > max_token and should_check_token_count:
-        status_text = f"精简token中{all_token_counts}/{max_token}"
-        logging.info(status_text)
-        yield chatbot, history, status_text, all_token_counts
-        iter = reduce_token_size(openai_api_key, system_prompt, history, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model=selected_model, hidden=True)
-        for chatbot, history, status_text, all_token_counts in iter:
-            status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
-            yield chatbot, history, status_text, all_token_counts
-
-
-def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model=MODELS[0]):
-    logging.info("重试中……")
-    if len(history) == 0:
-        yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
-        return
-    history.pop()
-    inputs = history.pop()["content"]
-    token_count.pop()
-    iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model)
-    logging.info("重试完毕")
-    for x in iter:
-        yield x
-
-
-def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model=MODELS[0], hidden=False):
-    logging.info("开始减少token数量……")
-    iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model, should_check_token_count=False)
-    logging.info(f"chatbot: {chatbot}")
-    for chatbot, history, status_text, previous_token_count in iter:
-        history = history[-2:]
-        token_count = previous_token_count[-1:]
-        if hidden:
-            chatbot.pop()
-        yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count
-    logging.info("减少token数量完毕")
-
 
 def delete_last_conversation(chatbot, history, previous_token_count):
     if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
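The rewritten parse_text above no longer escapes Markdown characters line by line; it only drops blank lines inside code fences and closes an unterminated fence. A quick illustrative check (not part of the commit):

# Unterminated ``` fences are closed so downstream Markdown rendering stays valid.
text = "Here is code:\n```python\nprint('hi')"
print(parse_text(text))
# Here is code:
# ```python
# print('hi')
# ```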
@@ -320,7 +89,12 @@ def delete_last_conversation(chatbot, history, previous_token_count):
     if len(previous_token_count) > 0:
         logging.info("删除了一组对话的token计数记录")
         previous_token_count.pop()
-    return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count))
+    return (
+        chatbot,
+        history,
+        previous_token_count,
+        construct_token_message(sum(previous_token_count)),
+    )
 
 
 def save_file(filename, system, history, chatbot):
@@ -340,6 +114,7 @@ def save_file(filename, system, history, chatbot):
     logging.info("保存对话历史完毕")
     return os.path.join(HISTORY_DIR, filename)
 
+
 def save_chat_history(filename, system, history, chatbot):
     if filename == "":
         return
@@ -347,6 +122,7 @@
     filename += ".json"
     return save_file(filename, system, history, chatbot)
 
+
 def export_markdown(filename, system, history, chatbot):
     if filename == "":
         return
@@ -382,9 +158,11 @@ def load_chat_history(filename, system, history, chatbot):
     logging.info("没有找到对话历史文件,不执行任何操作")
     return filename, system, history, chatbot
 
+
 def sorted_by_pinyin(list):
     return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])
 
+
 def get_file_names(dir, plain=False, filetypes=[".json"]):
     logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
     files = []
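sorted_by_pinyin orders Chinese strings by the first letter of their pinyin reading, which keeps the history and template dropdowns predictably sorted. An illustrative call (not part of the commit):

# lazy_pinyin("翻译") -> ["fan", "yi"], so sorting compares "f" < "y" < "z".
print(sorted_by_pinyin(["中文", "英语", "翻译"]))  # ['翻译', '英语', '中文']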
@@ -401,10 +179,12 @@
     else:
         return gr.Dropdown.update(choices=files)
 
+
 def get_history_names(plain=False):
     logging.info("获取历史记录文件名列表")
     return get_file_names(HISTORY_DIR, plain)
 
+
 def load_template(filename, mode=0):
     logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
     lines = []
@@ -414,22 +194,28 @@ def load_template(filename, mode=0):
         lines = json.load(f)
         lines = [[i["act"], i["prompt"]] for i in lines]
     else:
-        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
+        with open(
+            os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8"
+        ) as csvfile:
             reader = csv.reader(csvfile)
             lines = list(reader)
             lines = lines[1:]
     if mode == 1:
         return sorted_by_pinyin([row[0] for row in lines])
     elif mode == 2:
-        return {row[0]:row[1] for row in lines}
+        return {row[0]: row[1] for row in lines}
     else:
         choices = sorted_by_pinyin([row[0] for row in lines])
-        return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
+        return {row[0]: row[1] for row in lines}, gr.Dropdown.update(
+            choices=choices, value=choices[0]
+        )
+
 
 def get_template_names(plain=False):
     logging.info("获取模板文件名列表")
     return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
 
+
 def get_template_content(templates, selection, original_system_prompt):
     logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
     try:
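Hypothetical usage of load_template (the file name is made up; a CSV template is expected to carry an act,prompt header row, which the lines[1:] slice discards):

templates, dropdown_update = load_template("demo.csv", mode=0)  # dict plus gr.Dropdown.update
act_names = load_template("demo.csv", mode=1)  # act column only, sorted by pinyin
prompt_dict = load_template("demo.csv", mode=2)  # {act: prompt}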
@@ -437,9 +223,62 @@
     except:
         return original_system_prompt
 
+
 def reset_state():
     logging.info("重置状态")
     return [], [], [], construct_token_message(0)
 
+
 def reset_textbox():
-    return gr.update(value='')
+    return gr.update(value="")
+
+
+def reset_default():
+    global API_URL
+    API_URL = "https://api.openai.com/v1/chat/completions"
+    os.environ.pop("HTTPS_PROXY", None)
+    os.environ.pop("https_proxy", None)
+    return gr.update(value=API_URL), gr.update(value=""), "API URL 和代理已重置"
+
+
+def change_api_url(url):
+    global API_URL
+    API_URL = url
+    msg = f"API地址更改为了{url}"
+    logging.info(msg)
+    return msg
+
+
+def change_proxy(proxy):
+    os.environ["HTTPS_PROXY"] = proxy
+    msg = f"代理更改为了{proxy}"
+    logging.info(msg)
+    return msg
+
+
+def hide_middle_chars(s):
+    if len(s) <= 8:
+        return s
+    else:
+        head = s[:4]
+        tail = s[-4:]
+        hidden = "*" * (len(s) - 8)
+        return head + hidden + tail
+
+
+def submit_key(key):
+    key = key.strip()
+    msg = f"API密钥更改为了{hide_middle_chars(key)}"
+    logging.info(msg)
+    return key, msg
+
+
+def sha1sum(filename):
+    sha1 = hashlib.sha1()
+    sha1.update(filename.encode("utf-8"))
+    return sha1.hexdigest()
+
+
+def replace_today(prompt):
+    today = datetime.datetime.today().strftime("%Y-%m-%d")
+    return prompt.replace("{current_date}", today)
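A sketch of how the new helpers fit together; presumably they back the new UI callbacks, and the key and proxy address below are placeholders:

key, msg = submit_key(" sk-abcdefghijklmnop ")  # strips surrounding whitespace
print(msg)  # API密钥更改为了sk-a***********mnop
change_proxy("http://127.0.0.1:7890")  # exported as HTTPS_PROXY for later requests
change_api_url("https://example-mirror.invalid/v1/chat/completions")
reset_default()  # restores the official API_URL and clears both proxy variables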