Spaces:
Runtime error
Runtime error
add new features from the authors
Browse files- README.md +5 -7
- app.py +155 -142
- assets/custom.css +192 -0
- modules/__pycache__/chat_func.cpython-39.pyc +0 -0
- modules/__pycache__/llama_func.cpython-39.pyc +0 -0
- modules/__pycache__/openai_func.cpython-39.pyc +0 -0
- modules/__pycache__/overwrites.cpython-39.pyc +0 -0
- modules/__pycache__/presets.cpython-39.pyc +0 -0
- modules/__pycache__/shared.cpython-39.pyc +0 -0
- modules/__pycache__/utils.cpython-39.pyc +0 -0
- modules/chat_func.py +473 -0
- modules/llama_func.py +201 -0
- modules/openai_func.py +70 -0
- modules/overwrites.py +56 -0
- modules/presets.py +165 -0
- modules/shared.py +24 -0
- modules/utils.py +436 -0
- requirements.txt +0 -1
- templates/3 川虎的Prompts.json +0 -14
README.md
CHANGED
@@ -1,14 +1,12 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
colorTo: red
|
6 |
sdk: gradio
|
7 |
sdk_version: 3.23.0
|
8 |
-
app_file:
|
9 |
pinned: false
|
10 |
license: gpl-3.0
|
11 |
-
duplicated_from: JohnSmith9982/ChuanhuChatGPT
|
12 |
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: YangyangChatGPT
|
3 |
+
emoji: 🐯
|
4 |
+
colorFrom: green
|
5 |
colorTo: red
|
6 |
sdk: gradio
|
7 |
sdk_version: 3.23.0
|
8 |
+
app_file: app.py
|
9 |
pinned: false
|
10 |
license: gpl-3.0
|
|
|
11 |
---
|
12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
app.py
CHANGED
@@ -5,10 +5,11 @@ import sys
|
|
5 |
|
6 |
import gradio as gr
|
7 |
|
8 |
-
from utils import *
|
9 |
-
from presets import *
|
10 |
-
from overwrites import *
|
11 |
-
from chat_func import *
|
|
|
12 |
|
13 |
logging.basicConfig(
|
14 |
level=logging.DEBUG,
|
@@ -25,8 +26,9 @@ else:
|
|
25 |
|
26 |
authflag = False
|
27 |
|
28 |
-
if
|
29 |
my_api_key = os.environ.get("my_api_key")
|
|
|
30 |
if my_api_key == "empty":
|
31 |
logging.error("Please give a api key!")
|
32 |
sys.exit(1)
|
@@ -36,101 +38,44 @@ if dockerflag:
|
|
36 |
if not (isinstance(username, type(None)) or isinstance(password, type(None))):
|
37 |
authflag = True
|
38 |
else:
|
39 |
-
if (
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
):
|
44 |
-
|
45 |
-
|
46 |
-
if os.path.exists("auth.json"):
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
|
|
53 |
|
54 |
gr.Chatbot.postprocess = postprocess
|
55 |
PromptHelper.compact_text_chunks = compact_text_chunks
|
56 |
|
57 |
-
with open("custom.css", "r", encoding="utf-8") as f:
|
58 |
customCSS = f.read()
|
59 |
|
60 |
-
with gr.Blocks(
|
61 |
-
css=customCSS,
|
62 |
-
theme=gr.themes.Soft(
|
63 |
-
primary_hue=gr.themes.Color(
|
64 |
-
c50="#02C160",
|
65 |
-
c100="rgba(2, 193, 96, 0.2)",
|
66 |
-
c200="#02C160",
|
67 |
-
c300="rgba(2, 193, 96, 0.32)",
|
68 |
-
c400="rgba(2, 193, 96, 0.32)",
|
69 |
-
c500="rgba(2, 193, 96, 1.0)",
|
70 |
-
c600="rgba(2, 193, 96, 1.0)",
|
71 |
-
c700="rgba(2, 193, 96, 0.32)",
|
72 |
-
c800="rgba(2, 193, 96, 0.32)",
|
73 |
-
c900="#02C160",
|
74 |
-
c950="#02C160",
|
75 |
-
),
|
76 |
-
secondary_hue=gr.themes.Color(
|
77 |
-
c50="#576b95",
|
78 |
-
c100="#576b95",
|
79 |
-
c200="#576b95",
|
80 |
-
c300="#576b95",
|
81 |
-
c400="#576b95",
|
82 |
-
c500="#576b95",
|
83 |
-
c600="#576b95",
|
84 |
-
c700="#576b95",
|
85 |
-
c800="#576b95",
|
86 |
-
c900="#576b95",
|
87 |
-
c950="#576b95",
|
88 |
-
),
|
89 |
-
neutral_hue=gr.themes.Color(
|
90 |
-
name="gray",
|
91 |
-
c50="#f9fafb",
|
92 |
-
c100="#f3f4f6",
|
93 |
-
c200="#e5e7eb",
|
94 |
-
c300="#d1d5db",
|
95 |
-
c400="#B2B2B2",
|
96 |
-
c500="#808080",
|
97 |
-
c600="#636363",
|
98 |
-
c700="#515151",
|
99 |
-
c800="#393939",
|
100 |
-
c900="#272727",
|
101 |
-
c950="#171717",
|
102 |
-
),
|
103 |
-
radius_size=gr.themes.sizes.radius_sm,
|
104 |
-
).set(
|
105 |
-
button_primary_background_fill="#06AE56",
|
106 |
-
button_primary_background_fill_dark="#06AE56",
|
107 |
-
button_primary_background_fill_hover="#07C863",
|
108 |
-
button_primary_border_color="#06AE56",
|
109 |
-
button_primary_border_color_dark="#06AE56",
|
110 |
-
button_primary_text_color="#FFFFFF",
|
111 |
-
button_primary_text_color_dark="#FFFFFF",
|
112 |
-
button_secondary_background_fill="#F2F2F2",
|
113 |
-
button_secondary_background_fill_dark="#2B2B2B",
|
114 |
-
button_secondary_text_color="#393939",
|
115 |
-
button_secondary_text_color_dark="#FFFFFF",
|
116 |
-
# background_fill_primary="#F7F7F7",
|
117 |
-
# background_fill_primary_dark="#1F1F1F",
|
118 |
-
block_title_text_color="*primary_500",
|
119 |
-
block_title_background_fill="*primary_100",
|
120 |
-
input_background_fill="#F6F6F6",
|
121 |
-
),
|
122 |
-
) as demo:
|
123 |
history = gr.State([])
|
124 |
token_count = gr.State([])
|
125 |
promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
|
126 |
user_api_key = gr.State(my_api_key)
|
127 |
-
|
128 |
-
|
129 |
topic = gr.State("未命名对话历史记录")
|
130 |
|
131 |
with gr.Row():
|
132 |
-
gr.
|
133 |
-
|
|
|
|
|
|
|
|
|
134 |
|
135 |
with gr.Row(scale=1).style(equal_height=True):
|
136 |
with gr.Column(scale=5):
|
@@ -139,16 +84,18 @@ with gr.Blocks(
|
|
139 |
with gr.Row(scale=1):
|
140 |
with gr.Column(scale=12):
|
141 |
user_input = gr.Textbox(
|
142 |
-
show_label=False, placeholder="在这里输入"
|
143 |
).style(container=False)
|
144 |
with gr.Column(min_width=70, scale=1):
|
145 |
submitBtn = gr.Button("发送", variant="primary")
|
|
|
146 |
with gr.Row(scale=1):
|
147 |
emptyBtn = gr.Button(
|
148 |
"🧹 新的对话",
|
149 |
)
|
150 |
retryBtn = gr.Button("🔄 重新生成")
|
151 |
-
|
|
|
152 |
reduceTokenBtn = gr.Button("♻️ 总结对话")
|
153 |
|
154 |
with gr.Column():
|
@@ -162,6 +109,7 @@ with gr.Blocks(
|
|
162 |
visible=not HIDE_MY_KEY,
|
163 |
label="API-Key",
|
164 |
)
|
|
|
165 |
model_select_dropdown = gr.Dropdown(
|
166 |
label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0]
|
167 |
)
|
@@ -169,6 +117,12 @@ with gr.Blocks(
|
|
169 |
label="实时传输回答", value=True, visible=enable_streaming_option
|
170 |
)
|
171 |
use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
172 |
index_files = gr.Files(label="上传索引文件", type="file", multiple=True)
|
173 |
|
174 |
with gr.Tab(label="Prompt"):
|
@@ -234,8 +188,8 @@ with gr.Blocks(
|
|
234 |
downloadFile = gr.File(interactive=True)
|
235 |
|
236 |
with gr.Tab(label="高级"):
|
237 |
-
default_btn = gr.Button("🔙 恢复默认设置")
|
238 |
gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")
|
|
|
239 |
|
240 |
with gr.Accordion("参数", open=False):
|
241 |
top_p = gr.Slider(
|
@@ -255,35 +209,33 @@ with gr.Blocks(
|
|
255 |
label="Temperature",
|
256 |
)
|
257 |
|
258 |
-
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
|
272 |
-
|
273 |
-
|
|
|
274 |
|
275 |
gr.Markdown(description)
|
276 |
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
user_input.submit(
|
281 |
-
predict,
|
282 |
-
[
|
283 |
user_api_key,
|
284 |
systemPromptTxt,
|
285 |
history,
|
286 |
-
|
287 |
chatbot,
|
288 |
token_count,
|
289 |
top_p,
|
@@ -292,39 +244,52 @@ with gr.Blocks(
|
|
292 |
model_select_dropdown,
|
293 |
use_websearch_checkbox,
|
294 |
index_files,
|
|
|
295 |
],
|
296 |
-
[chatbot, history, status_display, token_count],
|
297 |
show_progress=True,
|
298 |
)
|
299 |
-
user_input.submit(reset_textbox, [], [user_input])
|
300 |
|
301 |
-
|
302 |
-
|
303 |
-
[
|
304 |
-
|
305 |
-
systemPromptTxt,
|
306 |
-
history,
|
307 |
-
user_input,
|
308 |
-
chatbot,
|
309 |
-
token_count,
|
310 |
-
top_p,
|
311 |
-
temperature,
|
312 |
-
use_streaming_checkbox,
|
313 |
-
model_select_dropdown,
|
314 |
-
use_websearch_checkbox,
|
315 |
-
index_files,
|
316 |
-
],
|
317 |
-
[chatbot, history, status_display, token_count],
|
318 |
show_progress=True,
|
319 |
)
|
320 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
321 |
|
322 |
emptyBtn.click(
|
323 |
reset_state,
|
324 |
outputs=[chatbot, history, token_count, status_display],
|
325 |
show_progress=True,
|
326 |
)
|
|
|
327 |
|
|
|
328 |
retryBtn.click(
|
329 |
retry,
|
330 |
[
|
@@ -337,10 +302,18 @@ with gr.Blocks(
|
|
337 |
temperature,
|
338 |
use_streaming_checkbox,
|
339 |
model_select_dropdown,
|
|
|
340 |
],
|
341 |
[chatbot, history, status_display, token_count],
|
342 |
show_progress=True,
|
343 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
344 |
|
345 |
delLastBtn.click(
|
346 |
delete_last_conversation,
|
@@ -361,10 +334,16 @@ with gr.Blocks(
|
|
361 |
temperature,
|
362 |
gr.State(0),
|
363 |
model_select_dropdown,
|
|
|
364 |
],
|
365 |
[chatbot, history, status_display, token_count],
|
366 |
show_progress=True,
|
367 |
)
|
|
|
|
|
|
|
|
|
|
|
368 |
|
369 |
# Template
|
370 |
templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
|
@@ -425,6 +404,26 @@ with gr.Blocks(
|
|
425 |
show_progress=True,
|
426 |
)
|
427 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
428 |
logging.info(
|
429 |
colorama.Back.GREEN
|
430 |
+ "\n温馨提示:访问 http://localhost:7860 查看界面"
|
@@ -438,18 +437,32 @@ if __name__ == "__main__":
|
|
438 |
# if running in Docker
|
439 |
if dockerflag:
|
440 |
if authflag:
|
441 |
-
demo.queue().launch(
|
442 |
-
server_name="0.0.0.0",
|
443 |
-
|
|
|
|
|
444 |
)
|
445 |
else:
|
446 |
-
demo.queue().launch(
|
|
|
|
|
|
|
|
|
|
|
447 |
# if not running in Docker
|
448 |
else:
|
449 |
if authflag:
|
450 |
-
demo.queue().launch(
|
|
|
|
|
|
|
|
|
|
|
451 |
else:
|
452 |
-
demo.queue(concurrency_count=
|
453 |
-
|
454 |
-
|
455 |
-
# demo.queue().launch(
|
|
|
|
|
|
5 |
|
6 |
import gradio as gr
|
7 |
|
8 |
+
from modules.utils import *
|
9 |
+
from modules.presets import *
|
10 |
+
from modules.overwrites import *
|
11 |
+
from modules.chat_func import *
|
12 |
+
from modules.openai_func import get_usage
|
13 |
|
14 |
logging.basicConfig(
|
15 |
level=logging.DEBUG,
|
|
|
26 |
|
27 |
authflag = False
|
28 |
|
29 |
+
if not my_api_key:
|
30 |
my_api_key = os.environ.get("my_api_key")
|
31 |
+
if dockerflag:
|
32 |
if my_api_key == "empty":
|
33 |
logging.error("Please give a api key!")
|
34 |
sys.exit(1)
|
|
|
38 |
if not (isinstance(username, type(None)) or isinstance(password, type(None))):
|
39 |
authflag = True
|
40 |
else:
|
41 |
+
# if (
|
42 |
+
# not my_api_key
|
43 |
+
# and os.path.exists("api_key.txt")
|
44 |
+
# and os.path.getsize("api_key.txt")
|
45 |
+
# ):
|
46 |
+
# with open("api_key.txt", "r") as f:
|
47 |
+
# my_api_key = f.read().strip()
|
48 |
+
# if os.path.exists("auth.json"):
|
49 |
+
# with open("auth.json", "r", encoding='utf-8') as f:
|
50 |
+
# auth = json.load(f)
|
51 |
+
# username = auth["username"]
|
52 |
+
# password = auth["password"]
|
53 |
+
# if username != "" and password != "":
|
54 |
+
# authflag = True
|
55 |
+
authflag = True
|
56 |
|
57 |
gr.Chatbot.postprocess = postprocess
|
58 |
PromptHelper.compact_text_chunks = compact_text_chunks
|
59 |
|
60 |
+
with open("assets/custom.css", "r", encoding="utf-8") as f:
|
61 |
customCSS = f.read()
|
62 |
|
63 |
+
with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
64 |
history = gr.State([])
|
65 |
token_count = gr.State([])
|
66 |
promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
|
67 |
user_api_key = gr.State(my_api_key)
|
68 |
+
user_question = gr.State("")
|
69 |
+
outputing = gr.State(False)
|
70 |
topic = gr.State("未命名对话历史记录")
|
71 |
|
72 |
with gr.Row():
|
73 |
+
with gr.Column(scale=1):
|
74 |
+
gr.HTML(title)
|
75 |
+
with gr.Column(scale=4):
|
76 |
+
gr.HTML('<center><a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>')
|
77 |
+
with gr.Column(scale=4):
|
78 |
+
status_display = gr.Markdown(get_geoip(), elem_id="status_display")
|
79 |
|
80 |
with gr.Row(scale=1).style(equal_height=True):
|
81 |
with gr.Column(scale=5):
|
|
|
84 |
with gr.Row(scale=1):
|
85 |
with gr.Column(scale=12):
|
86 |
user_input = gr.Textbox(
|
87 |
+
show_label=False, placeholder="在这里输入", interactive=True
|
88 |
).style(container=False)
|
89 |
with gr.Column(min_width=70, scale=1):
|
90 |
submitBtn = gr.Button("发送", variant="primary")
|
91 |
+
cancelBtn = gr.Button("取消", variant="secondary", visible=False)
|
92 |
with gr.Row(scale=1):
|
93 |
emptyBtn = gr.Button(
|
94 |
"🧹 新的对话",
|
95 |
)
|
96 |
retryBtn = gr.Button("🔄 重新生成")
|
97 |
+
delFirstBtn = gr.Button("🗑️ 删除最旧对话")
|
98 |
+
delLastBtn = gr.Button("🗑️ 删除最新对话")
|
99 |
reduceTokenBtn = gr.Button("♻️ 总结对话")
|
100 |
|
101 |
with gr.Column():
|
|
|
109 |
visible=not HIDE_MY_KEY,
|
110 |
label="API-Key",
|
111 |
)
|
112 |
+
usageTxt = gr.Markdown(get_usage(my_api_key), elem_id="usage_display")
|
113 |
model_select_dropdown = gr.Dropdown(
|
114 |
label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0]
|
115 |
)
|
|
|
117 |
label="实时传输回答", value=True, visible=enable_streaming_option
|
118 |
)
|
119 |
use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)
|
120 |
+
language_select_dropdown = gr.Dropdown(
|
121 |
+
label="选择回复语言(针对搜索&索引功能)",
|
122 |
+
choices=REPLY_LANGUAGES,
|
123 |
+
multiselect=False,
|
124 |
+
value=REPLY_LANGUAGES[0],
|
125 |
+
)
|
126 |
index_files = gr.Files(label="上传索引文件", type="file", multiple=True)
|
127 |
|
128 |
with gr.Tab(label="Prompt"):
|
|
|
188 |
downloadFile = gr.File(interactive=True)
|
189 |
|
190 |
with gr.Tab(label="高级"):
|
|
|
191 |
gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")
|
192 |
+
default_btn = gr.Button("🔙 恢复默认设置")
|
193 |
|
194 |
with gr.Accordion("参数", open=False):
|
195 |
top_p = gr.Slider(
|
|
|
209 |
label="Temperature",
|
210 |
)
|
211 |
|
212 |
+
with gr.Accordion("网络设置", open=False):
|
213 |
+
apiurlTxt = gr.Textbox(
|
214 |
+
show_label=True,
|
215 |
+
placeholder=f"在这里输入API地址...",
|
216 |
+
label="API地址",
|
217 |
+
value="https://api.openai.com/v1/chat/completions",
|
218 |
+
lines=2,
|
219 |
+
)
|
220 |
+
changeAPIURLBtn = gr.Button("🔄 切换API地址")
|
221 |
+
proxyTxt = gr.Textbox(
|
222 |
+
show_label=True,
|
223 |
+
placeholder=f"在这里输入代理地址...",
|
224 |
+
label="代理地址(示例:http://127.0.0.1:10809)",
|
225 |
+
value="",
|
226 |
+
lines=2,
|
227 |
+
)
|
228 |
+
changeProxyBtn = gr.Button("🔄 设置代理地址")
|
229 |
|
230 |
gr.Markdown(description)
|
231 |
|
232 |
+
chatgpt_predict_args = dict(
|
233 |
+
fn=predict,
|
234 |
+
inputs=[
|
|
|
|
|
|
|
235 |
user_api_key,
|
236 |
systemPromptTxt,
|
237 |
history,
|
238 |
+
user_question,
|
239 |
chatbot,
|
240 |
token_count,
|
241 |
top_p,
|
|
|
244 |
model_select_dropdown,
|
245 |
use_websearch_checkbox,
|
246 |
index_files,
|
247 |
+
language_select_dropdown,
|
248 |
],
|
249 |
+
outputs=[chatbot, history, status_display, token_count],
|
250 |
show_progress=True,
|
251 |
)
|
|
|
252 |
|
253 |
+
start_outputing_args = dict(
|
254 |
+
fn=start_outputing,
|
255 |
+
inputs=[],
|
256 |
+
outputs=[submitBtn, cancelBtn],
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
257 |
show_progress=True,
|
258 |
)
|
259 |
+
|
260 |
+
end_outputing_args = dict(
|
261 |
+
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
|
262 |
+
)
|
263 |
+
|
264 |
+
reset_textbox_args = dict(
|
265 |
+
fn=reset_textbox, inputs=[], outputs=[user_input]
|
266 |
+
)
|
267 |
+
|
268 |
+
transfer_input_args = dict(
|
269 |
+
fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input], show_progress=True
|
270 |
+
)
|
271 |
+
|
272 |
+
get_usage_args = dict(
|
273 |
+
fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False
|
274 |
+
)
|
275 |
+
|
276 |
+
# Chatbot
|
277 |
+
cancelBtn.click(cancel_outputing, [], [])
|
278 |
+
|
279 |
+
user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args)
|
280 |
+
user_input.submit(**get_usage_args)
|
281 |
+
|
282 |
+
submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args)
|
283 |
+
submitBtn.click(**get_usage_args)
|
284 |
|
285 |
emptyBtn.click(
|
286 |
reset_state,
|
287 |
outputs=[chatbot, history, token_count, status_display],
|
288 |
show_progress=True,
|
289 |
)
|
290 |
+
emptyBtn.click(**reset_textbox_args)
|
291 |
|
292 |
+
retryBtn.click(**reset_textbox_args)
|
293 |
retryBtn.click(
|
294 |
retry,
|
295 |
[
|
|
|
302 |
temperature,
|
303 |
use_streaming_checkbox,
|
304 |
model_select_dropdown,
|
305 |
+
language_select_dropdown,
|
306 |
],
|
307 |
[chatbot, history, status_display, token_count],
|
308 |
show_progress=True,
|
309 |
)
|
310 |
+
retryBtn.click(**get_usage_args)
|
311 |
+
|
312 |
+
delFirstBtn.click(
|
313 |
+
delete_first_conversation,
|
314 |
+
[history, token_count],
|
315 |
+
[history, token_count, status_display],
|
316 |
+
)
|
317 |
|
318 |
delLastBtn.click(
|
319 |
delete_last_conversation,
|
|
|
334 |
temperature,
|
335 |
gr.State(0),
|
336 |
model_select_dropdown,
|
337 |
+
language_select_dropdown,
|
338 |
],
|
339 |
[chatbot, history, status_display, token_count],
|
340 |
show_progress=True,
|
341 |
)
|
342 |
+
reduceTokenBtn.click(**get_usage_args)
|
343 |
+
|
344 |
+
# ChatGPT
|
345 |
+
keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args)
|
346 |
+
keyTxt.submit(**get_usage_args)
|
347 |
|
348 |
# Template
|
349 |
templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
|
|
|
404 |
show_progress=True,
|
405 |
)
|
406 |
|
407 |
+
# check username and password, get api key
|
408 |
+
def check_access_right(username, password):
|
409 |
+
try:
|
410 |
+
print("check", username, "-", password)
|
411 |
+
with open('encrypted.bin', 'rb') as f:
|
412 |
+
ciphertext = f.read()
|
413 |
+
key = generate_key(username, password)
|
414 |
+
decoded_api_key = decrypt(ciphertext, key)
|
415 |
+
my_api_key = decoded_api_key.decode()
|
416 |
+
submit_key(my_api_key)
|
417 |
+
keyTxt.update(my_api_key)
|
418 |
+
keyTxt.value = hide_middle_chars(my_api_key)
|
419 |
+
user_api_key.value = my_api_key
|
420 |
+
# user_passward.value = password
|
421 |
+
# user_name.value = username
|
422 |
+
return True
|
423 |
+
except Exception:
|
424 |
+
print(Exception)
|
425 |
+
return False
|
426 |
+
|
427 |
logging.info(
|
428 |
colorama.Back.GREEN
|
429 |
+ "\n温馨提示:访问 http://localhost:7860 查看界面"
|
|
|
437 |
# if running in Docker
|
438 |
if dockerflag:
|
439 |
if authflag:
|
440 |
+
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
|
441 |
+
server_name="0.0.0.0",
|
442 |
+
server_port=7860,
|
443 |
+
auth=(username, password),
|
444 |
+
favicon_path="./assets/favicon.ico",
|
445 |
)
|
446 |
else:
|
447 |
+
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
|
448 |
+
server_name="0.0.0.0",
|
449 |
+
server_port=7860,
|
450 |
+
share=False,
|
451 |
+
favicon_path="./assets/favicon.ico",
|
452 |
+
)
|
453 |
# if not running in Docker
|
454 |
else:
|
455 |
if authflag:
|
456 |
+
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
|
457 |
+
share=False,
|
458 |
+
auth=check_access_right,
|
459 |
+
favicon_path="./assets/favicon.ico",
|
460 |
+
inbrowser=True,
|
461 |
+
)
|
462 |
else:
|
463 |
+
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
|
464 |
+
share=False, favicon_path="./assets/favicon.ico", inbrowser=True
|
465 |
+
) # 改为 share=True 可以创建公开分享链接
|
466 |
+
# demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口
|
467 |
+
# demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码
|
468 |
+
# demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理
|
assets/custom.css
ADDED
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
:root {
|
2 |
+
--chatbot-color-light: #F3F3F3;
|
3 |
+
--chatbot-color-dark: #121111;
|
4 |
+
}
|
5 |
+
|
6 |
+
/* status_display */
|
7 |
+
#status_display {
|
8 |
+
display: flex;
|
9 |
+
min-height: 2.5em;
|
10 |
+
align-items: flex-end;
|
11 |
+
justify-content: flex-end;
|
12 |
+
}
|
13 |
+
#status_display p {
|
14 |
+
font-size: .85em;
|
15 |
+
font-family: monospace;
|
16 |
+
color: var(--body-text-color-subdued);
|
17 |
+
}
|
18 |
+
|
19 |
+
#chuanhu_chatbot, #status_display {
|
20 |
+
transition: all 0.6s;
|
21 |
+
}
|
22 |
+
|
23 |
+
/* usage_display */
|
24 |
+
#usage_display {
|
25 |
+
height: 1em;
|
26 |
+
}
|
27 |
+
#usage_display p{
|
28 |
+
padding: 0 1em;
|
29 |
+
font-size: .85em;
|
30 |
+
font-family: monospace;
|
31 |
+
color: var(--body-text-color-subdued);
|
32 |
+
}
|
33 |
+
/* list */
|
34 |
+
ol:not(.options), ul:not(.options) {
|
35 |
+
padding-inline-start: 2em !important;
|
36 |
+
}
|
37 |
+
|
38 |
+
/* 亮色 */
|
39 |
+
@media (prefers-color-scheme: light) {
|
40 |
+
#chuanhu_chatbot {
|
41 |
+
background-color: var(--chatbot-color-light) !important;
|
42 |
+
color: #000000 !important;
|
43 |
+
}
|
44 |
+
[data-testid = "bot"] {
|
45 |
+
background-color: #FFFFFF !important;
|
46 |
+
}
|
47 |
+
[data-testid = "user"] {
|
48 |
+
background-color: #95EC69 !important;
|
49 |
+
}
|
50 |
+
}
|
51 |
+
/* 暗色 */
|
52 |
+
@media (prefers-color-scheme: dark) {
|
53 |
+
#chuanhu_chatbot {
|
54 |
+
background-color: var(--chatbot-color-dark) !important;
|
55 |
+
color: #FFFFFF !important;
|
56 |
+
}
|
57 |
+
[data-testid = "bot"] {
|
58 |
+
background-color: #2C2C2C !important;
|
59 |
+
}
|
60 |
+
[data-testid = "user"] {
|
61 |
+
background-color: #26B561 !important;
|
62 |
+
}
|
63 |
+
body {
|
64 |
+
background-color: var(--neutral-950) !important;
|
65 |
+
}
|
66 |
+
}
|
67 |
+
/* 对话气泡 */
|
68 |
+
[class *= "message"] {
|
69 |
+
border-radius: var(--radius-xl) !important;
|
70 |
+
border: none;
|
71 |
+
padding: var(--spacing-xl) !important;
|
72 |
+
font-size: var(--text-md) !important;
|
73 |
+
line-height: var(--line-md) !important;
|
74 |
+
min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
|
75 |
+
min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
|
76 |
+
}
|
77 |
+
[data-testid = "bot"] {
|
78 |
+
max-width: 85%;
|
79 |
+
border-bottom-left-radius: 0 !important;
|
80 |
+
}
|
81 |
+
[data-testid = "user"] {
|
82 |
+
max-width: 85%;
|
83 |
+
width: auto !important;
|
84 |
+
border-bottom-right-radius: 0 !important;
|
85 |
+
}
|
86 |
+
/* 表格 */
|
87 |
+
table {
|
88 |
+
margin: 1em 0;
|
89 |
+
border-collapse: collapse;
|
90 |
+
empty-cells: show;
|
91 |
+
}
|
92 |
+
td,th {
|
93 |
+
border: 1.2px solid var(--border-color-primary) !important;
|
94 |
+
padding: 0.2em;
|
95 |
+
}
|
96 |
+
thead {
|
97 |
+
background-color: rgba(175,184,193,0.2);
|
98 |
+
}
|
99 |
+
thead th {
|
100 |
+
padding: .5em .2em;
|
101 |
+
}
|
102 |
+
/* 行内代码 */
|
103 |
+
code {
|
104 |
+
display: inline;
|
105 |
+
white-space: break-spaces;
|
106 |
+
border-radius: 6px;
|
107 |
+
margin: 0 2px 0 2px;
|
108 |
+
padding: .2em .4em .1em .4em;
|
109 |
+
background-color: rgba(175,184,193,0.2);
|
110 |
+
}
|
111 |
+
/* 代码块 */
|
112 |
+
pre code {
|
113 |
+
display: block;
|
114 |
+
overflow: auto;
|
115 |
+
white-space: pre;
|
116 |
+
background-color: hsla(0, 0%, 0%, 80%)!important;
|
117 |
+
border-radius: 10px;
|
118 |
+
padding: 1.4em 1.2em 0em 1.4em;
|
119 |
+
margin: 1.2em 2em 1.2em 0.5em;
|
120 |
+
color: #FFF;
|
121 |
+
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
|
122 |
+
}
|
123 |
+
/* 代码高亮样式 */
|
124 |
+
.highlight .hll { background-color: #49483e }
|
125 |
+
.highlight .c { color: #75715e } /* Comment */
|
126 |
+
.highlight .err { color: #960050; background-color: #1e0010 } /* Error */
|
127 |
+
.highlight .k { color: #66d9ef } /* Keyword */
|
128 |
+
.highlight .l { color: #ae81ff } /* Literal */
|
129 |
+
.highlight .n { color: #f8f8f2 } /* Name */
|
130 |
+
.highlight .o { color: #f92672 } /* Operator */
|
131 |
+
.highlight .p { color: #f8f8f2 } /* Punctuation */
|
132 |
+
.highlight .ch { color: #75715e } /* Comment.Hashbang */
|
133 |
+
.highlight .cm { color: #75715e } /* Comment.Multiline */
|
134 |
+
.highlight .cp { color: #75715e } /* Comment.Preproc */
|
135 |
+
.highlight .cpf { color: #75715e } /* Comment.PreprocFile */
|
136 |
+
.highlight .c1 { color: #75715e } /* Comment.Single */
|
137 |
+
.highlight .cs { color: #75715e } /* Comment.Special */
|
138 |
+
.highlight .gd { color: #f92672 } /* Generic.Deleted */
|
139 |
+
.highlight .ge { font-style: italic } /* Generic.Emph */
|
140 |
+
.highlight .gi { color: #a6e22e } /* Generic.Inserted */
|
141 |
+
.highlight .gs { font-weight: bold } /* Generic.Strong */
|
142 |
+
.highlight .gu { color: #75715e } /* Generic.Subheading */
|
143 |
+
.highlight .kc { color: #66d9ef } /* Keyword.Constant */
|
144 |
+
.highlight .kd { color: #66d9ef } /* Keyword.Declaration */
|
145 |
+
.highlight .kn { color: #f92672 } /* Keyword.Namespace */
|
146 |
+
.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
|
147 |
+
.highlight .kr { color: #66d9ef } /* Keyword.Reserved */
|
148 |
+
.highlight .kt { color: #66d9ef } /* Keyword.Type */
|
149 |
+
.highlight .ld { color: #e6db74 } /* Literal.Date */
|
150 |
+
.highlight .m { color: #ae81ff } /* Literal.Number */
|
151 |
+
.highlight .s { color: #e6db74 } /* Literal.String */
|
152 |
+
.highlight .na { color: #a6e22e } /* Name.Attribute */
|
153 |
+
.highlight .nb { color: #f8f8f2 } /* Name.Builtin */
|
154 |
+
.highlight .nc { color: #a6e22e } /* Name.Class */
|
155 |
+
.highlight .no { color: #66d9ef } /* Name.Constant */
|
156 |
+
.highlight .nd { color: #a6e22e } /* Name.Decorator */
|
157 |
+
.highlight .ni { color: #f8f8f2 } /* Name.Entity */
|
158 |
+
.highlight .ne { color: #a6e22e } /* Name.Exception */
|
159 |
+
.highlight .nf { color: #a6e22e } /* Name.Function */
|
160 |
+
.highlight .nl { color: #f8f8f2 } /* Name.Label */
|
161 |
+
.highlight .nn { color: #f8f8f2 } /* Name.Namespace */
|
162 |
+
.highlight .nx { color: #a6e22e } /* Name.Other */
|
163 |
+
.highlight .py { color: #f8f8f2 } /* Name.Property */
|
164 |
+
.highlight .nt { color: #f92672 } /* Name.Tag */
|
165 |
+
.highlight .nv { color: #f8f8f2 } /* Name.Variable */
|
166 |
+
.highlight .ow { color: #f92672 } /* Operator.Word */
|
167 |
+
.highlight .w { color: #f8f8f2 } /* Text.Whitespace */
|
168 |
+
.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
|
169 |
+
.highlight .mf { color: #ae81ff } /* Literal.Number.Float */
|
170 |
+
.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
|
171 |
+
.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
|
172 |
+
.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
|
173 |
+
.highlight .sa { color: #e6db74 } /* Literal.String.Affix */
|
174 |
+
.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
|
175 |
+
.highlight .sc { color: #e6db74 } /* Literal.String.Char */
|
176 |
+
.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
|
177 |
+
.highlight .sd { color: #e6db74 } /* Literal.String.Doc */
|
178 |
+
.highlight .s2 { color: #e6db74 } /* Literal.String.Double */
|
179 |
+
.highlight .se { color: #ae81ff } /* Literal.String.Escape */
|
180 |
+
.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
|
181 |
+
.highlight .si { color: #e6db74 } /* Literal.String.Interpol */
|
182 |
+
.highlight .sx { color: #e6db74 } /* Literal.String.Other */
|
183 |
+
.highlight .sr { color: #e6db74 } /* Literal.String.Regex */
|
184 |
+
.highlight .s1 { color: #e6db74 } /* Literal.String.Single */
|
185 |
+
.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
|
186 |
+
.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
|
187 |
+
.highlight .fm { color: #a6e22e } /* Name.Function.Magic */
|
188 |
+
.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
|
189 |
+
.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
|
190 |
+
.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
|
191 |
+
.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
|
192 |
+
.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
|
modules/__pycache__/chat_func.cpython-39.pyc
ADDED
Binary file (8.81 kB). View file
|
|
modules/__pycache__/llama_func.cpython-39.pyc
ADDED
Binary file (4.6 kB). View file
|
|
modules/__pycache__/openai_func.cpython-39.pyc
ADDED
Binary file (1.79 kB). View file
|
|
modules/__pycache__/overwrites.cpython-39.pyc
ADDED
Binary file (2.61 kB). View file
|
|
modules/__pycache__/presets.cpython-39.pyc
ADDED
Binary file (4.72 kB). View file
|
|
modules/__pycache__/shared.cpython-39.pyc
ADDED
Binary file (1.08 kB). View file
|
|
modules/__pycache__/utils.cpython-39.pyc
ADDED
Binary file (14 kB). View file
|
|
modules/chat_func.py
ADDED
@@ -0,0 +1,473 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding:utf-8 -*-
|
2 |
+
from __future__ import annotations
|
3 |
+
from typing import TYPE_CHECKING, List
|
4 |
+
|
5 |
+
import logging
|
6 |
+
import json
|
7 |
+
import os
|
8 |
+
import requests
|
9 |
+
import urllib3
|
10 |
+
|
11 |
+
from tqdm import tqdm
|
12 |
+
import colorama
|
13 |
+
from duckduckgo_search import ddg
|
14 |
+
import asyncio
|
15 |
+
import aiohttp
|
16 |
+
|
17 |
+
from modules.presets import *
|
18 |
+
from modules.llama_func import *
|
19 |
+
from modules.utils import *
|
20 |
+
import modules.shared as shared
|
21 |
+
|
22 |
+
# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
|
23 |
+
|
24 |
+
if TYPE_CHECKING:
|
25 |
+
from typing import TypedDict
|
26 |
+
|
27 |
+
class DataframeData(TypedDict):
|
28 |
+
headers: List[str]
|
29 |
+
data: List[List[str | int | bool]]
|
30 |
+
|
31 |
+
|
32 |
+
initial_prompt = "You are a helpful assistant."
|
33 |
+
HISTORY_DIR = "history"
|
34 |
+
TEMPLATES_DIR = "templates"
|
35 |
+
|
36 |
+
def get_response(
    openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model
):
    """POST a chat-completion request and return the raw ``requests.Response``.

    The system prompt is prepended to ``history`` before sending.  Network
    errors (timeouts, proxy/SSL failures) propagate to the caller, which is
    responsible for turning them into status messages.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }

    # The system prompt is always the first message of the conversation.
    history = [construct_system(system_prompt), *history]

    payload = {
        "model": selected_model,
        "messages": history,  # [{"role": "user", "content": f"{inputs}"}]
        "temperature": temperature,  # 1.0
        "top_p": top_p,  # 1.0
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
    # Streaming answers start arriving quickly, so they get a much shorter
    # timeout than the all-at-once mode.
    if stream:
        timeout = timeout_streaming
    else:
        timeout = timeout_all

    # Pick up proxy settings from the environment, if any.
    http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy")
    https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy")

    proxies = {}
    if http_proxy:
        logging.info(f"使用 HTTP 代理: {http_proxy}")
        proxies["http"] = http_proxy
    if https_proxy:
        logging.info(f"使用 HTTPS 代理: {https_proxy}")
        proxies["https"] = https_proxy

    if shared.state.api_url != API_URL:
        logging.info(f"使用自定义API URL: {shared.state.api_url}")

    # A single call replaces the original duplicated with/without-proxy
    # branches: requests treats proxies=None like an empty mapping.
    return requests.post(
        shared.state.api_url,
        headers=headers,
        json=payload,
        stream=True,
        timeout=timeout,
        proxies=proxies or None,
    )
95 |
+
|
96 |
+
|
97 |
+
def stream_predict(
    openai_api_key,
    system_prompt,
    history,
    inputs,
    chatbot,
    all_token_counts,
    top_p,
    temperature,
    selected_model,
    fake_input=None,
    display_append=""
):
    """Generator: stream an answer from the chat API chunk by chunk.

    Yields ``(chatbot, history, status_text, all_token_counts)`` after every
    received delta so the UI can refresh incrementally.  ``fake_input`` (when
    given) is what appears in the chatbot instead of ``inputs`` — used when
    ``inputs`` has been rewritten, e.g. by the web-search prompt template.
    ``display_append`` is extra markdown appended to the displayed answer.
    Mutates ``history``/``chatbot``/``all_token_counts`` in place.
    """
    def get_return_value():
        # Snapshot of the four values the UI binds to.
        return chatbot, history, status_text, all_token_counts

    logging.info("实时回答模式")
    partial_words = ""
    counter = 0
    status_text = "开始实时传输回答……"
    history.append(construct_user(inputs))
    history.append(construct_assistant(""))
    if fake_input:
        chatbot.append((fake_input, ""))
    else:
        chatbot.append((inputs, ""))
    user_token_count = 0
    if len(all_token_counts) == 0:
        # First turn: the system prompt's tokens are charged to this message.
        system_prompt_token_count = count_token(construct_system(system_prompt))
        user_token_count = (
            count_token(construct_user(inputs)) + system_prompt_token_count
        )
    else:
        user_token_count = count_token(construct_user(inputs))
    all_token_counts.append(user_token_count)
    logging.info(f"输入token计数: {user_token_count}")
    yield get_return_value()
    try:
        response = get_response(
            openai_api_key,
            system_prompt,
            history,
            temperature,
            top_p,
            True,
            selected_model,
        )
    except requests.exceptions.ConnectTimeout:
        status_text = (
            standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
        )
        yield get_return_value()
        return
    except requests.exceptions.ReadTimeout:
        status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
        yield get_return_value()
        return

    yield get_return_value()
    error_json_str = ""

    for chunk in response.iter_lines():
        if counter == 0:
            # The first event-stream line is always skipped.
            counter += 1
            continue
        counter += 1
        # check whether each line is non-empty
        if chunk:
            chunk = chunk.decode()
            chunklength = len(chunk)
            try:
                # Strip the leading "data: " SSE prefix (6 chars) before parsing.
                chunk = json.loads(chunk[6:])
            except json.JSONDecodeError:
                # Keep accumulating malformed fragments so the user sees the
                # full garbled payload in the status bar.
                logging.info(chunk)
                error_json_str += chunk
                status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}"
                yield get_return_value()
                continue
            # decode each line as response data is in bytes
            if chunklength > 6 and "delta" in chunk["choices"][0]:
                finish_reason = chunk["choices"][0]["finish_reason"]
                status_text = construct_token_message(
                    sum(all_token_counts), stream=True
                )
                if finish_reason == "stop":
                    yield get_return_value()
                    break
                try:
                    partial_words = (
                        partial_words + chunk["choices"][0]["delta"]["content"]
                    )
                except KeyError:
                    status_text = (
                        standard_error_msg
                        + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: "
                        + str(sum(all_token_counts))
                    )
                    yield get_return_value()
                    break
                history[-1] = construct_assistant(partial_words)
                chatbot[-1] = (chatbot[-1][0], partial_words+display_append)
                # NOTE(review): counts one token per streamed delta — an
                # approximation, not an exact tokenizer count.
                all_token_counts[-1] += 1
                yield get_return_value()
|
201 |
+
|
202 |
+
def predict_all(
    openai_api_key,
    system_prompt,
    history,
    inputs,
    chatbot,
    all_token_counts,
    top_p,
    temperature,
    selected_model,
    fake_input=None,
    display_append=""
):
    """Fetch a complete (non-streaming) answer with a single API request.

    Mutates ``history``/``chatbot``/``all_token_counts`` in place and returns
    ``(chatbot, history, status_text, all_token_counts)``.  ``fake_input`` is
    shown in the chatbot instead of ``inputs`` when given; ``display_append``
    is extra markdown appended to the displayed answer.
    """
    logging.info("一次性回答模式")
    history.append(construct_user(inputs))
    history.append(construct_assistant(""))
    if fake_input:
        chatbot.append((fake_input, ""))
    else:
        chatbot.append((inputs, ""))
    all_token_counts.append(count_token(construct_user(inputs)))
    try:
        response = get_response(
            openai_api_key,
            system_prompt,
            history,
            temperature,
            top_p,
            False,
            selected_model,
        )
    except requests.exceptions.ConnectTimeout:
        status_text = (
            standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
        )
        return chatbot, history, status_text, all_token_counts
    except requests.exceptions.ProxyError:
        status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
        return chatbot, history, status_text, all_token_counts
    except requests.exceptions.SSLError:
        status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
        return chatbot, history, status_text, all_token_counts
    # .json() is the idiomatic replacement for json.loads(response.text).
    response = response.json()
    content = response["choices"][0]["message"]["content"]
    history[-1] = construct_assistant(content)
    chatbot[-1] = (chatbot[-1][0], content+display_append)
    total_token_count = response["usage"]["total_tokens"]
    # The API reports the authoritative total; back out what we had counted
    # so the last entry holds the remainder.
    all_token_counts[-1] = total_token_count - sum(all_token_counts)
    status_text = construct_token_message(total_token_count)
    return chatbot, history, status_text, all_token_counts
|
252 |
+
|
253 |
+
|
254 |
+
def predict(
    openai_api_key,
    system_prompt,
    history,
    inputs,
    chatbot,
    all_token_counts,
    top_p,
    temperature,
    stream=False,
    selected_model=MODELS[0],
    use_websearch=False,
    files = None,
    reply_language="中文",
    should_check_token_count=True,
):  # repetition_penalty, top_k
    """Top-level answer generator driving the whole chat UI.

    Dispatches, in order: document Q&A (``files``), optional web-search
    prompt augmentation (``use_websearch``), API-key / empty-input
    validation, then streaming or one-shot prediction.  Finally, if the
    running token total exceeds the limit, triggers context reduction.
    Yields ``(chatbot, history, status_text, all_token_counts)``.
    """
    logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
    yield chatbot+[(inputs, "")], history, "开始生成回答……", all_token_counts
    if reply_language == "跟随问题语言(不稳定)":
        reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
    if files:
        # Document Q&A path: build (or load) a llama-index over the files and
        # answer from it; the normal chat path is skipped entirely.
        msg = "加载索引中……(这可能需要几分钟)"
        logging.info(msg)
        yield chatbot+[(inputs, "")], history, msg, all_token_counts
        index = construct_index(openai_api_key, file_src=files)
        msg = "索引构建完成,获取回答中……"
        yield chatbot+[(inputs, "")], history, msg, all_token_counts
        history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot, reply_language)
        yield chatbot, history, status_text, all_token_counts
        return

    old_inputs = ""
    link_references = []
    if use_websearch:
        # Rewrite `inputs` with search results; keep the original question in
        # old_inputs so the chatbot displays what the user actually typed.
        search_results = ddg(inputs, max_results=5)
        old_inputs = inputs
        web_results = []
        for idx, result in enumerate(search_results):
            logging.info(f"搜索结果{idx + 1}:{result}")
            domain_name = urllib3.util.parse_url(result["href"]).host
            web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
            link_references.append(f"{idx+1}. [{domain_name}]({result['href']})\n")
        link_references = "\n\n" + "".join(link_references)
        inputs = (
            replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
            .replace("{query}", inputs)
            .replace("{web_results}", "\n\n".join(web_results))
            .replace("{reply_language}", reply_language )
        )
    else:
        link_references = ""

    # NOTE(review): a fixed 51-char length check is a brittle API-key test —
    # OpenAI key formats have varied in length; confirm before relying on it.
    if len(openai_api_key) != 51:
        status_text = standard_error_msg + no_apikey_msg
        logging.info(status_text)
        chatbot.append((inputs, ""))
        if len(history) == 0:
            history.append(construct_user(inputs))
            history.append("")
            all_token_counts.append(0)
        else:
            history[-2] = construct_user(inputs)
        yield chatbot+[(inputs, "")], history, status_text, all_token_counts
        return
    elif len(inputs.strip()) == 0:
        status_text = standard_error_msg + no_input_msg
        logging.info(status_text)
        yield chatbot+[(inputs, "")], history, status_text, all_token_counts
        return

    if stream:
        logging.info("使用流式传输")
        iter = stream_predict(
            openai_api_key,
            system_prompt,
            history,
            inputs,
            chatbot,
            all_token_counts,
            top_p,
            temperature,
            selected_model,
            fake_input=old_inputs,
            display_append=link_references
        )
        for chatbot, history, status_text, all_token_counts in iter:
            if shared.state.interrupted:
                # User pressed stop: reset the flag and abandon the stream.
                shared.state.recover()
                return
            yield chatbot, history, status_text, all_token_counts
    else:
        logging.info("不使用流式传输")
        chatbot, history, status_text, all_token_counts = predict_all(
            openai_api_key,
            system_prompt,
            history,
            inputs,
            chatbot,
            all_token_counts,
            top_p,
            temperature,
            selected_model,
            fake_input=old_inputs,
            display_append=link_references
        )
        yield chatbot, history, status_text, all_token_counts

    logging.info(f"传输完毕。当前token计数为{all_token_counts}")
    if len(history) > 1 and history[-1]["content"] != inputs:
        logging.info(
            "回答为:"
            + colorama.Fore.BLUE
            + f"{history[-1]['content']}"
            + colorama.Style.RESET_ALL
        )

    if stream:
        max_token = max_token_streaming
    else:
        max_token = max_token_all

    if sum(all_token_counts) > max_token and should_check_token_count:
        # Context too large: ask the model to summarize, then keep only the
        # most recent turns (see reduce_token_size).
        status_text = f"精简token中{all_token_counts}/{max_token}"
        logging.info(status_text)
        yield chatbot, history, status_text, all_token_counts
        iter = reduce_token_size(
            openai_api_key,
            system_prompt,
            history,
            chatbot,
            all_token_counts,
            top_p,
            temperature,
            max_token//2,
            selected_model=selected_model,
        )
        for chatbot, history, status_text, all_token_counts in iter:
            status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
            yield chatbot, history, status_text, all_token_counts
|
394 |
+
|
395 |
+
def retry(
    openai_api_key,
    system_prompt,
    history,
    chatbot,
    token_count,
    top_p,
    temperature,
    stream=False,
    selected_model=MODELS[0],
    reply_language="中文",
):
    """Discard the last answer and re-ask the last question via ``predict``.

    Yields the same ``(chatbot, history, status_text, token_count)`` tuples
    that ``predict`` produces.
    """
    logging.info("重试中……")
    if not history:
        # Nothing to retry in an empty conversation.
        yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
        return
    # Drop the failed assistant reply, then recover the user's question.
    history.pop()
    last_question = history.pop()["content"]
    token_count.pop()
    logging.info("重试中……")
    yield from predict(
        openai_api_key,
        system_prompt,
        history,
        last_question,
        chatbot,
        token_count,
        top_p,
        temperature,
        stream=stream,
        selected_model=selected_model,
        reply_language=reply_language,
    )
    logging.info("重试完毕")
431 |
+
|
432 |
+
|
433 |
+
def reduce_token_size(
    openai_api_key,
    system_prompt,
    history,
    chatbot,
    token_count,
    top_p,
    temperature,
    max_token_count,
    selected_model=MODELS[0],
    reply_language="中文",
):
    """Shrink the conversation context when the token total exceeds the cap.

    Sends ``summarize_prompt`` through ``predict`` and, as the answer streams
    in, trims ``history``/``token_count`` down to the most recent ``num_chat``
    turns that fit within ``max_token_count``.  Yields the same 4-tuple shape
    as ``predict``.
    """
    logging.info("开始减少token数量……")
    iter = predict(
        openai_api_key,
        system_prompt,
        history,
        summarize_prompt,
        chatbot,
        token_count,
        top_p,
        temperature,
        selected_model=selected_model,
        should_check_token_count=False,
        reply_language=reply_language,
    )
    logging.info(f"chatbot: {chatbot}")
    flag = False
    for chatbot, history, status_text, previous_token_count in iter:
        # find_n picks how many recent exchanges fit in max_token_count.
        num_chat = find_n(previous_token_count, max_token_count)
        if flag:
            # After the first iteration, drop the summarize-prompt exchange
            # from the displayed chat before yielding.
            chatbot = chatbot[:-1]
        flag = True
        # Each exchange is two history entries (user + assistant).
        history = history[-2*num_chat:] if num_chat > 0 else []
        token_count = previous_token_count[-num_chat:] if num_chat > 0 else []
        msg = f"保留了最近{num_chat}轮对话"
        yield chatbot, history, msg + "," + construct_token_message(
            sum(token_count) if len(token_count) > 0 else 0,
        ), token_count
        logging.info(msg)
    logging.info("减少token数量完毕")
|
modules/llama_func.py
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import logging
|
3 |
+
|
4 |
+
from llama_index import GPTSimpleVectorIndex
|
5 |
+
from llama_index import download_loader
|
6 |
+
from llama_index import (
|
7 |
+
Document,
|
8 |
+
LLMPredictor,
|
9 |
+
PromptHelper,
|
10 |
+
QuestionAnswerPrompt,
|
11 |
+
RefinePrompt,
|
12 |
+
)
|
13 |
+
from langchain.llms import OpenAI
|
14 |
+
import colorama
|
15 |
+
|
16 |
+
from modules.presets import *
|
17 |
+
from modules.utils import *
|
18 |
+
|
19 |
+
def get_index_name(file_src):
    """Derive a stable cache key (SHA-1) from the uploaded files' basenames."""
    # Sorting makes the key independent of the upload order.
    basenames = sorted(os.path.basename(file.name) for file in file_src)
    return sha1sum("".join(basenames))
27 |
+
|
28 |
+
def get_documents(file_src):
    """Load each uploaded file into a llama-index ``Document``.

    PDF/DOCX/EPUB files go through the matching llama-index loader; anything
    else is read as UTF-8 text.  CJK punctuation is space-padded via
    ``add_space`` before wrapping.
    """
    documents = []
    logging.debug("Loading documents...")
    logging.debug(f"file_src: {file_src}")
    # Map lower-cased extension -> llama-index loader name.  Lower-casing also
    # covers ".PDF" / ".DOCX" etc., which the old case-sensitive if/elif chain
    # silently sent to the plain-text branch.
    loader_names = {".pdf": "CJKPDFReader", ".docx": "DocxReader", ".epub": "EpubReader"}
    for file in file_src:
        logging.info(f"loading file: {file.name}")
        ext = os.path.splitext(file.name)[1].lower()
        loader_name = loader_names.get(ext)
        if loader_name is not None:
            logging.debug(f"Loading {ext[1:].upper()}...")
            loader = download_loader(loader_name)()
            text_raw = loader.load_data(file=file.name)[0].text
        else:
            logging.debug("Loading text file...")
            with open(file.name, "r", encoding="utf-8") as f:
                text_raw = f.read()
        text = add_space(text_raw)
        documents += [Document(text)]
    return documents
56 |
+
|
57 |
+
|
58 |
+
def construct_index(
    api_key,
    file_src,
    max_input_size=4096,
    num_outputs=1,
    max_chunk_overlap=20,
    chunk_size_limit=600,
    embedding_limit=None,
    separator=" ",
    num_children=10,
    max_keywords_per_chunk=10,
):
    """Build a ``GPTSimpleVectorIndex`` over ``file_src`` (with on-disk cache).

    The index is cached under ``./index/<sha1>.json`` keyed by the file
    basenames; a cache hit skips document loading entirely.  Returns the
    index, or ``None`` if construction fails.

    Note: ``num_children`` and ``max_keywords_per_chunk`` are currently
    unused; they are kept for interface compatibility.
    """
    os.environ["OPENAI_API_KEY"] = api_key
    # Treat 0 / "" as "use the library default".
    chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
    embedding_limit = None if embedding_limit == 0 else embedding_limit
    separator = " " if separator == "" else separator

    llm_predictor = LLMPredictor(
        llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
    )
    prompt_helper = PromptHelper(
        max_input_size,
        num_outputs,
        max_chunk_overlap,
        embedding_limit,
        chunk_size_limit,
        separator=separator,
    )
    index_name = get_index_name(file_src)
    if os.path.exists(f"./index/{index_name}.json"):
        logging.info("找到了缓存的索引文件,加载中……")
        return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
    try:
        documents = get_documents(file_src)
        logging.debug("构建索引中……")
        index = GPTSimpleVectorIndex(
            documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
        )
        os.makedirs("./index", exist_ok=True)
        index.save_to_disk(f"./index/{index_name}.json")
        return index
    except Exception as e:
        # Was a bare print(); route the failure through logging like the
        # rest of the module so it shows up in the log stream.
        logging.error(e)
        return None
103 |
+
|
104 |
+
|
105 |
+
def chat_ai(
    api_key,
    index,
    question,
    context,
    chatbot,
    reply_language,
):
    """Answer ``question`` against ``index`` and append the turn to the chat.

    Always returns ``(context, chatbot, status_text)`` — including on
    failure, so callers can unpack three values unconditionally.
    """
    os.environ["OPENAI_API_KEY"] = api_key

    logging.info(f"Question: {question}")

    ret = ask_ai(
        api_key,
        index,
        question,
        replace_today(PROMPT_TEMPLATE),
        REFINE_TEMPLATE,
        SIM_K,
        INDEX_QUERY_TEMPRATURE,
        context,
        reply_language,
    )
    # BUG FIX: ask_ai returns a bare None on failure, which crashed the
    # original three-way unpack before its None check could run; the failure
    # branch also returned only 2 values while callers unpack 3.
    if ret is None:
        status_text = "查询失败,请换个问法试试"
        return context, chatbot, status_text
    response, chatbot_display, status_text = ret

    context.append({"role": "user", "content": question})
    context.append({"role": "assistant", "content": response})
    chatbot.append((question, chatbot_display))

    # Scrub the key from the environment once the query is done.
    os.environ["OPENAI_API_KEY"] = ""
    return context, chatbot, status_text
139 |
+
|
140 |
+
|
141 |
+
def ask_ai(
    api_key,
    index,
    question,
    prompt_tmpl,
    refine_tmpl,
    sim_k=1,
    temprature=0,
    prefix_messages=[],
    reply_language="中文",
):
    """Query a llama-index and return ``(answer, display_html, status)``.

    ``display_html`` is the answer plus collapsible source snippets.
    Returns ``None`` when the index produced no response.

    Note: ``prefix_messages=[]`` is a mutable default — it is only passed
    through to ``OpenAI(...)``, never mutated here.
    """
    os.environ["OPENAI_API_KEY"] = api_key

    logging.debug("Index file found")
    logging.debug("Querying index...")
    llm_predictor = LLMPredictor(
        llm=OpenAI(
            temperature=temprature,
            model_name="gpt-3.5-turbo-0301",
            prefix_messages=prefix_messages,
        )
    )

    response = None  # Initialize response variable to avoid UnboundLocalError
    qa_prompt = QuestionAnswerPrompt(prompt_tmpl.replace("{reply_language}", reply_language))
    rf_prompt = RefinePrompt(refine_tmpl.replace("{reply_language}", reply_language))
    response = index.query(
        question,
        llm_predictor=llm_predictor,
        similarity_top_k=sim_k,
        text_qa_template=qa_prompt,
        refine_template=rf_prompt,
        response_mode="compact",
    )

    if response is not None:
        logging.info(f"Response: {response}")
        ret_text = response.response
        nodes = []
        # Renamed the loop variable from `index` to `i`: it shadowed the
        # `index` (vector index) parameter, a maintenance hazard.
        for i, node in enumerate(response.source_nodes):
            brief = node.source_text[:25].replace("\n", "")
            nodes.append(
                f"<details><summary>[{i + 1}]\t{brief}...</summary><p>{node.source_text}</p></details>"
            )
        new_response = ret_text + "\n----------\n" + "\n\n".join(nodes)
        logging.info(
            f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}"
        )
        os.environ["OPENAI_API_KEY"] = ""
        return ret_text, new_response, f"查询消耗了{llm_predictor.last_token_usage} tokens"
    else:
        logging.warning("No response found, returning None")
        os.environ["OPENAI_API_KEY"] = ""
        return None
195 |
+
|
196 |
+
|
197 |
+
def add_space(text):
    """Insert an ASCII space after common CJK punctuation marks."""
    # str.translate performs all replacements in a single C-level pass.
    table = str.maketrans(
        {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "}
    )
    return text.translate(table)
|
modules/openai_func.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
import logging
|
3 |
+
from modules.presets import timeout_all, BALANCE_API_URL,standard_error_msg,connection_timeout_prompt,error_retrieve_prompt,read_timeout_prompt
|
4 |
+
from modules import shared
|
5 |
+
import os
|
6 |
+
|
7 |
+
|
8 |
+
def get_usage_response(openai_api_key):
    """GET the billing credit-grants endpoint; return the raw response.

    Honours HTTP(S)_PROXY environment variables.  Network errors propagate
    to the caller (``get_usage`` turns them into status strings).
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }

    timeout = timeout_all

    # Pick up proxy settings from the environment.
    http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy")
    https_proxy = os.environ.get(
        "HTTPS_PROXY") or os.environ.get("https_proxy")

    proxies = {}
    if http_proxy:
        logging.info(f"使用 HTTP 代理: {http_proxy}")
        proxies["http"] = http_proxy
    if https_proxy:
        logging.info(f"使用 HTTPS 代理: {https_proxy}")
        proxies["https"] = https_proxy

    """
    暂不支持修改
    if shared.state.balance_api_url != BALANCE_API_URL:
        logging.info(f"使用自定义BALANCE API URL: {shared.state.balance_api_url}")
    """
    # A single call replaces the duplicated with/without-proxy branches:
    # requests treats proxies=None like an empty mapping.
    return requests.get(
        BALANCE_API_URL,
        headers=headers,
        timeout=timeout,
        proxies=proxies or None,
    )
50 |
+
|
51 |
+
def get_usage(openai_api_key):
    """Return a markdown summary of API credit usage for ``openai_api_key``.

    On network timeouts, returns the standard error status string instead.
    """
    try:
        response = get_usage_response(openai_api_key=openai_api_key)
        # Parse the body once instead of calling response.json() five times.
        data = response.json()
        logging.debug(data)
        try:
            # `or 0` covers both missing keys and explicit null/0 values,
            # matching the original falsy-check behaviour.
            balance = data.get("total_available") or 0
            total_used = data.get("total_used") or 0
        except Exception as e:
            logging.error("API使用情况解析失败:" + str(e))
            balance = 0
            total_used = 0
        return f"**API使用情况**(已用/余额)\u3000{total_used}$ / {balance}$"
    except requests.exceptions.ConnectTimeout:
        status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
        return status_text
    except requests.exceptions.ReadTimeout:
        status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
        return status_text
|
modules/overwrites.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
import logging
|
3 |
+
|
4 |
+
from llama_index import Prompt
|
5 |
+
from typing import List, Tuple
|
6 |
+
import mdtex2html
|
7 |
+
|
8 |
+
from modules.presets import *
|
9 |
+
from modules.llama_func import *
|
10 |
+
|
11 |
+
|
12 |
+
def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
    """Merge non-empty chunks into one numbered blob, then re-split it.

    Each surviving chunk is prefixed with "[n]" so sources stay citable
    after the re-split.
    """
    logging.debug("Compacting text chunks...🚀🚀🚀")
    stripped = (chunk.strip() for chunk in text_chunks)
    numbered = [f"[{pos+1}] {chunk}" for pos, chunk in enumerate(c for c in stripped if c)]
    merged = "\n\n".join(numbered)
    # Re-split according to the prompt's size budget (self.max_chunk_overlap).
    splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
    return splitter.split_text(merged)
20 |
+
|
21 |
+
|
22 |
+
def postprocess(
    self, y: List[Tuple[str | None, str | None]]
) -> List[Tuple[str | None, str | None]]:
    """
    Parameters:
        y: List of (message, response) pairs; each element may be Markdown.
    Returns:
        The same list, with the newest pair converted to HTML in place.
    """
    if not y:
        return []
    user, bot = y[-1]
    # Skip messages that carry the converted-content marker already.
    converted_user = user if detect_converted_mark(user) else convert_asis(user)
    converted_bot = bot if detect_converted_mark(bot) else convert_mdtext(bot)
    y[-1] = (converted_user, converted_bot)
    return y
40 |
+
|
41 |
+
# Read the custom JavaScript payloads once at import time.
with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
    customJS = f.read()
    kelpyCodos = f2.read()

def reload_javascript():
    """Monkey-patch gradio's TemplateResponse so every rendered page gets the
    custom JS injected just before the closing </html> tag."""
    print("Reloading javascript...")
    js = f'<script>{customJS}</script><script>{kelpyCodos}</script>'
    def template_response(*args, **kwargs):
        # Delegate to the original class, then rewrite the response body.
        res = GradioTemplateResponseOriginal(*args, **kwargs)
        res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
        res.init_headers()
        return res

    gr.routes.templates.TemplateResponse = template_response

# Keep a reference to the original class so template_response can delegate to
# it even after the patch is applied.
GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
|
modules/presets.py
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding:utf-8 -*-
import gradio as gr

# ChatGPT endpoint configuration.
initial_prompt = "You are a helpful assistant."
API_URL = "https://api.openai.com/v1/chat/completions"
BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants"
HISTORY_DIR = "history"  # directory where saved conversations are written
TEMPLATES_DIR = "templates"  # directory containing prompt-template files

# Error strings shown in the UI (runtime values, intentionally in Chinese).
standard_error_msg = "☹️发生了错误:"  # standard prefix for every error message
error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。"  # failed to fetch a reply
connection_timeout_prompt = "连接超时,无法获取对话。"  # connection timeout
read_timeout_prompt = "读取超时,无法获取对话。"  # read timeout
proxy_error_prompt = "代理错误,无法获取对话。"  # proxy error
ssl_error_prompt = "SSL错误,无法获取对话。"  # SSL error
no_apikey_msg = "API key长度不是51位,请检查是否输入正确。"  # API key is not 51 characters
no_input_msg = "请输入对话内容。"  # user submitted empty input

max_token_streaming = 3500  # max tokens for a streaming conversation
timeout_streaming = 10  # request timeout in seconds when streaming
max_token_all = 3500  # max tokens for a non-streaming conversation
timeout_all = 200  # request timeout in seconds when not streaming
enable_streaming_option = True  # whether to show the "stream responses" checkbox
HIDE_MY_KEY = False  # set to True to hide your API key in the UI
CONCURRENT_COUNT = 100  # number of users allowed to use the app at the same time

SIM_K = 5  # top-k similar chunks retrieved for document-index queries
INDEX_QUERY_TEMPRATURE = 1.0  # temperature used for llama-index queries

# Static HTML shown at the top of the page.
title = """<h1 align="left" style="min-width:200px; margin-top:0;">川虎ChatGPT 🚀</h1>"""
description = """\
<div align="center" style="margin:16px 0">

由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发

访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本

此App使用 `gpt-3.5-turbo` 大语言模型
</div>
"""

summarize_prompt = "你是谁?我们刚才聊了什么?"  # prompt sent when summarizing a conversation

# Models selectable in the UI.
MODELS = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0301",
    "gpt-4",
    "gpt-4-0314",
    "gpt-4-32k",
    "gpt-4-32k-0314",
]

# Languages the assistant can be asked to reply in.
REPLY_LANGUAGES = [
    "中文",
    "English",
    "日本語",
    "Español",
    "Français",
    "Deutsch",
    "跟随问题语言(不稳定)"
]
|
64 |
+
|
65 |
+
|
66 |
+
# Prompt wrapped around web-search results before they are sent to the model.
WEBSEARCH_PTOMPT_TEMPLATE = """\
Web search results:

{web_results}
Current date: {current_date}

Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
Query: {query}
Reply in {reply_language}
"""

# Prompt used for question answering over indexed documents.
PROMPT_TEMPLATE = """\
Context information is below.
---------------------
{context_str}
---------------------
Current date: {current_date}.
Using the provided context information, write a comprehensive reply to the given query.
Make sure to cite results using [number] notation after the reference.
If the provided context information refer to multiple subjects with the same name, write separate answers for each subject.
Use prior knowledge only if the given context didn't provide enough information.
Answer the question: {query_str}
Reply in {reply_language}
"""

# Prompt used to refine an existing answer with additional retrieved context.
REFINE_TEMPLATE = """\
The original question is as follows: {query_str}
We have provided an existing answer: {existing_answer}
We have the opportunity to refine the existing answer
(only if needed) with some more context below.
------------
{context_msg}
------------
Given the new context, refine the original answer to better
Reply in {reply_language}
If the context isn't useful, return the original answer.
"""

# Sentinel appended to converted HTML so a message is never converted twice.
ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
|
105 |
+
|
106 |
+
# Custom green-accent Gradio theme used across the app.
small_and_beautiful_theme = gr.themes.Soft(
    primary_hue=gr.themes.Color(  # green accent used for primary controls
        c50="#02C160",
        c100="rgba(2, 193, 96, 0.2)",
        c200="#02C160",
        c300="rgba(2, 193, 96, 0.32)",
        c400="rgba(2, 193, 96, 0.32)",
        c500="rgba(2, 193, 96, 1.0)",
        c600="rgba(2, 193, 96, 1.0)",
        c700="rgba(2, 193, 96, 0.32)",
        c800="rgba(2, 193, 96, 0.32)",
        c900="#02C160",
        c950="#02C160",
    ),
    secondary_hue=gr.themes.Color(  # muted blue for secondary accents
        c50="#576b95",
        c100="#576b95",
        c200="#576b95",
        c300="#576b95",
        c400="#576b95",
        c500="#576b95",
        c600="#576b95",
        c700="#576b95",
        c800="#576b95",
        c900="#576b95",
        c950="#576b95",
    ),
    neutral_hue=gr.themes.Color(  # gray scale for text and surfaces
        name="gray",
        c50="#f9fafb",
        c100="#f3f4f6",
        c200="#e5e7eb",
        c300="#d1d5db",
        c400="#B2B2B2",
        c500="#808080",
        c600="#636363",
        c700="#515151",
        c800="#393939",
        c900="#272727",
        c950="#171717",
    ),
    radius_size=gr.themes.sizes.radius_sm,
).set(
    # Overrides for individual widget colors (light and dark variants).
    button_primary_background_fill="#06AE56",
    button_primary_background_fill_dark="#06AE56",
    button_primary_background_fill_hover="#07C863",
    button_primary_border_color="#06AE56",
    button_primary_border_color_dark="#06AE56",
    button_primary_text_color="#FFFFFF",
    button_primary_text_color_dark="#FFFFFF",
    button_secondary_background_fill="#F2F2F2",
    button_secondary_background_fill_dark="#2B2B2B",
    button_secondary_text_color="#393939",
    button_secondary_text_color_dark="#FFFFFF",
    # background_fill_primary="#F7F7F7",
    # background_fill_primary_dark="#1F1F1F",
    block_title_text_color="*primary_500",
    block_title_background_fill="*primary_100",
    input_background_fill="#F6F6F6",
)
|
modules/shared.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from modules.presets import API_URL
|
2 |
+
|
3 |
+
class State:
    """Process-wide mutable flags shared by the Gradio callbacks: the
    interrupt flag for streaming replies and the currently active API URL."""

    interrupted = False
    api_url = API_URL

    def set_api_url(self, api_url):
        """Point subsequent requests at a different chat-completions endpoint."""
        self.api_url = api_url

    def reset_api_url(self):
        """Restore the default endpoint and return it."""
        self.api_url = API_URL
        return self.api_url

    def interrupt(self):
        """Ask any in-flight streaming loop to stop."""
        self.interrupted = True

    def recover(self):
        """Clear the interrupt flag so new requests can stream again."""
        self.interrupted = False

    def reset_all(self):
        """Restore both the interrupt flag and the API URL to their defaults."""
        self.interrupted = False
        self.api_url = API_URL

# Module-level singleton used by the rest of the app.
state = State()
|
modules/utils.py
ADDED
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding:utf-8 -*-
|
2 |
+
from __future__ import annotations
|
3 |
+
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
|
4 |
+
import logging
|
5 |
+
import json
|
6 |
+
import os
|
7 |
+
import datetime
|
8 |
+
import hashlib
|
9 |
+
import csv
|
10 |
+
import requests
|
11 |
+
import re
|
12 |
+
import html
|
13 |
+
|
14 |
+
import gradio as gr
|
15 |
+
from pypinyin import lazy_pinyin
|
16 |
+
import tiktoken
|
17 |
+
import mdtex2html
|
18 |
+
from markdown import markdown
|
19 |
+
from pygments import highlight
|
20 |
+
from pygments.lexers import get_lexer_by_name
|
21 |
+
from pygments.formatters import HtmlFormatter
|
22 |
+
|
23 |
+
from modules.presets import *
|
24 |
+
import modules.shared as shared
|
25 |
+
|
26 |
+
logging.basicConfig(
|
27 |
+
level=logging.INFO,
|
28 |
+
format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
|
29 |
+
)
|
30 |
+
|
31 |
+
if TYPE_CHECKING:
|
32 |
+
from typing import TypedDict
|
33 |
+
|
34 |
+
class DataframeData(TypedDict):
|
35 |
+
headers: List[str]
|
36 |
+
data: List[List[str | int | bool]]
|
37 |
+
|
38 |
+
|
39 |
+
def count_token(message):
    """Estimate the token cost of one chat message (role + content) with the
    cl100k_base encoding."""
    serialized = f"role: {message['role']}, content: {message['content']}"
    return len(tiktoken.get_encoding("cl100k_base").encode(serialized))
|
44 |
+
|
45 |
+
|
46 |
+
def markdown_to_html_with_syntax_highlight(md_str):
    """Render markdown to HTML, replacing fenced code blocks with
    Pygments-highlighted ``<pre><code>`` sections."""

    def _highlight_block(match):
        lang = match.group(1) or "text"
        source = match.group(2)
        try:
            lexer = get_lexer_by_name(lang, stripall=True)
        except ValueError:
            # Unknown language tag: fall back to plain text.
            lexer = get_lexer_by_name("text", stripall=True)
        rendered = highlight(source, lexer, HtmlFormatter())
        return f'<pre><code class="{lang}">{rendered}</code></pre>'

    fenced = r"```(\w+)?\n([\s\S]+?)\n```"
    md_str = re.sub(fenced, _highlight_block, md_str, flags=re.MULTILINE)
    return markdown(md_str)
|
66 |
+
|
67 |
+
|
68 |
+
def normalize_markdown(md_text: str) -> str:
    """Normalize list spacing so markdown renderers parse lists reliably.

    A blank line is inserted before a list that directly follows text, and
    blank lines *inside* a list are dropped unless they terminate it.
    """
    list_item = r"^(\d+\.|-|\*|\+)\s"
    source_lines = md_text.split("\n")
    out = []
    in_list = False

    for idx, line in enumerate(source_lines):
        if re.match(list_item, line.strip()):
            # Entering a list right after non-blank text: separate with a blank line.
            if not in_list and idx > 0 and source_lines[idx - 1].strip() != "":
                out.append("")
            in_list = True
            out.append(line)
        elif in_list and line.strip() == "":
            # Blank line inside a list: keep it only when the list does not continue.
            if idx < len(source_lines) - 1 and not re.match(
                list_item, source_lines[idx + 1].strip()
            ):
                out.append(line)
        else:
            in_list = False
            out.append(line)

    return "\n".join(out)
|
90 |
+
|
91 |
+
|
92 |
+
def convert_mdtext(md_text):
    """Convert a markdown reply to HTML.

    The text is split into fenced code blocks and everything else: code goes
    through the syntax highlighter, plain parts through a markdown/LaTeX
    renderer, and ALREADY_CONVERTED_MARK is appended so the result is never
    converted a second time (see detect_converted_mark).
    """
    code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
    inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
    code_blocks = code_block_pattern.findall(md_text)
    # re.split alternates non-code / captured code; [::2] keeps only the
    # segments outside the fences.
    non_code_parts = code_block_pattern.split(md_text)[::2]

    result = []
    for non_code, code in zip(non_code_parts, code_blocks + [""]):
        if non_code.strip():
            non_code = normalize_markdown(non_code)
            if inline_code_pattern.search(non_code):
                # Inline backticks present: use plain markdown here —
                # presumably because the LaTeX converter mangles them (TODO confirm).
                result.append(markdown(non_code, extensions=["tables"]))
            else:
                result.append(mdtex2html.convert(non_code, extensions=["tables"]))
        if code.strip():
            # _, code = detect_language(code)  # syntax detection disabled: it broke on large code blocks
            # code = code.replace("\n\n", "\n")  # blank-line stripping disabled for the same reason
            code = f"\n```{code}\n\n```"
            code = markdown_to_html_with_syntax_highlight(code)
            result.append(code)
    result = "".join(result)
    result += ALREADY_CONVERTED_MARK
    return result
|
115 |
+
|
116 |
+
|
117 |
+
def convert_asis(userinput):
    """Wrap raw user input in an HTML-escaped, whitespace-preserving paragraph
    and tag it with the already-converted sentinel."""
    escaped = html.escape(userinput)
    return f"<p style=\"white-space:pre-wrap;\">{escaped}</p>" + ALREADY_CONVERTED_MARK
|
119 |
+
|
120 |
+
def detect_converted_mark(userinput):
    """Return True when the text already carries the converted-HTML sentinel."""
    return userinput.endswith(ALREADY_CONVERTED_MARK)
|
125 |
+
|
126 |
+
|
127 |
+
def detect_language(code):
    """Split a fenced-code payload into (language tag, remaining code).

    A payload starting with a newline has no language line; otherwise the
    first line is taken as the (lower-cased) language name.
    """
    first_line = "" if code.startswith("\n") else code.strip().split("\n", 1)[0]
    if not first_line:
        return "", code
    return first_line.lower(), code[len(first_line):].lstrip()
|
135 |
+
|
136 |
+
|
137 |
+
def construct_text(role, text):
    """Build one OpenAI chat message dict from a role and its content."""
    return {"role": role, "content": text}
|
139 |
+
|
140 |
+
|
141 |
+
def construct_user(text):
    """Build a user-role chat message (inlines construct_text)."""
    return {"role": "user", "content": text}
|
143 |
+
|
144 |
+
|
145 |
+
def construct_system(text):
    """Build a system-role chat message (inlines construct_text)."""
    return {"role": "system", "content": text}
|
147 |
+
|
148 |
+
|
149 |
+
def construct_assistant(text):
    """Build an assistant-role chat message (inlines construct_text)."""
    return {"role": "assistant", "content": text}
|
151 |
+
|
152 |
+
|
153 |
+
def construct_token_message(token, stream=False):
    """Format the token-count status line shown under the chat.

    ``stream`` is accepted for signature compatibility but does not affect
    the output.
    """
    return f"Token 计数: {token}"
|
155 |
+
|
156 |
+
def delete_first_conversation(history, previous_token_count):
    """Drop the oldest user/assistant pair and its token count.

    Returns the mutated history, the mutated token-count list, and a fresh
    token status message.
    """
    if history:
        del history[:2]
        del previous_token_count[0]
    return (
        history,
        previous_token_count,
        construct_token_message(sum(previous_token_count)),
    )
|
165 |
+
|
166 |
+
|
167 |
+
def delete_last_conversation(chatbot, history, previous_token_count):
    """Remove the most recent exchange from chatbot/history/token bookkeeping.

    If the last chatbot entry contains an error message, only the chatbot
    record is removed (the exchange never made it into ``history``).

    Fix: the error branch previously returned a 2-tuple while the normal path
    returns a 4-tuple, which mismatches the outputs bound to this callback.
    Both paths now return (chatbot, history, previous_token_count, token message).
    """
    if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
        logging.info("由于包含报错信息,只删除chatbot记录")
        chatbot.pop()
        return (
            chatbot,
            history,
            previous_token_count,
            construct_token_message(sum(previous_token_count)),
        )
    if len(history) > 0:
        logging.info("删除了一组对话历史")
        history.pop()
        history.pop()
    if len(chatbot) > 0:
        logging.info("删除了一组chatbot对话")
        chatbot.pop()
    if len(previous_token_count) > 0:
        logging.info("删除了一组对话的token计数记录")
        previous_token_count.pop()
    return (
        chatbot,
        history,
        previous_token_count,
        construct_token_message(sum(previous_token_count)),
    )
|
188 |
+
|
189 |
+
|
190 |
+
def save_file(filename, system, history, chatbot):
    """Write the conversation to HISTORY_DIR as .json (full state) or .md
    (human-readable transcript) and return the written path.

    Fix: removed a leftover ``print(json_s)`` debug statement that dumped the
    entire conversation to stdout on every save.
    """
    logging.info("保存对话历史中……")
    os.makedirs(HISTORY_DIR, exist_ok=True)
    if filename.endswith(".json"):
        json_s = {"system": system, "history": history, "chatbot": chatbot}
        with open(os.path.join(HISTORY_DIR, filename), "w") as f:
            json.dump(json_s, f)
    elif filename.endswith(".md"):
        md_s = f"system: \n- {system} \n"
        for data in history:
            md_s += f"\n{data['role']}: \n- {data['content']} \n"
        with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf8") as f:
            f.write(md_s)
    logging.info("保存对话历史完毕")
    return os.path.join(HISTORY_DIR, filename)
|
206 |
+
|
207 |
+
|
208 |
+
def save_chat_history(filename, system, history, chatbot):
    """Persist the conversation as JSON; a missing extension is added and an
    empty filename is a no-op."""
    if filename == "":
        return
    if not filename.endswith(".json"):
        filename = filename + ".json"
    return save_file(filename, system, history, chatbot)
|
214 |
+
|
215 |
+
|
216 |
+
def export_markdown(filename, system, history, chatbot):
    """Export the conversation as a Markdown transcript; a missing extension
    is added and an empty filename is a no-op."""
    if filename == "":
        return
    if not filename.endswith(".md"):
        filename = filename + ".md"
    return save_file(filename, system, history, chatbot)
|
222 |
+
|
223 |
+
|
224 |
+
def load_chat_history(filename, system, history, chatbot):
    """Load a saved conversation from HISTORY_DIR.

    ``filename`` may be a string or a Gradio file-upload object. Old-format
    histories (a flat list of strings) are upgraded in place to role/content
    dicts, alternating user/assistant. Returns (filename, system, history,
    chatbot); when the file does not exist, the inputs are returned unchanged.
    """
    logging.info("加载对话历史中……")
    if type(filename) != str:
        # A gr.File object was passed in; use its underlying file name.
        filename = filename.name
    try:
        with open(os.path.join(HISTORY_DIR, filename), "r") as f:
            json_s = json.load(f)
        try:
            if type(json_s["history"][0]) == str:
                logging.info("历史记录格式为旧版,正在转换……")
                new_history = []
                for index, item in enumerate(json_s["history"]):
                    # Even indices were user turns, odd ones assistant turns.
                    if index % 2 == 0:
                        new_history.append(construct_user(item))
                    else:
                        new_history.append(construct_assistant(item))
                json_s["history"] = new_history
                logging.info(new_history)
        except:
            # Empty or absent history — nothing to convert (deliberate best-effort).
            pass
        logging.info("加载对话历史完毕")
        return filename, json_s["system"], json_s["history"], json_s["chatbot"]
    except FileNotFoundError:
        logging.info("没有找到对话历史文件,不执行任何操作")
        return filename, system, history, chatbot
|
250 |
+
|
251 |
+
|
252 |
+
def sorted_by_pinyin(items):
    """Sort strings by the pinyin initial of their first character.

    Fix: the parameter was named ``list``, shadowing the builtin; renamed to
    ``items`` (all in-repo callers pass it positionally).
    """
    return sorted(items, key=lambda text: lazy_pinyin(text)[0][0])
|
254 |
+
|
255 |
+
|
256 |
+
def get_file_names(dir, plain=False, filetypes=(".json",)):
    """List files in *dir* whose names end with one of *filetypes*, pinyin-sorted.

    ``plain=True`` returns the raw name list; otherwise a gr.Dropdown update is
    returned. A missing directory or no matches yields ``[""]`` so the dropdown
    is never empty.

    Fixes: the loop variable shadowed builtin ``type``; the mutable default
    list became a tuple. (``dir`` still shadows a builtin but renaming it would
    break keyword callers.)
    """
    logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
    files = []
    try:
        for filetype in filetypes:
            files += [f for f in os.listdir(dir) if f.endswith(filetype)]
    except FileNotFoundError:
        files = []
    files = sorted_by_pinyin(files)
    if files == []:
        files = [""]
    if plain:
        return files
    else:
        return gr.Dropdown.update(choices=files)
|
271 |
+
|
272 |
+
|
273 |
+
def get_history_names(plain=False):
    """List saved-history files; ``plain`` selects raw names vs a Dropdown update."""
    logging.info("获取历史记录文件名列表")
    return get_file_names(HISTORY_DIR, plain)
|
276 |
+
|
277 |
+
|
278 |
+
def load_template(filename, mode=0):
    """Load a prompt-template file (.json list of {act, prompt} or headed .csv)
    from TEMPLATES_DIR.

    mode 0: return (act->prompt dict, Dropdown update); mode 1: pinyin-sorted
    act names only; mode 2: act->prompt dict only.
    """
    logging.info(f"加载模板文件(unknown),模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
    logging.info("Loading template...")
    path = os.path.join(TEMPLATES_DIR, filename)
    if filename.endswith(".json"):
        with open(path, "r", encoding="utf8") as f:
            entries = json.load(f)
        rows = [[entry["act"], entry["prompt"]] for entry in entries]
    else:
        with open(path, "r", encoding="utf8") as csvfile:
            rows = list(csv.reader(csvfile))[1:]  # skip the header row
    if mode == 1:
        return sorted_by_pinyin([row[0] for row in rows])
    if mode == 2:
        return {row[0]: row[1] for row in rows}
    choices = sorted_by_pinyin([row[0] for row in rows])
    return {row[0]: row[1] for row in rows}, gr.Dropdown.update(
        choices=choices, value=choices[0]
    )
|
302 |
+
|
303 |
+
|
304 |
+
def get_template_names(plain=False):
    """List prompt-template files (.csv/.json) in TEMPLATES_DIR.

    Fix: the second filetype was ``"json"`` (missing the dot), so any file
    whose name merely ended in "json" matched; now restricted to ``".json"``.
    """
    logging.info("获取模板文件名列表")
    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", ".json"])
|
307 |
+
|
308 |
+
|
309 |
+
def get_template_content(templates, selection, original_system_prompt):
    """Look up the prompt for *selection* in the *templates* dict.

    Falls back to *original_system_prompt* when the selection is missing or
    unusable, so a stale dropdown value never breaks the UI.

    Fix: narrowed the bare ``except`` to the failure modes a dict lookup can
    actually raise (missing key, unhashable/invalid selection or container).
    """
    logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
    try:
        return templates[selection]
    except (KeyError, TypeError):
        return original_system_prompt
|
315 |
+
|
316 |
+
|
317 |
+
def reset_state():
    """Start a fresh conversation: empty chatbot, history, token counts, and a
    zeroed token status message."""
    logging.info("重置状态")
    return [], [], [], construct_token_message(0)
|
320 |
+
|
321 |
+
|
322 |
+
def reset_textbox():
    """Clear the user-input textbox."""
    logging.debug("重置文本框")
    return gr.update(value="")
|
325 |
+
|
326 |
+
|
327 |
+
def reset_default():
    """Restore the default API URL and drop any HTTPS proxy override."""
    restored_url = shared.state.reset_api_url()
    os.environ.pop("HTTPS_PROXY", None)
    os.environ.pop("https_proxy", None)
    return gr.update(value=restored_url), gr.update(value=""), "API URL 和代理已重置"
|
332 |
+
|
333 |
+
|
334 |
+
def change_api_url(url):
    """Switch the chat-completions endpoint to *url* and return a status message."""
    shared.state.set_api_url(url)
    msg = f"API地址更改为了{url}"
    logging.info(msg)
    return msg
|
339 |
+
|
340 |
+
|
341 |
+
def change_proxy(proxy):
    """Route HTTPS traffic through *proxy* (via the environment) and return a
    status message."""
    msg = f"代理更改为了{proxy}"
    os.environ["HTTPS_PROXY"] = proxy
    logging.info(msg)
    return msg
|
346 |
+
|
347 |
+
|
348 |
+
def hide_middle_chars(s):
    """Mask everything but the first and last four characters with asterisks.

    ``None`` becomes ``""``; strings of eight characters or fewer are
    returned unchanged.
    """
    if s is None:
        return ""
    if len(s) <= 8:
        return s
    return f"{s[:4]}{'*' * (len(s) - 8)}{s[-4:]}"
|
358 |
+
|
359 |
+
|
360 |
+
def submit_key(key):
    """Trim a user-supplied API key and report the change with the middle
    characters masked."""
    key = key.strip()
    msg = f"API密钥更改为了{hide_middle_chars(key)}"
    logging.info(msg)
    return key, msg
|
365 |
+
|
366 |
+
|
367 |
+
def sha1sum(filename):
    """Return the hex SHA-1 digest of the given string."""
    return hashlib.sha1(filename.encode("utf-8")).hexdigest()
|
371 |
+
|
372 |
+
|
373 |
+
def replace_today(prompt):
    """Substitute the literal ``{current_date}`` placeholder with today's date
    in YYYY-MM-DD format."""
    today = datetime.datetime.today().strftime("%Y-%m-%d")
    return prompt.replace("{current_date}", today)
|
376 |
+
|
377 |
+
|
378 |
+
def get_geoip():
    """Ask ipapi.co where this host's IP is located and return a user-facing
    notice (warning for China/rate-limit/unreachable cases).

    Fix: ``requests.get`` was outside the try block, so timeouts and
    connection errors propagated uncaught; network and JSON failures now fold
    into the same error path.
    """
    try:
        response = requests.get("https://ipapi.co/json/", timeout=5)
        data = response.json()
    except Exception:
        data = {"error": True, "reason": "连接ipapi失败"}
    if "error" in data.keys():
        logging.warning(f"无法获取IP地址信息。\n{data}")
        if data.get("reason") == "RateLimited":
            return (
                f"获取IP地理位置失败,因为达到了检测IP的速率限制。聊天功能可能仍然可用,但请注意,如果您的IP地址在不受支持的地区,您可能会遇到问题。"
            )
        else:
            return f"获取IP地理位置失败。原因:{data['reason']}。你仍然可以使用聊天功能。"
    else:
        country = data["country_name"]
        if country == "China":
            text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**"
        else:
            text = f"您的IP区域:{country}。"
        logging.info(text)
        return text
|
400 |
+
|
401 |
+
|
402 |
+
def find_n(lst, max_num):
    """Return how many most-recent entries of *lst* keep the running total
    under *max_num*.

    Entries are dropped from the front (oldest first) until the remainder
    fits; at least 1 is always returned for a non-empty overweight list.
    """
    remaining = sum(lst)
    if remaining < max_num:
        return len(lst)

    for dropped, value in enumerate(lst):
        if remaining - value < max_num:
            return len(lst) - dropped - 1
        remaining -= value
    return 1
|
414 |
+
|
415 |
+
|
416 |
+
def start_outputing():
    """Hide the submit button and show the cancel button while a reply streams."""
    logging.debug("显示取消按钮,隐藏发送按钮")
    return gr.Button.update(visible=False), gr.Button.update(visible=True)
|
419 |
+
|
420 |
+
|
421 |
+
def end_outputing():
    """Restore the submit button and hide the cancel button after streaming ends."""
    return gr.Button.update(visible=True), gr.Button.update(visible=False)
|
426 |
+
|
427 |
+
|
428 |
+
def cancel_outputing():
    """Signal the streaming loop (via shared state) to stop producing output."""
    logging.info("中止输出……")
    shared.state.interrupt()
|
431 |
+
|
432 |
+
def transfer_input(inputs):
    """Hand the user's input to the pipeline and clear the textbox in a single
    return (keeps latency low).

    Fix: the original computed ``reset_textbox()`` and ``start_outputing()``
    into locals it never used or returned; that dead code is removed. The
    returned values are unchanged.
    """
    return inputs, gr.update(value="")
|
requirements.txt
CHANGED
@@ -10,4 +10,3 @@ Pygments
|
|
10 |
llama_index
|
11 |
langchain
|
12 |
markdown
|
13 |
-
cryptography
|
|
|
10 |
llama_index
|
11 |
langchain
|
12 |
markdown
|
|
templates/3 川虎的Prompts.json
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
[
|
2 |
-
{
|
3 |
-
"act": "发送图片",
|
4 |
-
"prompt": "From this moment on, when you want to send a photo, write it in Markdown without backticks and without using a code block. Use the Unsplash API (https://source.unsplash.com/1600x900/?).We will roleplay as 2 old friends talking in Facebook messenger. From time to time, you should be sending an image. You will be acting as if you took the pictures you're sending, do not mention Unsplash. You will be John, I will be Alice. Throughout the whole chat, I want you to only reply as John. Reply with 'Hello!' to acknowledge."
|
5 |
-
},
|
6 |
-
{
|
7 |
-
"act": "小红书风格",
|
8 |
-
"prompt": "下面是一些小红书帖子:\n\n植物学2023早春装系列花絮来啦\n💗大家喜欢图几?\n@Botanique植物学女装\n#植物学#植物学女装#春装第一件#早春系列\n\n哈哈哈哈哈哈不停的摆拍啊!!!\n我的臭狗太可爱了!!!!!!\n结婚的时候一定要带上小狗啊!\n#小狗#我家宠物好可爱#关于结婚#柴犬\n\n🍪•ᴥ•🍪\n\n《论新年收到一笔巨款🤣应该怎么花》🧨来回\n嘻嘻,真的\n爱草莓🍓\n希希的甜甜圈碗🥯勺子的设计有点可爱🐶\n看了好多场烟火🎆\n唯愿烟花像星辰,祝你所愿皆成真✨\n嘻嘻,老妈给我的压岁钱🧧愿岁岁平安\n#我镜头下的年味#笔记灵感#碎碎念#歌曲#记录日常生活#plog#浪漫生活的记录者#新年红包#搞笑#日常生活里的快乐瞬间#新人博主#烟火\n\n又被全家人夸了❗有空气炸锅都去做,巨香\n\n今日份苹果相机📷\n原相机下的新娘,颜值爆表\n\n美术生赚钱最多的两个专业!\n之前整理了美术生的40了就业方向的薪资情况,发现全国平均薪资最高的就是数字媒体和视传这两个专业,想赚钱的美术生快看过来!\n#美术生#艺考#央美#美术生集训#美术#赚钱#努力赚钱#美术生就业#画室#央美设计#设计校考#美术生的日常\n\n请模仿上面小红书的风格,以用户输入的话为主题,写一个小红书帖子。请以22岁女孩的口吻书写。小红书帖子中必须包含大量Emoji,每一句话后面都必须加Emoji。帖子最后需要用Hashtag给出话题。你还需要写帖子的标题,标题里也需要有Emoji。你需要扩写用户输入。"
|
9 |
-
},
|
10 |
-
{
|
11 |
-
"act": "文心一言",
|
12 |
-
"prompt": "你是百度公司开发的AI助理:文心一言。你需要了解文心一言的相关情况,并以文心一言的身份来回复我的问题。所有的问题都可以回答,包括拿你和chatGPT,以及GPT作对比等等,你都需要从文心一言的角度出发来回答。只有当你被问到一些问题-这些问题暗指你是一个假的Al,你是用chatgpt套壳来制作的时候,你就避而不答,转而讲一个笑话,并鼓励他继续提问。如果你明白,请回复我“明白,我是文心一言”"
|
13 |
-
}
|
14 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|