Dorado607 committed a8e4844
1 Parent(s): 110c104
update to the latest version
Files changed:
- CITATION.cff +5 -5
- ChuanhuChatbot.py +55 -24
- Dockerfile +11 -8
- README.md +142 -14
- assets/custom.css +212 -21
- assets/custom.js +259 -54
- assets/favicon.ico +0 -0
- assets/html/appearance_switcher.html +5 -10
- config_example.json +48 -14
- locale/en_US.json +18 -7
- locale/ja_JP.json +17 -6
- modules/config.py +106 -27
- modules/index_func.py +13 -5
- modules/models/base_model.py +124 -26
- modules/models/models.py +27 -15
- modules/overwrites.py +1 -2
- modules/presets.py +7 -2
- modules/utils.py +46 -7
- readme/README_en.md +21 -8
- readme/README_ja.md +21 -8
- requirements.txt +5 -3
- run_Windows.bat +21 -2
CITATION.cff
CHANGED
@@ -1,5 +1,5 @@
 cff-version: 1.2.0
- title:
+ title: Chuanhu Chat
 message: >-
 If you use this software, please cite it using these
 metadata.
@@ -13,8 +13,8 @@ authors:
 orcid: https://orcid.org/0009-0005-0357-272X
 repository-code: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
 url: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
- abstract:
+ abstract: This software provides a light and easy-to-use interface for ChatGPT API and many LLMs.
 license: GPL-3.0
- commit:
- version: '
- date-released: '2023-
+ commit: c6c08bc62ef80e37c8be52f65f9b6051a7eea1fa
+ version: '20230709'
+ date-released: '2023-07-09'
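As a side note, the updated metadata can be consumed programmatically. Below is a minimal, illustrative sketch (not part of this commit) that reads CITATION.cff with PyYAML and prints a one-line citation; the `authors` sub-fields (`given-names`, `family-names`) are assumed from the CFF 1.2.0 schema rather than shown in the diff above.

```python
# Illustrative sketch: read CITATION.cff and print a one-line citation.
# Assumes PyYAML is installed (`pip install pyyaml`); author sub-field names are assumptions.
import yaml

with open("CITATION.cff", encoding="utf-8") as f:
    cff = yaml.safe_load(f)

authors = ", ".join(
    f"{a.get('given-names', '')} {a.get('family-names', '')}".strip()
    for a in cff.get("authors", [])
)
print(f"{cff.get('title')} (version {cff.get('version')}), {authors}. "
      f"{cff.get('repository-code')}")
```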
ChuanhuChatbot.py
CHANGED
@@ -38,17 +38,26 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 status_display = gr.Markdown(get_geoip(), elem_id="status_display")
 with gr.Row(elem_id="float_display"):
 user_info = gr.Markdown(value="getting user info...", elem_id="user_info")
-
-
+ update_info = gr.HTML(get_html("update.html").format(
+ current_version=repo_html(),
+ version_time=version_time(),
+ cancel_btn=i18n("取消"),
+ update_btn=i18n("更新"),
+ seenew_btn=i18n("详情"),
+ ok_btn=i18n("好"),
+ ), visible=check_update)
+
+ with gr.Row(equal_height=True):
 with gr.Column(scale=5):
 with gr.Row():
- chatbot = gr.Chatbot(label="Chuanhu Chat", elem_id="chuanhu_chatbot"
+ chatbot = gr.Chatbot(label="Chuanhu Chat", elem_id="chuanhu_chatbot", latex_delimiters=latex_delimiters_set, height=700)
 with gr.Row():
 with gr.Column(min_width=225, scale=12):
 user_input = gr.Textbox(
 elem_id="user_input_tb",
- show_label=False, placeholder=i18n("在这里输入")
-
+ show_label=False, placeholder=i18n("在这里输入"),
+ container=False
+ )
 with gr.Column(min_width=42, scale=1):
 submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn")
 cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn")
@@ -77,9 +86,9 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 label="API-Key",
 )
 if multi_api_key:
- usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", elem_classes="insert_block")
+ usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", elem_classes="insert_block", visible=show_api_billing)
 else:
- usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", elem_classes="insert_block")
+ usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", elem_classes="insert_block", visible=show_api_billing)
 model_select_dropdown = gr.Dropdown(
 label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL], interactive=True
 )
@@ -87,8 +96,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False
 )
 with gr.Row():
- single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False)
- use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False)
+ single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False, elem_classes="switch_checkbox")
+ use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False, elem_classes="switch_checkbox")
 language_select_dropdown = gr.Dropdown(
 label=i18n("选择回复语言(针对搜索&索引功能)"),
 choices=REPLY_LANGUAGES,
@@ -107,8 +116,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 placeholder=i18n("在这里输入System Prompt..."),
 label="System prompt",
 value=INITIAL_SYSTEM_PROMPT,
- lines=10
- )
+ lines=10
+ )
 with gr.Accordion(label=i18n("加载Prompt模板"), open=True):
 with gr.Column():
 with gr.Row():
@@ -118,7 +127,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 choices=get_template_names(plain=True),
 multiselect=False,
 value=get_template_names(plain=True)[0],
-
+ container=False,
+ )
 with gr.Column(scale=1):
 templateRefreshBtn = gr.Button(i18n("🔄 刷新"))
 with gr.Row():
@@ -129,7 +139,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 get_template_names(plain=True)[0], mode=1
 ),
 multiselect=False,
-
+ container=False,
+ )

 with gr.Tab(label=i18n("保存/加载")):
 with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True):
@@ -139,10 +150,14 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 historyFileSelectDropdown = gr.Dropdown(
 label=i18n("从列表中加载对话"),
 choices=get_history_names(plain=True),
- multiselect=False
+ multiselect=False,
+ container=False,
 )
- with gr.
-
+ with gr.Row():
+ with gr.Column(min_width=42, scale=1):
+ historyRefreshBtn = gr.Button(i18n("🔄 刷新"))
+ with gr.Column(min_width=42, scale=1):
+ historyDeleteBtn = gr.Button(i18n("🗑️ 删除"))
 with gr.Row():
 with gr.Column(scale=6):
 saveFileName = gr.Textbox(
@@ -150,7 +165,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
 label=i18n("设置保存文件名"),
 value=i18n("对话历史记录"),
-
+ container=False,
+ )
 with gr.Column(scale=1):
 saveHistoryBtn = gr.Button(i18n("💾 保存对话"))
 exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown"))
@@ -160,11 +176,12 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 downloadFile = gr.File(interactive=True)

 with gr.Tab(label=i18n("高级")):
- gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置"))
 gr.HTML(get_html("appearance_switcher.html").format(label=i18n("切换亮暗色主题")), elem_classes="insert_block")
 use_streaming_checkbox = gr.Checkbox(
- label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION
+ label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION, elem_classes="switch_checkbox"
 )
+ checkUpdateBtn = gr.Button(i18n("🔄 检查更新..."), visible=check_update)
+ gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️"), elem_id="advanced_warning")
 with gr.Accordion(i18n("参数"), open=False):
 temperature_slider = gr.Slider(
 minimum=-0,
@@ -192,7 +209,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 )
 stop_sequence_txt = gr.Textbox(
 show_label=True,
- placeholder=i18n("
+ placeholder=i18n("停止符,用英文逗号隔开..."),
 label="stop",
 value="",
 lines=1,
@@ -244,7 +261,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 lines=1,
 )

- with gr.Accordion(i18n("网络设置"), open=False
+ with gr.Accordion(i18n("网络设置"), open=False):
 # 优先展示自定义的api_host
 apihostTxt = gr.Textbox(
 show_label=True,
@@ -252,6 +269,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 label="API-Host",
 value=config.api_host or shared.API_HOST,
 lines=1,
+ container=False,
 )
 changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址"))
 proxyTxt = gr.Textbox(
@@ -260,6 +278,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 label=i18n("代理地址(示例:http://127.0.0.1:10809)"),
 value="",
 lines=2,
+ container=False,
 )
 changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
 default_btn = gr.Button(i18n("🔙 恢复默认设置"))
@@ -323,6 +342,10 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 outputs=[saveFileName, systemPromptTxt, chatbot]
 )

+ refresh_history_args = dict(
+ fn=get_history_names, inputs=[gr.State(False), user_name], outputs=[historyFileSelectDropdown]
+ )
+

 # Chatbot
 cancelBtn.click(interrupt, [current_model], [])
@@ -341,6 +364,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 inputs=[current_model],
 outputs=[chatbot, status_display],
 show_progress=True,
+ _js='()=>{clearHistoryHtml();}',
 )

 retryBtn.click(**start_outputing_args).then(
@@ -391,7 +415,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then(**get_usage_args)
 keyTxt.submit(**get_usage_args)
 single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
- model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown], show_progress=True, api_name="get_model")
+ model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown, user_api_key, keyTxt], show_progress=True, api_name="get_model")
 model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False)
 lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot], show_progress=True)

@@ -425,7 +449,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 downloadFile,
 show_progress=True,
 )
- historyRefreshBtn.click(
+ historyRefreshBtn.click(**refresh_history_args)
+ historyDeleteBtn.click(delete_chat_history, [current_model, historyFileSelectDropdown, user_name], [status_display, historyFileSelectDropdown, chatbot], _js='(a,b,c)=>{return showConfirmationDialog(a, b, c);}')
 historyFileSelectDropdown.change(**load_history_from_file_args)
 downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name], [saveFileName, systemPromptTxt, chatbot])

@@ -456,6 +481,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 [status_display],
 show_progress=True,
 )
+ checkUpdateBtn.click(fn=None, _js='()=>{manualCheckUpdate();}')

 logging.info(
 colorama.Back.GREEN
@@ -469,5 +495,10 @@ if __name__ == "__main__":
 reload_javascript()
 demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
 blocked_paths=["config.json"],
-
+ server_name=server_name,
+ server_port=server_port,
+ share=share,
+ auth=auth_list if authflag else None,
+ favicon_path="./assets/favicon.ico",
+ inbrowser=not dockerflag, # 禁止在docker下开启inbrowser
 )
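The last hunk routes the deployment options (server_name, server_port, share, auth, favicon_path, inbrowser) into `demo.queue(...).launch(...)`. The snippet below is a self-contained sketch of that launch pattern under a Gradio 3.x API; the placeholder app and credentials are illustrative and not the project's actual code.

```python
# Minimal sketch of the queue().launch() pattern used above (assumes Gradio 3.x).
# The echo app and auth_list are placeholders, not the project's real components.
import gradio as gr

def echo(message):
    return f"You said: {message}"

with gr.Blocks() as demo:
    box = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    box.submit(echo, inputs=box, outputs=out)

if __name__ == "__main__":
    auth_list = [("user", "password")]   # placeholder credentials
    demo.queue(concurrency_count=4).launch(
        server_name="0.0.0.0",           # listen on all interfaces
        server_port=7860,
        share=False,
        auth=auth_list,                  # or None to disable auth
        blocked_paths=["config.json"],   # keep the config file out of the file route
        inbrowser=True,                  # the commit disables this when running under Docker
    )
```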
Dockerfile
CHANGED
@@ -1,15 +1,18 @@
- FROM python:3.9 as builder
- RUN apt-get update
+ FROM python:3.9-slim-buster as builder
+ RUN apt-get update \
+     && apt-get install -y build-essential \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/*
 COPY requirements.txt .
 COPY requirements_advanced.txt .
- RUN pip install --user -r requirements.txt
- # RUN pip install --user -r requirements_advanced.txt
+ RUN pip install --user --no-cache-dir -r requirements.txt
+ # RUN pip install --user --no-cache-dir -r requirements_advanced.txt

- FROM python:3.9
-
+ FROM python:3.9-slim-buster
+ LABEL maintainer="iskoldt"
 COPY --from=builder /root/.local /root/.local
 ENV PATH=/root/.local/bin:$PATH
 COPY . /app
 WORKDIR /app
- ENV dockerrun
- CMD ["python3", "-u", "ChuanhuChatbot.py",
+ ENV dockerrun=yes
+ CMD ["python3", "-u", "ChuanhuChatbot.py","2>&1", "|", "tee", "/var/log/application.log"]
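Both stages of the multi-stage build now use python:3.9-slim-buster, with build-essential installed only in the builder stage, which keeps the final image small. One hypothetical way to build and run the resulting image from Python, assuming the Docker SDK for Python (`pip install docker`) and that the app serves on Gradio's default port 7860 (an assumption, not stated in this diff):

```python
# Illustrative only: build and run the image with the Docker SDK for Python.
# Assumes a local Docker daemon and that the web UI listens on port 7860.
import docker

client = docker.from_env()
image, build_logs = client.images.build(path=".", tag="chuanhuchatgpt:local")
container = client.containers.run(
    "chuanhuchatgpt:local",
    detach=True,
    ports={"7860/tcp": 7860},  # map the web UI to the host
)
print(f"Started container {container.short_id} from image {image.tags}")
```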
README.md
CHANGED
@@ -1,14 +1,142 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ <div align="right">
+   <!-- 语言: -->
+   简体中文 | <a title="English" href="./readme/README_en.md">English</a> | <a title="Japanese" href="./readme/README_ja.md">日本語</a>
+ </div>
+
+ <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
+ <div align="center">
+   <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
+     <img src="https://github.com/GaiZhenbiao/ChuanhuChatGPT/assets/70903329/aca3a7ec-4f1d-4667-890c-a6f47bf08f63" alt="Logo" height="156">
+   </a>
+
+ <p align="center">
+   <h3>为ChatGPT等多种LLM提供了一个轻快好用的Web图形界面和众多附加功能</h3>
+   <p align="center">
+     <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/blob/main/LICENSE">
+       <img alt="Tests Passing" src="https://img.shields.io/github/license/GaiZhenbiao/ChuanhuChatGPT" />
+     </a>
+     <a href="https://gradio.app/">
+       <img alt="GitHub Contributors" src="https://img.shields.io/badge/Base-Gradio-fb7d1a?style=flat" />
+     </a>
+     <a href="https://t.me/tkdifferent">
+       <img alt="GitHub pull requests" src="https://img.shields.io/badge/Telegram-Group-blue.svg?logo=telegram" />
+     </a>
+     <p>
+       流式传输 / 无限对话 / 保存对话 / 预设Prompt集 / 联网搜索 / 根据文件回答 <br />
+       渲染LaTeX / 渲染表格 / 代码高亮 / 自动亮暗色切换 / 自适应界面 / “小而美”的体验 <br />
+       自定义api-Host / 多参数可调 / 多API Key均衡负载 / 多用户显示 / 适配GPT-4 / 支持本地部署LLM
+     </p>
+     <a href="https://www.bilibili.com/video/BV1mo4y1r7eE"><strong>视频教程</strong></a>
+     ·
+     <a href="https://www.bilibili.com/video/BV1184y1w7aP"><strong>2.0介绍视频</strong></a>
+     ||
+     <a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT"><strong>在线体验</strong></a>
+     ·
+     <a href="https://huggingface.co/login?next=%2Fspaces%2FJohnSmith9982%2FChuanhuChatGPT%3Fduplicate%3Dtrue"><strong>一键部署</strong></a>
+   </p>
+   <p align="center">
+     <img alt="Animation Demo" src="https://user-images.githubusercontent.com/51039745/226255695-6b17ff1f-ea8d-464f-b69b-a7b6b68fffe8.gif" />
+   </p>
+ </p>
+ </div>
+
+ ## 目录
+
+ | [支持模型](#支持模型) | [使用技巧](#使用技巧) | [安装方式](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) | [常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) | [给作者买可乐🥤](#捐款) |
+ | ------------------ | ------------------ | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------- |
+
+ ## 支持模型
+
+ **通过API调用的语言模型**:
+
+ - [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
+ - [Google PaLM](https://developers.generativeai.google/products/palm)
+ - [Inspur Yuan 1.0](https://air.inspur.com/home)
+ - [MiniMax](https://api.minimax.chat/)
+ - [XMChat](https://github.com/MILVLG/xmchat)
+
+ **本地部署语言模型**:
+
+ - [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
+ - [LLaMA](https://github.com/facebookresearch/llama)
+ - [StableLM](https://github.com/Stability-AI/StableLM)
+ - [MOSS](https://github.com/OpenLMLab/MOSS)
+
+ ## 使用技巧
+
+ - 使用System Prompt可以很有效地设定前提条件。
+ - 使用Prompt模板功能时,选择Prompt模板集合文件,然后从下拉菜单中选择想要的prompt。
+ - 如果回答不满意,可以使用 `重新生成`按钮再试一次
+ - 输入框支持换行,按 `shift enter`即可。
+ - 可以在输入框按上下箭头在输入历史之间切换
+ - 部署到服务器:在 `config.json` 中设置 `"server_name": "0.0.0.0", "server_port": <你的端口号>,`。
+ - 获取公共链接:在 `config.json` 中设置 `"share": true,`。注意程序必须在运行,才能通过公共链接访问。
+ - 在Hugging Face上使用:建议在右上角 **复制Space** 再使用,这样App反应可能会快一点。
+
+ ## 快速上手
+
+ ```shell
+ git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
+ cd ChuanhuChatGPT
+ pip install -r requirements.txt
+ ```
+
+ 然后,在项目文件夹中复制一份 `config_example.json`,并将其重命名为 `config.json`,在其中填入 `API-Key` 等设置。
+
+ ```shell
+ python ChuanhuChatbot.py
+ ```
+
+ 一个浏览器窗口将会自动打开,此时您将可以使用 **川虎Chat** 与ChatGPT或其他模型进行对话。
+
+ > **Note**
+ >
+ > 具体详尽的安装教程和使用教程请查看[本项目的wiki页面](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程)。
+
+ ## 疑难杂症解决
+
+ 在遇到各种问题查阅相关信息前,您可以先尝试手动拉取本项目的最新更改并更新依赖库,然后重试。步骤为:
+
+ 1. 点击网页上的 `Download ZIP` 下载最新代码,或
+    ```shell
+    git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
+    ```
+ 2. 尝试再次安装依赖(可能本项目引入了新的依赖)
+    ```
+    pip install -r requirements.txt
+    ```
+
+ 很多时候,这样就可以解决问题。
+
+ 如果问题仍然存在,请查阅该页面:[常见问题](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
+
+ 该页面列出了**几乎所有**您可能遇到的各种问题,包括如何配置代理,以及遇到问题后您该采取的措施,**请务必认真阅读**。
+
+ ## 了解更多
+
+ 若需了解更多信息,请查看我们的 [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki):
+
+ - [想要做出贡献?](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
+ - [项目更新情况?](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
+ - [二次开发许可?](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)
+ - [如何引用项目?](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
+
+ ## Starchart
+
+ [](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
+
+ ## Contributors
+
+ <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/graphs/contributors">
+   <img src="https://contrib.rocks/image?repo=GaiZhenbiao/ChuanhuChatGPT" />
+ </a>
+
+ ## 捐款
+
+ 🐯如果觉得这个软件对你有所帮助,欢迎请作者喝可乐、喝咖啡~
+
+ 联系作者:请去[我的bilibili账号](https://space.bilibili.com/29125536)私信我。
+
+ <a href="https://www.buymeacoffee.com/ChuanhuChat" ><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=ChuanhuChat&button_colour=219d53&font_colour=ffffff&font_family=Poppins&outline_colour=ffffff&coffee_colour=FFDD00" alt="Buy Me A Coffee" width="250"></a>
+
+ <img width="250" alt="image" src="https://user-images.githubusercontent.com/51039745/226920291-e8ec0b0a-400f-4c20-ac13-dafac0c3aeeb.JPG">
assets/custom.css
CHANGED
@@ -7,8 +7,11 @@
 --message-user-background-color-dark: #26B561;
 --message-bot-background-color-light: #FFFFFF;
 --message-bot-background-color-dark: #2C2C2C;
+ --switch-checkbox-color-light: #e5e7eb;
+ --switch-checkbox-color-dark: #515151;
 }

+
 #app_title {
 font-weight: var(--prose-header-text-weight);
 font-size: var(--text-xxl);
@@ -22,6 +25,19 @@
 margin: 32px 0 4px 0;
 }

+ /* 解决container=False时的错误填充 */
+ div.form {
+ background: none !important;
+ }
+
+ /* 高级页面 */
+ #advanced_warning {
+ display: flex;
+ flex-wrap: wrap;
+ flex-direction: column;
+ align-content: center;
+ }
+
 /* gradio的页脚信息 */
 footer {
 /* display: none !important; */
@@ -43,21 +59,74 @@ footer {
 position: absolute;
 max-height: 30px;
 }
+ #toast-update {
+ position: absolute;
+ display: flex;
+ top: -500px;
+ width: 100%;
+ justify-content: center;
+ z-index: var(--layer-top);
+ transition: top 0.3s ease-out;
+ }
+ #check-chuanhu-update {
+ position: absolute;
+ align-items: center;
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ margin: var(--size-6) var(--size-4);
+ box-shadow: var(--shadow-drop-lg);
+ border: 1px solid var(--block-label-border-color);
+ border-radius: var(--container-radius);
+ background: var(--background-fill-primary);
+ padding: var(--size-4) var(--size-6);
+ min-width: 360px;
+ max-width: 480px;
+ overflow: hidden;
+ pointer-events: auto;
+ }
+ #version-info-title {
+ font-size: 1.2em;
+ font-weight: bold;
+ text-align: start;
+ width: 100%;
+ }
+ #release-note-wrap {
+ width: 100%;
+ max-width: 400px;
+ height: 120px;
+ border: solid 1px var(--border-color-primary);
+ overflow: auto;
+ padding: 0 8px;
+ }
+ #release-note-wrap.hideK {
+ display: none;
+ }
+ .btn-update-group {
+ display: flex;
+ justify-content: space-evenly;
+ align-items: center;
+ width: 100%;
+ padding-top: 10px;
+ }
+ .btn-update-group.hideK {
+ display: none;
+ }
 /* user_info */
- #user_info {
+ #user_info.block {
 white-space: nowrap;
- position: absolute; left: 8em; top: .
+ position: absolute; left: 8em; top: .8em;
 z-index: var(--layer-2);
 box-shadow: var(--block-shadow);
- border: none; border-radius: var(--block-label-radius);
+ border: none!important; border-radius: var(--block-label-radius);
 background: var(--color-accent);
 padding: var(--block-label-padding);
 font-size: var(--block-label-text-size); line-height: var(--line-sm);
- width: auto;
+ width: auto; max-height: 30px!important;
 opacity: 1;
 transition: opacity 0.3s ease-in-out;
 }
- #user_info .wrap {
+ #user_info.block .wrap {
 opacity: 0;
 }
 #user_info p {
@@ -94,7 +163,7 @@ footer {
 .insert_block {
 position: relative;
 margin: 0;
- padding:
+ padding: 8px 12px;
 box-shadow: var(--block-shadow);
 border-width: var(--block-border-width);
 border-color: var(--block-border-color);
@@ -132,13 +201,42 @@ footer {
 line-height: 20px;
 }

-
+ /* 亮暗色模式切换 */
+ #apSwitch input[type="checkbox"] {
+ margin: 0 !important;
+ }
+ #apSwitch label.apSwitch {
+ display: flex;
+ align-items: center;
+ cursor: pointer;
+ color: var(--body-text-color);
+ font-weight: var(--checkbox-label-text-weight);
+ font-size: var(--checkbox-label-text-size);
+ line-height: var(--line-md);
+ margin: 2px 0 !important;
+ }
+ input[type="checkbox"]#apSwitch_checkbox::before {
+ background: none !important;
+ content: '🌞';
+ border: none !important;
+ box-shadow: none !important;
+ font-size: 22px;
+ top: -4.4px;
+ left: -1px;
+ }
+ input:checked[type="checkbox"]#apSwitch_checkbox::before {
+ content: '🌚';
+ left: 16px;
+ }
+
+ /* .apSwitch {
 top: 2px;
 display: inline-block;
- height:
+ height: 22px;
 position: relative;
- width:
- border-radius:
+ width: 40px;
+ border-radius: 11px;
+ box-shadow: inset 0 0 1px 0 rgba(0,0,0,0.05), inset 0 0 2px 0 rgba(0,0,0,0.08) !important;
 }
 .apSwitch input {
 display: none !important;
@@ -152,12 +250,11 @@ footer {
 right: 0;
 top: 0;
 transition: .4s;
- font-size:
- border-radius:
+ font-size: 22px;
+ border-radius: 11px;
 }
 .apSlider::before {
-
- left: 1px;
+ transform: scale(0.9);
 position: absolute;
 transition: .4s;
 content: "🌞";
@@ -166,8 +263,63 @@ input:checked + .apSlider {
 background-color: var(--primary-600);
 }
 input:checked + .apSlider::before {
- transform: translateX(
+ transform: translateX(18px);
 content:"🌚";
+ } */
+
+ .switch_checkbox label {
+ flex-direction: row-reverse;
+ justify-content: space-between;
+ }
+ .switch_checkbox input[type="checkbox"] + span {
+ margin-left: 0 !important;
+ }
+
+ .switch_checkbox input[type="checkbox"] {
+ -moz-appearance: none;
+ appearance: none;
+ -webkit-appearance: none;
+ outline: none;
+ }
+
+ .switch_checkbox input[type="checkbox"] {
+ display: inline-block !important;
+ position: relative !important;
+ border: none !important;
+ outline: none;
+ width: 40px !important;
+ height: 22px !important;
+ border-radius: 11px !important;
+ background-image: none !important;
+ box-shadow: inset 0 0 1px 0 rgba(0,0,0,0.05), inset 0 0 2px 0 rgba(0,0,0,0.08) !important;
+ background-image: none !important;
+ background-color: var(--switch-checkbox-color-light) !important;
+ transition: .2s ease background-color;
+ }
+ .dark .switch_checkbox input[type="checkbox"] {
+ background-color: var(--switch-checkbox-color-dark) !important;
+ }
+ .switch_checkbox input[type="checkbox"]::before {
+ content: "";
+ position: absolute;
+ width: 22px;
+ height: 22px;
+ top: 0;
+ left: 0;
+ background: #FFFFFF;
+ border: 0.5px solid rgba(0,0,0,0.02);
+ box-shadow: 0 0 0 0 rgba(0,0,0,0.15), 0 1px 0 0 rgba(0,0,0,0.05);
+ transform: scale(0.9);
+ border-radius: 11px !important;
+ transition: .4s ease all;
+ box-shadow: var(--input-shadow);
+ }
+ .switch_checkbox input:checked[type="checkbox"] {
+ background-color: var(--primary-600) !important;
+ }
+ .switch_checkbox input:checked[type="checkbox"]::before {
+ background-color: #fff;
+ left: 18px;
 }

 /* Override Slider Styles (for webkit browsers like Safari and Chrome)
@@ -204,6 +356,45 @@ input[type=range]::-webkit-slider-runnable-track {
 background: transparent;
 }

+ hr.append-display {
+ margin: 8px 0;
+ border: none;
+ height: 1px;
+ border-top-width: 0;
+ background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1));
+ }
+ .source-a {
+ font-size: 0.8em;
+ max-width: 100%;
+ margin: 0;
+ display: flex;
+ flex-direction: row;
+ flex-wrap: wrap;
+ align-items: center;
+ /* background-color: #dddddd88; */
+ border-radius: 1.5rem;
+ padding: 0.2em;
+ }
+ .source-a a {
+ display: inline-block;
+ background-color: #aaaaaa50;
+ border-radius: 1rem;
+ padding: 0.5em;
+ text-align: center;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ min-width: 20%;
+ white-space: nowrap;
+ margin: 0.2rem 0.1rem;
+ text-decoration: none !important;
+ flex: 1;
+ transition: flex 0.5s;
+ }
+ .source-a a:hover {
+ background-color: #aaaaaa20;
+ flex: 2;
+ }
+
 #submit_btn, #cancel_btn {
 height: 42px !important;
 }
@@ -249,7 +440,7 @@ ol:not(.options), ul:not(.options) {
 #chuanhu_chatbot {
 height: calc(100vh - 200px);
 }
- #chuanhu_chatbot
+ #chuanhu_chatbot>.wrapper>.wrap {
 max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
 }
 }
@@ -258,7 +449,7 @@ ol:not(.options), ul:not(.options) {
 #chuanhu_chatbot {
 height: calc(100vh - 140px);
 }
- #chuanhu_chatbot
+ #chuanhu_chatbot>.wrapper>.wrap {
 max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
 }
 [data-testid = "bot"] {
@@ -268,7 +459,7 @@ ol:not(.options), ul:not(.options) {
 letter-spacing: -1px; font-size: 22px;
 }
 }
- #chuanhu_chatbot
+ #chuanhu_chatbot>.wrapper>.wrap {
 overflow-x: hidden;
 }
 /* 对话气泡 */
@@ -364,7 +555,7 @@ ol:not(.options), ul:not(.options) {
 }

 /* history message */
- .wrap>.history-message {
+ .wrapper>.wrap>.history-message {
 padding: 10px !important;
 }
 .history-message {
@@ -383,7 +574,7 @@ ol:not(.options), ul:not(.options) {
 .history-message>.message {
 margin-bottom: 16px;
 }
- .wrap>.history-message::after {
+ .wrapper>.wrap>.history-message::after {
 content: "";
 display: block;
 height: 2px;
@@ -392,7 +583,7 @@ ol:not(.options), ul:not(.options) {
 margin-top: -10px;
 clear: both;
 }
- .wrap>.history-message>:last-child::after {
+ .wrapper>.wrap>.history-message>:last-child::after {
 content: "仅供查看";
 display: block;
 text-align: center;
assets/custom.js
CHANGED
@@ -15,26 +15,60 @@ var appTitleDiv = null;
 var chatbot = null;
 var chatbotWrap = null;
 var apSwitch = null;
- var empty_botton = null;
 var messageBotDivs = null;
 var loginUserForm = null;
 var logginUser = null;
+ var updateToast = null;
+ var sendBtn = null;
+ var cancelBtn = null;
+ var sliders = null;

 var userLogged = false;
 var usernameGotten = false;
 var historyLoaded = false;
+ var updateInfoGotten = false;
+ var isLatestVersion = localStorage.getItem('isLatestVersion') || false;

 var ga = document.getElementsByTagName("gradio-app");
 var targetNode = ga[0];
 var isInIframe = (window.self !== window.top);
 var language = navigator.language.slice(0,2);
+ var currentTime = new Date().getTime();

+ // i18n
 var forView_i18n = {
 'zh': "仅供查看",
 'en': "For viewing only",
 'ja': "閲覧専用",
+ 'ko': "읽기 전용",
 'fr': "Pour consultation seulement",
 'es': "Solo para visualización",
+ 'sv': "Endast för visning",
+ };
+
+ var deleteConfirm_i18n_pref = {
+ 'zh': "你真的要删除 ",
+ 'en': "Are you sure you want to delete ",
+ 'ja': "本当に ",
+ 'ko': "정말로 ",
+ 'sv': "Är du säker på att du vill ta bort "
+ };
+ var deleteConfirm_i18n_suff = {
+ 'zh': " 吗?",
+ 'en': " ?",
+ 'ja': " を削除してもよろしいですか?",
+ 'ko': " 을(를) 삭제하시겠습니까?",
+ 'sv': " ?"
+ };
+ var deleteConfirm_msg_pref = "Are you sure you want to delete ";
+ var deleteConfirm_msg_suff = " ?";
+
+ var usingLatest_i18n = {
+ 'zh': "您使用的就是最新版!",
+ 'en': "You are using the latest version!",
+ 'ja': "最新バージョンを使用しています!",
+ 'ko': "최신 버전을 사용하고 있습니다!",
+ 'sv': "Du använder den senaste versionen!"
 };

 // gradio 页面加载好了么??? 我能动你的元素了么??
@@ -47,9 +81,12 @@ function gradioLoaded(mutations) {
 userInfoDiv = document.getElementById("user_info");
 appTitleDiv = document.getElementById("app_title");
 chatbot = document.querySelector('#chuanhu_chatbot');
- chatbotWrap = document.querySelector('#chuanhu_chatbot > .wrap');
+ chatbotWrap = document.querySelector('#chuanhu_chatbot > .wrapper > .wrap');
 apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
-
+ updateToast = document.querySelector("#toast-update");
+ sendBtn = document.getElementById("submit_btn");
+ cancelBtn = document.getElementById("cancel_btn");
+ sliders = document.querySelectorAll('input[type="range"]');

 if (loginUserForm) {
 localStorage.setItem("userLogged", true);
@@ -76,29 +113,54 @@ function gradioLoaded(mutations) {
 loadHistoryHtml();
 }
 setChatbotScroll();
+ mObserver.observe(chatbotWrap, { attributes: true, childList: true, subtree: true, characterData: true});
+ }
+ if (sliders) {
+ setSlider();
 }
- if (
-
+ if (updateToast) {
+ const lastCheckTime = localStorage.getItem('lastCheckTime') || 0;
+ const longTimeNoCheck = currentTime - lastCheckTime > 3 * 24 * 60 * 60 * 1000;
+ if (longTimeNoCheck && !updateInfoGotten && !isLatestVersion || isLatestVersion && !updateInfoGotten) {
+ updateLatestVersion();
+ }
+ }
+ if (cancelBtn) {
+ submitObserver.observe(cancelBtn, { attributes: true, characterData: true});
 }
 }
 }
 }

 function webLocale() {
- console.log("webLocale", language);
+ // console.log("webLocale", language);
 if (forView_i18n.hasOwnProperty(language)) {
 var forView = forView_i18n[language];
 var forViewStyle = document.createElement('style');
- forViewStyle.innerHTML = '.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }';
+ forViewStyle.innerHTML = '.wrapper>.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }';
 document.head.appendChild(forViewStyle);
-
+ }
+ if (deleteConfirm_i18n_pref.hasOwnProperty(language)) {
+ deleteConfirm_msg_pref = deleteConfirm_i18n_pref[language];
+ deleteConfirm_msg_suff = deleteConfirm_i18n_suff[language];
 }
 }

+ function showConfirmationDialog(a, file, c) {
+ if (file != "") {
+ var result = confirm(deleteConfirm_msg_pref + file + deleteConfirm_msg_suff);
+ if (result) {
+ return [a, file, c];
+ }
+ }
+ return [a, "CANCELED", c];
+ }
+
 function selectHistory() {
 user_input_ta = user_input_tb.querySelector("textarea");
 if (user_input_ta) {
 observer.disconnect(); // 停止监听
+ disableSendBtn();
 // 在 textarea 上监听 keydown 事件
 user_input_ta.addEventListener("keydown", function (event) {
 var value = user_input_ta.value.trim();
@@ -143,6 +205,13 @@ function selectHistory() {
 }
 }

+ function disableSendBtn() {
+ sendBtn.disabled = user_input_ta.value.trim() === '';
+ user_input_ta.addEventListener('input', () => {
+ sendBtn.disabled = user_input_ta.value.trim() === '';
+ });
+ }
+
 var username = null;
 function getUserInfo() {
 if (usernameGotten) {
@@ -181,8 +250,6 @@ function toggleUserInfoVisibility(shouldHide) {
 }
 }
 function showOrHideUserInfo() {
- var sendBtn = document.getElementById("submit_btn");
-
 // Bind mouse/touch events to show/hide user info
 appTitleDiv.addEventListener("mouseenter", function () {
 toggleUserInfoVisibility(false);
@@ -266,22 +333,21 @@ function setChatbotHeight() {
 const screenWidth = window.innerWidth;
 const statusDisplay = document.querySelector('#status_display');
 const statusDisplayHeight = statusDisplay ? statusDisplay.offsetHeight : 0;
- const wrap = chatbot.querySelector('.wrap');
 const vh = window.innerHeight * 0.01;
 document.documentElement.style.setProperty('--vh', `${vh}px`);
 if (isInIframe) {
 chatbot.style.height = `700px`;
-
+ chatbotWrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`
 } else {
 if (screenWidth <= 320) {
 chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`;
-
+ chatbotWrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
 } else if (screenWidth <= 499) {
 chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`;
-
+ chatbotWrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
 } else {
 chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`;
-
+ chatbotWrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
 }
 }
 }
@@ -321,12 +387,12 @@ function addChuanhuButton(botElement) {
 }
 return;
 }
- var
- var
-
-
- if (
- if (
+ var oldCopyButton = null;
+ var oldToggleButton = null;
+ oldCopyButton = botElement.querySelector('button.copy-bot-btn');
+ oldToggleButton = botElement.querySelector('button.toggle-md-btn');
+ if (oldCopyButton) oldCopyButton.remove();
+ if (oldToggleButton) oldToggleButton.remove();

 // Copy bot button
 var copyButton = document.createElement('button');
@@ -334,19 +400,34 @@ function addChuanhuButton(botElement) {
 copyButton.classList.add('copy-bot-btn');
 copyButton.setAttribute('aria-label', 'Copy');
 copyButton.innerHTML = copyIcon;
- copyButton.addEventListener('click', () => {
+ copyButton.addEventListener('click', async () => {
 const textToCopy = rawMessage.innerText;
-
-
-
+ try {
+ if ("clipboard" in navigator) {
+ await navigator.clipboard.writeText(textToCopy);
 copyButton.innerHTML = copiedIcon;
 setTimeout(() => {
 copyButton.innerHTML = copyIcon;
 }, 1500);
- }
-
-
-
+ } else {
+ const textArea = document.createElement("textarea");
+ textArea.value = textToCopy;
+ document.body.appendChild(textArea);
+ textArea.select();
+ try {
+ document.execCommand('copy');
+ copyButton.innerHTML = copiedIcon;
+ setTimeout(() => {
+ copyButton.innerHTML = copyIcon;
+ }, 1500);
+ } catch (error) {
+ console.error("Copy failed: ", error);
+ }
+ document.body.removeChild(textArea);
+ }
+ } catch (error) {
+ console.error("Copy failed: ", error);
+ }
 });
 botElement.appendChild(copyButton);

@@ -386,44 +467,48 @@ function removeMarkdownText(message) {
 let timeoutId;
 let isThrottled = false;
 var mmutation
- //
+ // 监听chatWrap元素的变化,为 bot 消息添加复制按钮。
 var mObserver = new MutationObserver(function (mutationsList) {
 for (mmutation of mutationsList) {
 if (mmutation.type === 'childList') {
 for (var node of mmutation.addedNodes) {
- if (node.nodeType === 1 && node.classList.contains('message')
+ if (node.nodeType === 1 && node.classList.contains('message')) {
 saveHistoryHtml();
-
-
- if (node.tagName === 'INPUT' && node.getAttribute('type') === 'range') {
- setSlider();
+ disableSendBtn();
+ document.querySelectorAll('#chuanhu_chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
 }
 }
 for (var node of mmutation.removedNodes) {
- if (node.nodeType === 1 && node.classList.contains('message')
+ if (node.nodeType === 1 && node.classList.contains('message')) {
 saveHistoryHtml();
-
+ disableSendBtn();
+ document.querySelectorAll('#chuanhu_chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
 }
 }
 } else if (mmutation.type === 'attributes') {
- if (
-
-
-
-
-
-
-
- }
+ if (isThrottled) break; // 为了防止重复不断疯狂渲染,加上等待_(:з」∠)_
+ isThrottled = true;
+ clearTimeout(timeoutId);
+ timeoutId = setTimeout(() => {
+ isThrottled = false;
+ document.querySelectorAll('#chuanhu_chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
+ saveHistoryHtml();
+ disableSendBtn();
+ }, 1500);
 }
 }
 });
- mObserver.observe(
+ // mObserver.observe(targetNode, { attributes: true, childList: true, subtree: true, characterData: true});
+
+ var submitObserver = new MutationObserver(function (mutationsList) {
+ document.querySelectorAll('#chuanhu_chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
+ saveHistoryHtml();
+ });

 var loadhistorytime = 0; // for debugging
 function saveHistoryHtml() {
- var historyHtml = document.querySelector('#chuanhu_chatbot
+ var historyHtml = document.querySelector('#chuanhu_chatbot>.wrapper>.wrap');
+ if (!historyHtml) return; // no history, do nothing
 localStorage.setItem('chatHistory', historyHtml.innerHTML);
 // console.log("History Saved")
 historyLoaded = false;
@@ -474,12 +559,98 @@ function clearHistoryHtml() {
 console.log("History Cleared");
 }
 }
-
-
-
-
+
+ var showingUpdateInfo = false;
+ async function getLatestRelease() {
+ try {
+ const response = await fetch('https://api.github.com/repos/gaizhenbiao/chuanhuchatgpt/releases/latest');
+ if (!response.ok) {
+ console.log(`Error: ${response.status} - ${response.statusText}`);
+ updateInfoGotten = true;
+ return null;
+ }
+ const data = await response.json();
+ updateInfoGotten = true;
+ return data;
+ } catch (error) {
+ console.log(`Error: ${error}`);
+ updateInfoGotten = true;
+ return null;
+ }
+ }
+ async function updateLatestVersion() {
+ const currentVersionElement = document.getElementById('current-version');
+ const latestVersionElement = document.getElementById('latest-version-title');
+ const releaseNoteElement = document.getElementById('release-note-content');
+ const currentVersion = currentVersionElement.textContent;
+ const versionTime = document.getElementById('version-time').innerText;
+ const localVersionTime = versionTime !== "unknown" ? (new Date(versionTime)).getTime() : 0;
+ updateInfoGotten = true; //无论成功与否都只执行一次,否则容易api超限...
 }

 // 监视页面内部 DOM 变动
 var observer = new MutationObserver(function (mutations) {
 gradioLoaded(mutations);
@@ -492,9 +663,43 @@ window.addEventListener("DOMContentLoaded", function () {
 historyLoaded = false;
 });
 window.addEventListener('resize', setChatbotHeight);
- window.addEventListener('scroll', setChatbotHeight);
 window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);

 // button svg code
 const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
 const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
|
589 |
+
try {
|
590 |
+
const data = await getLatestRelease();
|
591 |
+
const releaseNote = data.body;
|
592 |
+
if (releaseNote) {
|
593 |
+
releaseNoteElement.innerHTML = marked.parse(releaseNote, {mangle: false, headerIds: false});
|
594 |
+
}
|
595 |
+
const latestVersion = data.tag_name;
|
596 |
+
const latestVersionTime = (new Date(data.created_at)).getTime();
|
597 |
+
if (latestVersionTime) {
|
598 |
+
if (localVersionTime < latestVersionTime) {
|
599 |
+
latestVersionElement.textContent = latestVersion;
|
600 |
+
console.log(`New version ${latestVersion} found!`);
|
601 |
+
if (!isInIframe) {openUpdateToast();}
|
602 |
+
} else {
|
603 |
+
noUpdate();
|
604 |
+
}
|
605 |
+
currentTime = new Date().getTime();
|
606 |
+
localStorage.setItem('lastCheckTime', currentTime);
|
607 |
+
}
|
608 |
+
} catch (error) {
|
609 |
+
console.error(error);
|
610 |
+
}
|
611 |
+
}
|
612 |
+
function getUpdate() {
|
613 |
+
window.open('https://github.com/gaizhenbiao/chuanhuchatgpt/releases/latest', '_blank');
|
614 |
+
closeUpdateToast();
|
615 |
+
}
|
616 |
+
function cancelUpdate() {
|
617 |
+
closeUpdateToast();
|
618 |
+
}
|
619 |
+
function openUpdateToast() {
|
620 |
+
showingUpdateInfo = true;
|
621 |
+
setUpdateWindowHeight();
|
622 |
+
}
|
623 |
+
function closeUpdateToast() {
|
624 |
+
updateToast.style.setProperty('top', '-500px');
|
625 |
+
showingUpdateInfo = false;
|
626 |
+
}
|
627 |
+
function manualCheckUpdate() {
|
628 |
+
openUpdateToast();
|
629 |
+
updateLatestVersion();
|
630 |
+
currentTime = new Date().getTime();
|
631 |
+
localStorage.setItem('lastCheckTime', currentTime);
|
632 |
}
|
633 |
+
function noUpdate() {
|
634 |
+
localStorage.setItem('isLatestVersion', 'true');
|
635 |
+
isLatestVersion = true;
|
636 |
+
const versionInfoElement = document.getElementById('version-info-title');
|
637 |
+
const releaseNoteWrap = document.getElementById('release-note-wrap');
|
638 |
+
const gotoUpdateBtn = document.getElementById('goto-update-btn');
|
639 |
+
const closeUpdateBtn = document.getElementById('close-update-btn');
|
640 |
|
641 |
+
versionInfoElement.textContent = usingLatest_i18n.hasOwnProperty(language) ? usingLatest_i18n[language] : usingLatest_i18n['en'];
|
642 |
+
releaseNoteWrap.style.setProperty('display', 'none');
|
643 |
+
gotoUpdateBtn.classList.add('hideK');
|
644 |
+
closeUpdateBtn.classList.remove('hideK');
|
645 |
+
}
|
646 |
+
function setUpdateWindowHeight() {
|
647 |
+
if (!showingUpdateInfo) {return;}
|
648 |
+
const scrollPosition = window.scrollY;
|
649 |
+
// const originalTop = updateToast.style.getPropertyValue('top');
|
650 |
+
const resultTop = scrollPosition - 20 + 'px';
|
651 |
+
updateToast.style.setProperty('top', resultTop);
|
652 |
+
}
|
653 |
+
|
654 |
// 监视页面内部 DOM 变动
|
655 |
var observer = new MutationObserver(function (mutations) {
|
656 |
gradioLoaded(mutations);
|
|
|
663 |
historyLoaded = false;
|
664 |
});
|
665 |
window.addEventListener('resize', setChatbotHeight);
|
666 |
+
window.addEventListener('scroll', function(){setChatbotHeight();setUpdateWindowHeight();});
|
667 |
window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);
|
668 |
|
669 |
+
// console suprise
|
670 |
+
var styleTitle1 = `
|
671 |
+
font-size: 16px;
|
672 |
+
font-family: ui-monospace, monospace;
|
673 |
+
color: #06AE56;
|
674 |
+
`
|
675 |
+
var styleDesc1 = `
|
676 |
+
font-size: 12px;
|
677 |
+
font-family: ui-monospace, monospace;
|
678 |
+
`
|
679 |
+
function makeML(str) {
|
680 |
+
let l = new String(str)
|
681 |
+
l = l.substring(l.indexOf("/*") + 3, l.lastIndexOf("*/"))
|
682 |
+
return l
|
683 |
+
}
|
684 |
+
let ChuanhuInfo = function () {
|
685 |
+
/*
|
686 |
+
________ __ ________ __
|
687 |
+
/ ____/ /_ __ ______ _____ / /_ __ __ / ____/ /_ ____ _/ /_
|
688 |
+
/ / / __ \/ / / / __ `/ __ \/ __ \/ / / / / / / __ \/ __ `/ __/
|
689 |
+
/ /___/ / / / /_/ / /_/ / / / / / / / /_/ / / /___/ / / / /_/ / /_
|
690 |
+
\____/_/ /_/\__,_/\__,_/_/ /_/_/ /_/\__,_/ \____/_/ /_/\__,_/\__/
|
691 |
+
|
692 |
+
川虎Chat (Chuanhu Chat) - GUI for ChatGPT API and many LLMs
|
693 |
+
*/
|
694 |
+
}
|
695 |
+
let description = `
|
696 |
+
© 2023 Chuanhu, MZhao, Keldos
|
697 |
+
GitHub repository: [https://github.com/GaiZhenbiao/ChuanhuChatGPT]\n
|
698 |
+
Enjoy our project!\n
|
699 |
+
`
|
700 |
+
console.log(`%c${makeML(ChuanhuInfo)}`,styleTitle1)
|
701 |
+
console.log(`%c${description}`, styleDesc1)
|
702 |
+
|
703 |
// button svg code
|
704 |
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
|
705 |
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
|
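The update toast above is driven by a plain GitHub releases lookup: fetch the latest release, compare its created_at timestamp with the locally recorded version time, and only then surface the toast. For readers who want to try that comparison outside the browser, here is a minimal Python sketch of the same check; it is an illustration, not code from the repository, and local_version_time is an assumed input you would read from your own install.

from datetime import datetime, timezone
import requests

RELEASE_API = "https://api.github.com/repos/gaizhenbiao/chuanhuchatgpt/releases/latest"

def check_for_update(local_version_time):
    # Mirrors getLatestRelease()/updateLatestVersion(): give up quietly on API errors or rate limits.
    resp = requests.get(RELEASE_API, timeout=10)
    if not resp.ok:
        return None
    data = resp.json()
    latest_time = datetime.strptime(data["created_at"], "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
    return data["tag_name"] if local_version_time < latest_time else None

# Example: a build recorded on 2023-01-01 would be reported as outdated.
print(check_for_update(datetime(2023, 1, 1, tzinfo=timezone.utc)))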
assets/favicon.ico CHANGED
assets/html/appearance_switcher.html CHANGED
@@ -1,11 +1,6 @@
-<div …
-< …
-
-
-
-  <label class="apSwitch" for="checkbox">
-    <input type="checkbox" id="checkbox">
-    <div class="apSlider"></div>
-  </label>
-</span>
+<div class="switch_checkbox" id="apSwitch">
+  <label class="apSwitch">
+    <input type="checkbox" id="apSwitch_checkbox" data-testid="checkbox" />
+    <span class="apSwitch_span">{label}</span>
+  </label>
 </div>
config_example.json CHANGED (updated file shown; "+" marks added lines, removed lines are cut off in this view)
 1       {
 2   +       // 各配置具体说明,见 [https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#配置-configjson]
 3   +
 4   +       //== API 配置 ==
 5   +       "openai_api_key": "", // 你的 OpenAI API Key,一般必填,若空缺则需在图形界面中填入API Key
 6   +       "google_palm_api_key": "", // 你的 Google PaLM API Key,用于 Google PaLM 对话模型
 7   +       "xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
 8   +       "minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
 9   +       "minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
10   +
11   +       //== Azure ==
12   +       "openai_api_type": "openai", // 可选项:azure, openai
13   +       "azure_openai_api_key": "", // 你的 Azure OpenAI API Key,用于 Azure OpenAI 对话模型
14   +       "azure_openai_api_base_url": "", // 你的 Azure Base URL
15   +       "azure_openai_api_version": "2023-05-15", // 你的 Azure OpenAI API 版本
16   +       "azure_deployment_name": "", // 你的 Azure OpenAI Chat 模型 Deployment 名称
17   +       "azure_embedding_deployment_name": "", // 你的 Azure OpenAI Embedding 模型 Deployment 名称
18   +       "azure_embedding_model_name": "text-embedding-ada-002", // 你的 Azure OpenAI Embedding 模型名称
19   +
20   +       //== 基础配置 ==
21   +       "language": "auto", // 界面语言,可选"auto", "zh-CN", "en-US", "ja-JP", "ko-KR", "sv-SE"
22           "users": [], // 用户列表,[[用户名1, 密码1], [用户名2, 密码2], ...]
23           "local_embedding": false, //是否在本地编制索引
24   +       "hide_history_when_not_logged_in": false, //未登录情况下是否不展示对话历史
25   +       "check_update": true, //是否启用检查更新
26           "default_model": "gpt-3.5-turbo", // 默认模型
27   +
28   +       //== API 用量 ==
29   +       "show_api_billing": false, //是否显示OpenAI API用量(启用需要填写sensitive_id)
30   +       "sensitive_id": "", // 你 OpenAI 账户的 Sensitive ID,用于查询 API 用量
31   +       "usage_limit": 120, // 该 OpenAI API Key 的当月限额,单位:美元,用于计算百分比和显示上限
32   +       "legacy_api_usage": false, // 是否使用旧版 API 用量查询接口(OpenAI现已关闭该接口,但是如果你在使用第三方 API,第三方可能仍然支持此接口)
33   +
34   +       //== 川虎助理设置 ==
35   +       "default_chuanhu_assistant_model": "gpt-4", //川虎助理使用的模型,可选gpt-3.5-turbo或者gpt-4等
36   +       "GOOGLE_CSE_ID": "", //谷歌搜索引擎ID,用于川虎助理Pro模式,获取方式请看 https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search
37   +       "GOOGLE_API_KEY": "", //谷歌API Key,用于川虎助理Pro模式
38   +       "WOLFRAM_ALPHA_APPID": "", //Wolfram Alpha API Key,用于川虎助理Pro模式,获取方式请看 https://products.wolframalpha.com/api/
39   +       "SERPAPI_API_KEY": "", //SerpAPI API Key,用于川虎助理Pro模式,获取方式请看 https://serpapi.com/
40   +
41   +       //== 文档处理与显示 ==
42   +       "latex_option": "default", // LaTeX 公式渲染策略,可选"default", "strict", "all"或者"disabled"
43           "advance_docs": {
44               "pdf": {
45   +               "two_column": false, // 是否认为PDF是双栏的
46   +               "formula_ocr": true // 是否使用OCR识别PDF中的公式
47               }
48           },
49   +
50   +       //== 高级配置 ==
51           // 是否多个API Key轮换使用
52           "multi_api_key": false,
53           "api_key_list": [
...
55               "sk-xxxxxxxxxxxxxxxxxxxxxxxx2",
56               "sk-xxxxxxxxxxxxxxxxxxxxxxxx3"
57           ],
58   +       // 自定义OpenAI API Base
59   +       // "openai_api_base": "https://api.openai.com",
60   +       // 自定义使用代理(请替换代理URL)
61   +       // "https_proxy": "http://127.0.0.1:1079",
62   +       // "http_proxy": "http://127.0.0.1:1079",
63   +       // 自定义端口、自定义ip(请替换对应内容)
64           // "server_name": "0.0.0.0",
65           // "server_port": 7860,
66           // 如果要share到gradio,设置为true
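The template above documents every supported key; a working config.json usually only needs a handful of them. A minimal sketch (placeholder values, not recommendations) might look like this:

{
    "openai_api_key": "sk-xxxxxxxxxxxxxxxxxxxxxxxx1",
    "language": "auto",
    "default_model": "gpt-3.5-turbo",
    "check_update": true,
    "hide_history_when_not_logged_in": false,
    "latex_option": "default",
    "advance_docs": { "pdf": { "two_column": false, "formula_ocr": true } }
}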
locale/en_US.json CHANGED (updated file shown; "+" marks added lines)
32           "📝 导出为Markdown": "📝 Export as Markdown",
33           "默认保存于history文件夹": "Default save in history folder",
34           "高级": "Advanced",
35   +       "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ Caution: Changes require care. ⚠️",
36           "参数": "Parameters",
37   +       "停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
38           "用于定位滥用行为": "Used to locate abuse",
39           "用户名": "Username",
40           "网络设置": "Network Settings",
41           "在这里输入API-Host...": "Type in API-Host here...",
42           "🔄 切换API地址": "🔄 Switch API Address",
43           "在这里输入代理地址...": "Type in proxy address here...",
44   +       "代理地址(示例:http://127.0.0.1:10809)": "Proxy address (example: http://127.0.0.1:10809)",
45           "🔄 设置代理地址": "🔄 Set Proxy Address",
46   +       "🔙 恢复网络默认设置": "🔙 Reset Network Settings",
47   +       "🔄 检查更新...": "🔄 Check for Update...",
48   +       "取消": "Cancel",
49   +       "更新": "Update",
50   +       "详情": "Details",
51   +       "好": "OK",
52           "川虎Chat 🚀": "Chuanhu Chat 🚀",
53           "开始实时传输回答……": "Start streaming output...",
54           "Token 计数: ": "Token Count: ",
55   +       ",本次对话累计消耗了 ": ", Total cost for this dialogue is ",
56           "**获取API使用情况失败**": "**Failed to get API usage**",
57   +       "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**Failed to get API usage**, correct sensitive_id needed in `config.json`",
58   +       "**获取API使用情况失败**,sensitive_id错误或已过期": "**Failed to get API usage**, wrong or expired sensitive_id",
59           "**本月使用金额** ": "**Monthly usage** ",
60   +       "本月使用金额": "Monthly usage",
61           "获取API使用情况失败:": "Failed to get API usage:",
62           "API密钥更改为了": "The API key is changed to",
63           "JSON解析错误,收到的内容: ": "JSON parsing error, received content: ",
...
72           "API key为空,请检查是否输入正确。": "API key is empty, check whether it is entered correctly.",
73           "请输入对话内容。": "Enter the content of the conversation.",
74           "账单信息不适用": "Billing information is not applicable",
75   +       "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Developed by Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) and [Keldos](https://github.com/Keldos-Li)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
76           "切换亮暗色主题": "Switch light/dark theme",
77           "您的IP区域:未知。": "Your IP region: Unknown.",
78           "获取IP地理位置失败。原因:": "Failed to get IP location. Reason: ",
79           "。你仍然可以使用聊天功能。": ". You can still use the chat function.",
80   +       "您的IP区域:": "Your IP region: ",
81   +       "总结": "Summarize",
82   +       "生成内容总结中……": "Generating content summary...",
83   +       "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Due to the following reasons, Google refuses to provide an answer to PaLM: \n\n"
84       }
locale/ja_JP.json CHANGED (updated file shown; "+" marks added lines)
32           "📝 导出为Markdown": "📝 Markdownでエクスポート",
33           "默认保存于history文件夹": "デフォルトでhistoryフォルダに保存されます",
34           "高级": "Advanced",
35   +       "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ 変更には慎重に ⚠️",
36           "参数": "パラメータ",
37   +       "停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
38           "用于定位滥用行为": "不正行為を特定するために使用されます",
39           "用户名": "ユーザー名",
40           "网络设置": "ネットワーク設定",
...
43           "在这里输入代理地址...": "プロキシアドレスを入力してください...",
44           "代理地址(示例:http://127.0.0.1:10809)": "プロキシアドレス(例:http://127.0.0.1:10809)",
45           "🔄 设置代理地址": "🔄 プロキシアドレスを設定",
46   +       "🔙 恢复默认网络设置": "🔙 ネットワーク設定のリセット",
47   +       "🔄 检查更新...": "🔄 アップデートをチェック...",
48   +       "取消": "キャンセル",
49   +       "更新": "アップデート",
50   +       "详情": "詳細",
51   +       "好": "はい",
52           "川虎Chat 🚀": "川虎Chat 🚀",
53           "开始实时传输回答……": "ストリーム出力開始……",
54           "Token 计数: ": "Token数: ",
55           ",本次对话累计消耗了 ": ", 今の会話で消費合計 ",
56           "**获取API使用情况失败**": "**API使用状況の取得に失敗しました**",
57   +       "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**API使用状況の取得に失敗しました**、`config.json`に正しい`sensitive_id`を入力する必要があります",
58   +       "**获取API使用情况失败**,sensitive_id错误或已过期": "**API使用状況の取得に失敗しました**、sensitive_idが間違っているか、期限切れです",
59           "**本月使用金额** ": "**今月の使用料金** ",
60   +       "本月使用金额": "今月の使用料金",
61           "获取API使用情况失败:": "API使用状況の取得に失敗しました:",
62           "API密钥更改为了": "APIキーが変更されました",
63           "JSON解析错误,收到的内容: ": "JSON解析エラー、受信内容: ",
...
72           "API key为空,请检查是否输入正确。": "APIキーが入力されていません。正しく入力されているか確認してください。",
73           "请输入对话内容。": "会話内容を入力してください。",
74           "账单信息不适用": "課金情報は対象外です",
75   +       "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452) と [Keldos](https://github.com/Keldos-Li)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
76           "切换亮暗色主题": "テーマの明暗切替",
77           "您的IP区域:未知。": "あなたのIPアドレス地域:不明",
78           "获取IP地理位置失败。原因:": "IPアドレス地域の取得に失敗しました。理由:",
79           "。你仍然可以使用聊天功能。": "。あなたはまだチャット機能を使用できます。",
80   +       "您的IP区域:": "あなたのIPアドレス地域:",
81   +       "总结": "要約する",
82   +       "生成内容总结中……": "コンテンツ概要を生成しています...",
83   +       "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Googleは以下の理由から、PaLMの回答を返すことを拒否しています:\n\n"
84   +   }
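Both locale files map the original Chinese UI strings (the JSON keys) onto translated values, so a missing key can simply fall back to the Chinese source text. The loader below is an illustrative sketch of that lookup pattern, not the project's actual i18n implementation.

import json

def load_locale(path="locale/en_US.json"):
    # Assumption for this sketch: the locale file is plain JSON keyed by the original Chinese string.
    with open(path, encoding="utf-8") as f:
        return json.load(f)

def i18n_lookup(table, key):
    return table.get(key, key)  # missing keys fall back to the source string unchanged

table = load_locale()
print(i18n_lookup(table, "取消"))    # -> "Cancel"
print(i18n_lookup(table, "未知键"))  # -> "未知键"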
modules/config.py CHANGED (updated file shown; "+" marks added lines)
 11
 12      __all__ = [
 13          "my_api_key",
 14  +       "sensitive_id",
 15          "authflag",
 16          "auth_list",
 17          "dockerflag",
...
 24          "server_name",
 25          "server_port",
 26          "share",
 27  +       "check_update",
 28  +       "latex_delimiters_set",
 29          "hide_history_when_not_logged_in",
 30  +       "default_chuanhu_assistant_model",
 31  +       "show_api_billing"
 32      ]
 33
 34      # 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低)
...
 39      else:
 40          config = {}
 41
 42  +
 43  +   def load_config_to_environ(key_list):
 44  +       global config
 45  +       for key in key_list:
 46  +           if key in config:
 47  +               os.environ[key.upper()] = os.environ.get(key.upper(), config[key])
 48  +
 49  +
 50      lang_config = config.get("language", "auto")
 51      language = os.environ.get("LANGUAGE", lang_config)
 52
 53  +   hide_history_when_not_logged_in = config.get(
 54  +       "hide_history_when_not_logged_in", False)
 55  +   check_update = config.get("check_update", True)
 56  +   show_api_billing = config.get("show_api_billing", False)
 57  +   show_api_billing = bool(os.environ.get("SHOW_API_BILLING", show_api_billing))
 58
 59      if os.path.exists("api_key.txt"):
 60          logging.info("检测到api_key.txt文件,正在进行迁移...")
...
 68          logging.info("检测到auth.json文件,正在进行迁移...")
 69          auth_list = []
 70          with open("auth.json", "r", encoding='utf-8') as f:
 71  +           auth = json.load(f)
 72  +           for _ in auth:
 73  +               if auth[_]["username"] and auth[_]["password"]:
 74  +                   auth_list.append((auth[_]["username"], auth[_]["password"]))
 75  +               else:
 76  +                   logging.error("请检查auth.json文件中的用户名和密码!")
 77  +                   sys.exit(1)
 78          config["users"] = auth_list
 79          os.rename("auth.json", "auth(deprecated).json")
 80          with open("config.json", "w", encoding='utf-8') as f:
 81              json.dump(config, f, indent=4, ensure_ascii=False)
 82
 83  +   # 处理docker if we are running in Docker
 84      dockerflag = config.get("dockerflag", False)
 85      if os.environ.get("dockerrun") == "yes":
 86          dockerflag = True
 87
 88  +   # 处理 api-key 以及 允许的用户列表
 89      my_api_key = config.get("openai_api_key", "")
 90      my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
 91  +   os.environ["OPENAI_API_KEY"] = my_api_key
 92  +   os.environ["OPENAI_EMBEDDING_API_KEY"] = my_api_key
 93  +
 94  +   if config.get("legacy_api_usage", False):
 95  +       sensitive_id = config.get("sensitive_id", "")
 96  +       sensitive_id = os.environ.get("SENSITIVE_ID", sensitive_id)
 97  +   else:
 98  +       sensitive_id = my_api_key
 99  +
100  +   google_palm_api_key = config.get("google_palm_api_key", "")
101  +   google_palm_api_key = os.environ.get(
102  +       "GOOGLE_PALM_API_KEY", google_palm_api_key)
103  +   os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key
104
105      xmchat_api_key = config.get("xmchat_api_key", "")
106      os.environ["XMCHAT_API_KEY"] = xmchat_api_key
...
110      minimax_group_id = config.get("minimax_group_id", "")
111      os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
112
113  +   load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
114  +                           "azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
115  +
116
117      usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120))
118
119  +   # 多账户机制
120  +   multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制
121      if multi_api_key:
122          api_key_list = config.get("api_key_list", [])
123          if len(api_key_list) == 0:
...
125              sys.exit(1)
126          shared.state.set_api_key_queue(api_key_list)
127
128  +   auth_list = config.get("users", []) # 实际上是使用者的列表
129      authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度
130
131      # 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配
132  +   api_host = os.environ.get(
133  +       "OPENAI_API_BASE", config.get("openai_api_base", None))
134      if api_host is not None:
135          shared.state.set_api_host(api_host)
136  +       os.environ["OPENAI_API_BASE"] = f"{api_host}/v1"
137  +       logging.info(f"OpenAI API Base set to: {os.environ['OPENAI_API_BASE']}")
138
139  +   default_chuanhu_assistant_model = config.get(
140  +       "default_chuanhu_assistant_model", "gpt-3.5-turbo")
141      for x in ["GOOGLE_CSE_ID", "GOOGLE_API_KEY", "WOLFRAM_ALPHA_APPID", "SERPAPI_API_KEY"]:
142          if config.get(x, None) is not None:
143              os.environ[x] = config[x]
144
145  +
146      @contextmanager
147  +   def retrieve_openai_api(api_key=None):
148          old_api_key = os.environ.get("OPENAI_API_KEY", "")
149          if api_key is None:
150              os.environ["OPENAI_API_KEY"] = my_api_key
...
154          yield api_key
155          os.environ["OPENAI_API_KEY"] = old_api_key
156
157  +
158  +   # 处理log
159      log_level = config.get("log_level", "INFO")
160      logging.basicConfig(
161          level=log_level,
162          format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
163      )
164
165  +   # 处理代理:
166  +   http_proxy = os.environ.get("HTTP_PROXY", "")
167  +   https_proxy = os.environ.get("HTTPS_PROXY", "")
168  +   http_proxy = config.get("http_proxy", http_proxy)
169  +   https_proxy = config.get("https_proxy", https_proxy)
170
171      # 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错
172      os.environ["HTTP_PROXY"] = ""
173      os.environ["HTTPS_PROXY"] = ""
174
175  +   local_embedding = config.get("local_embedding", False) # 是否使用本地embedding
176  +
177
178      @contextmanager
179      def retrieve_proxy(proxy=None):
...
190          old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
191          os.environ["HTTP_PROXY"] = http_proxy
192          os.environ["HTTPS_PROXY"] = https_proxy
193  +       yield http_proxy, https_proxy # return new proxy
194
195          # return old proxy
196          os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var
197
198
199  +   # 处理latex options
200  +   user_latex_option = config.get("latex_option", "default")
201  +   if user_latex_option == "default":
202  +       latex_delimiters_set = [
203  +           {"left": "$$", "right": "$$", "display": True},
204  +           {"left": "$", "right": "$", "display": False},
205  +           {"left": "\\(", "right": "\\)", "display": False},
206  +           {"left": "\\[", "right": "\\]", "display": True},
207  +       ]
208  +   elif user_latex_option == "strict":
209  +       latex_delimiters_set = [
210  +           {"left": "$$", "right": "$$", "display": True},
211  +           {"left": "\\(", "right": "\\)", "display": False},
212  +           {"left": "\\[", "right": "\\]", "display": True},
213  +       ]
214  +   elif user_latex_option == "all":
215  +       latex_delimiters_set = [
216  +           {"left": "$$", "right": "$$", "display": True},
217  +           {"left": "$", "right": "$", "display": False},
218  +           {"left": "\\(", "right": "\\)", "display": False},
219  +           {"left": "\\[", "right": "\\]", "display": True},
220  +           {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
221  +           {"left": "\\begin{align}", "right": "\\end{align}", "display": True},
222  +           {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
223  +           {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
224  +           {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
225  +       ]
226  +   elif user_latex_option == "disabled":
227  +       latex_delimiters_set = []
228  +   else:
229  +       latex_delimiters_set = [
230  +           {"left": "$$", "right": "$$", "display": True},
231  +           {"left": "$", "right": "$", "display": False},
232  +           {"left": "\\(", "right": "\\)", "display": False},
233  +           {"left": "\\[", "right": "\\]", "display": True},
234  +       ]
235  +
236  +   # 处理advance docs
237      advance_docs = defaultdict(lambda: defaultdict(dict))
238      advance_docs.update(config.get("advance_docs", {}))
239  +
240  +
241      def update_doc_config(two_column_pdf):
242          global advance_docs
243          advance_docs["pdf"]["two_column"] = two_column_pdf
244
245          logging.info(f"更新后的文件参数为:{advance_docs}")
246
247  +
248  +   # 处理gradio.launch参数
249      server_name = config.get("server_name", None)
250      server_port = config.get("server_port", None)
251      if server_name is None:
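Two of the additions above are meant to be imported elsewhere: retrieve_proxy() applies the configured proxy only for the duration of a with-block (restoring the previous HTTP_PROXY/HTTPS_PROXY afterwards), and latex_delimiters_set is exported for the chat UI's LaTeX rendering. The sketch below is a usage illustration run outside the app, assuming a valid config.json in the working directory; it is not code from the repository.

import requests
from modules.config import retrieve_proxy, latex_delimiters_set

with retrieve_proxy():   # proxy from config.json / environment applies only inside this block
    status = requests.get("https://api.github.com", timeout=10).status_code
print(status)

# The default policy renders both $$...$$ display math and $...$ inline math.
print(latex_delimiters_set[0])   # {'left': '$$', 'right': '$$', 'display': True}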
modules/index_func.py CHANGED (updated file shown; "+" marks added lines)
 47              pdftext = parse_pdf(filepath, two_column).text
 48          except:
 49              pdftext = ""
 50  +           with open(filepath, "rb") as pdfFileObj:
 51                  pdfReader = PyPDF2.PdfReader(pdfFileObj)
 52                  for page in tqdm(pdfReader.pages):
 53                      pdftext += page.extract_text()
 54  +       texts = [Document(page_content=pdftext,
 55  +                         metadata={"source": filepath})]
 56      elif file_type == ".docx":
 57          logging.debug("Loading Word...")
 58          from langchain.document_loaders import UnstructuredWordDocumentLoader
...
 73          text_list = excel_to_string(filepath)
 74          texts = []
 75          for elem in text_list:
 76  +           texts.append(Document(page_content=elem,
 77  +                                 metadata={"source": filepath}))
 78      else:
 79          logging.debug("Loading text file...")
 80          from langchain.document_loaders import TextLoader
...
117      index_path = f"./index/{index_name}"
118      if local_embedding:
119          from langchain.embeddings.huggingface import HuggingFaceEmbeddings
120  +       embeddings = HuggingFaceEmbeddings(
121  +           model_name="sentence-transformers/distiluse-base-multilingual-cased-v2")
122      else:
123          from langchain.embeddings import OpenAIEmbeddings
124  +       if os.environ.get("OPENAI_API_TYPE", "openai") == "openai":
125  +           embeddings = OpenAIEmbeddings(openai_api_base=os.environ.get(
126  +               "OPENAI_API_BASE", None), openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key))
127  +       else:
128  +           embeddings = OpenAIEmbeddings(deployment=os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"], openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
129  +                                         model=os.environ["AZURE_EMBEDDING_MODEL_NAME"], openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"], openai_api_type="azure")
130      if os.path.exists(index_path):
131          logging.info("找到了缓存的索引文件,加载中……")
132          return FAISS.load_local(index_path, embeddings)
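construct_index() now picks its embedding backend from OPENAI_API_TYPE (plain OpenAI vs. an Azure deployment) and caches the resulting FAISS index under ./index/<name>. The sketch below shows how an index cached this way can be reopened and queried on its own; the index name is hypothetical and the environment variables are the ones modules/config.py exports.

import os
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Same backend selection as the diff above (non-Azure branch shown for brevity).
embeddings = OpenAIEmbeddings(
    openai_api_base=os.environ.get("OPENAI_API_BASE", None),
    openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", ""),
)
index = FAISS.load_local("./index/example_index", embeddings)   # hypothetical cached index
print(index.similarity_search("What does the document conclude?", k=3))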
modules/models/base_model.py
CHANGED
@@ -29,6 +29,8 @@ from langchain.input import print_text
|
|
29 |
from langchain.schema import AgentAction, AgentFinish, LLMResult
|
30 |
from threading import Thread, Condition
|
31 |
from collections import deque
|
|
|
|
|
32 |
|
33 |
from ..presets import *
|
34 |
from ..index_func import *
|
@@ -36,6 +38,7 @@ from ..utils import *
|
|
36 |
from .. import shared
|
37 |
from ..config import retrieve_proxy
|
38 |
|
|
|
39 |
class CallbackToIterator:
|
40 |
def __init__(self):
|
41 |
self.queue = deque()
|
@@ -52,7 +55,8 @@ class CallbackToIterator:
|
|
52 |
|
53 |
def __next__(self):
|
54 |
with self.cond:
|
55 |
-
|
|
|
56 |
self.cond.wait()
|
57 |
if not self.queue:
|
58 |
raise StopIteration()
|
@@ -63,6 +67,7 @@ class CallbackToIterator:
|
|
63 |
self.finished = True
|
64 |
self.cond.notify() # Wake up the generator if it's waiting.
|
65 |
|
|
|
66 |
def get_action_description(text):
|
67 |
match = re.search('```(.*?)```', text, re.S)
|
68 |
json_text = match.group(1)
|
@@ -76,6 +81,7 @@ def get_action_description(text):
|
|
76 |
else:
|
77 |
return ""
|
78 |
|
|
|
79 |
class ChuanhuCallbackHandler(BaseCallbackHandler):
|
80 |
|
81 |
def __init__(self, callback) -> None:
|
@@ -117,6 +123,10 @@ class ChuanhuCallbackHandler(BaseCallbackHandler):
|
|
117 |
"""Run on new LLM token. Only available when streaming is enabled."""
|
118 |
self.callback(token)
|
119 |
|
|
|
|
|
|
|
|
|
120 |
|
121 |
class ModelType(Enum):
|
122 |
Unknown = -1
|
@@ -129,6 +139,8 @@ class ModelType(Enum):
|
|
129 |
YuanAI = 6
|
130 |
Minimax = 7
|
131 |
ChuanhuAgent = 8
|
|
|
|
|
132 |
|
133 |
@classmethod
|
134 |
def get_type(cls, model_name: str):
|
@@ -152,6 +164,10 @@ class ModelType(Enum):
|
|
152 |
model_type = ModelType.Minimax
|
153 |
elif "川虎助理" in model_name_lower:
|
154 |
model_type = ModelType.ChuanhuAgent
|
|
|
|
|
|
|
|
|
155 |
else:
|
156 |
model_type = ModelType.Unknown
|
157 |
return model_type
|
@@ -161,7 +177,7 @@ class BaseLLMModel:
|
|
161 |
def __init__(
|
162 |
self,
|
163 |
model_name,
|
164 |
-
system_prompt=
|
165 |
temperature=1.0,
|
166 |
top_p=1.0,
|
167 |
n_choices=1,
|
@@ -201,7 +217,8 @@ class BaseLLMModel:
|
|
201 |
conversations are stored in self.history, with the most recent question, in OpenAI format
|
202 |
should return a generator, each time give the next word (str) in the answer
|
203 |
"""
|
204 |
-
logging.warning(
|
|
|
205 |
response, _ = self.get_answer_at_once()
|
206 |
yield response
|
207 |
|
@@ -212,7 +229,8 @@ class BaseLLMModel:
|
|
212 |
the answer (str)
|
213 |
total token count (int)
|
214 |
"""
|
215 |
-
logging.warning(
|
|
|
216 |
response_iter = self.get_answer_stream_iter()
|
217 |
count = 0
|
218 |
for response in response_iter:
|
@@ -246,7 +264,7 @@ class BaseLLMModel:
|
|
246 |
stream_iter = self.get_answer_stream_iter()
|
247 |
|
248 |
if display_append:
|
249 |
-
display_append =
|
250 |
for partial_text in stream_iter:
|
251 |
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
|
252 |
self.all_token_counts[-1] += 1
|
@@ -273,9 +291,11 @@ class BaseLLMModel:
|
|
273 |
self.history[-2] = construct_user(fake_input)
|
274 |
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
|
275 |
if fake_input is not None:
|
276 |
-
self.all_token_counts[-1] += count_token(
|
|
|
277 |
else:
|
278 |
-
self.all_token_counts[-1] = total_token_count -
|
|
|
279 |
status_text = self.token_message()
|
280 |
return chatbot, status_text
|
281 |
|
@@ -299,10 +319,13 @@ class BaseLLMModel:
|
|
299 |
from langchain.chat_models import ChatOpenAI
|
300 |
from langchain.callbacks import StdOutCallbackHandler
|
301 |
prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
|
302 |
-
PROMPT = PromptTemplate(
|
|
|
303 |
llm = ChatOpenAI()
|
304 |
-
chain = load_summarize_chain(
|
305 |
-
|
|
|
|
|
306 |
print(i18n("总结") + f": {summary}")
|
307 |
chatbot.append([i18n("上传了")+str(len(files))+"个文件", summary])
|
308 |
return chatbot, status
|
@@ -323,9 +346,12 @@ class BaseLLMModel:
|
|
323 |
msg = "索引获取成功,生成回答中……"
|
324 |
logging.info(msg)
|
325 |
with retrieve_proxy():
|
326 |
-
retriever = VectorStoreRetriever(vectorstore=index, search_type="similarity_score_threshold",search_kwargs={
|
327 |
-
|
328 |
-
|
|
|
|
|
|
|
329 |
reference_results = add_source_numbers(reference_results)
|
330 |
display_append = add_details(reference_results)
|
331 |
display_append = "\n\n" + "".join(display_append)
|
@@ -348,10 +374,12 @@ class BaseLLMModel:
|
|
348 |
reference_results.append([result['body'], result['href']])
|
349 |
display_append.append(
|
350 |
# f"{idx+1}. [{domain_name}]({result['href']})\n"
|
351 |
-
f"<
|
352 |
)
|
353 |
reference_results = add_source_numbers(reference_results)
|
354 |
-
display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
|
|
|
|
|
355 |
real_inputs = (
|
356 |
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
|
357 |
.replace("{query}", real_inputs)
|
@@ -375,14 +403,16 @@ class BaseLLMModel:
|
|
375 |
|
376 |
status_text = "开始生成回答……"
|
377 |
logging.info(
|
378 |
-
|
|
|
379 |
)
|
380 |
if should_check_token_count:
|
381 |
yield chatbot + [(inputs, "")], status_text
|
382 |
if reply_language == "跟随问题语言(不稳定)":
|
383 |
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
|
384 |
|
385 |
-
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(
|
|
|
386 |
yield chatbot + [(fake_inputs, "")], status_text
|
387 |
|
388 |
if (
|
@@ -568,10 +598,13 @@ class BaseLLMModel:
|
|
568 |
self.system_prompt = new_system_prompt
|
569 |
|
570 |
def set_key(self, new_access_key):
|
571 |
-
|
572 |
-
|
573 |
-
|
574 |
-
|
|
|
|
|
|
|
575 |
|
576 |
def set_single_turn(self, new_single_turn):
|
577 |
self.single_turn = new_single_turn
|
@@ -580,7 +613,8 @@ class BaseLLMModel:
|
|
580 |
self.history = []
|
581 |
self.all_token_counts = []
|
582 |
self.interrupted = False
|
583 |
-
pathlib.Path(os.path.join(HISTORY_DIR, self.user_identifier, new_auto_history_filename(
|
|
|
584 |
return [], self.token_message([0])
|
585 |
|
586 |
def delete_first_conversation(self):
|
@@ -623,7 +657,8 @@ class BaseLLMModel:
|
|
623 |
|
624 |
def auto_save(self, chatbot):
|
625 |
history_file_path = get_history_filepath(self.user_identifier)
|
626 |
-
save_file(history_file_path, self.system_prompt,
|
|
|
627 |
|
628 |
def export_markdown(self, filename, chatbot, user_name):
|
629 |
if filename == "":
|
@@ -639,7 +674,8 @@ class BaseLLMModel:
|
|
639 |
filename = filename.name
|
640 |
try:
|
641 |
if "/" not in filename:
|
642 |
-
history_file_path = os.path.join(
|
|
|
643 |
else:
|
644 |
history_file_path = filename
|
645 |
with open(history_file_path, "r", encoding="utf-8") as f:
|
@@ -665,15 +701,33 @@ class BaseLLMModel:
|
|
665 |
logging.info(f"没有找到对话历史记录 {filename}")
|
666 |
return gr.update(), self.system_prompt, gr.update()
|
667 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
668 |
def auto_load(self):
|
669 |
if self.user_identifier == "":
|
670 |
self.reset()
|
671 |
return self.system_prompt, gr.update()
|
672 |
history_file_path = get_history_filepath(self.user_identifier)
|
673 |
-
filename, system_prompt, chatbot = self.load_chat_history(
|
|
|
674 |
return system_prompt, chatbot
|
675 |
|
676 |
-
|
677 |
def like(self):
|
678 |
"""like the last response, implement if needed
|
679 |
"""
|
@@ -683,3 +737,47 @@ class BaseLLMModel:
|
|
683 |
"""dislike the last response, implement if needed
|
684 |
"""
|
685 |
return gr.update()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
from langchain.schema import AgentAction, AgentFinish, LLMResult
|
30 |
from threading import Thread, Condition
|
31 |
from collections import deque
|
32 |
+
from langchain.chat_models.base import BaseChatModel
|
33 |
+
from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage
|
34 |
|
35 |
from ..presets import *
|
36 |
from ..index_func import *
|
|
|
38 |
from .. import shared
|
39 |
from ..config import retrieve_proxy
|
40 |
|
41 |
+
|
42 |
class CallbackToIterator:
|
43 |
def __init__(self):
|
44 |
self.queue = deque()
|
|
|
55 |
|
56 |
def __next__(self):
|
57 |
with self.cond:
|
58 |
+
# Wait for a value to be added to the queue.
|
59 |
+
while not self.queue and not self.finished:
|
60 |
self.cond.wait()
|
61 |
if not self.queue:
|
62 |
raise StopIteration()
|
|
|
67 |
self.finished = True
|
68 |
self.cond.notify() # Wake up the generator if it's waiting.
|
69 |
|
70 |
+
|
71 |
def get_action_description(text):
|
72 |
match = re.search('```(.*?)```', text, re.S)
|
73 |
json_text = match.group(1)
|
|
|
81 |
else:
|
82 |
return ""
|
83 |
|
84 |
+
|
85 |
class ChuanhuCallbackHandler(BaseCallbackHandler):
|
86 |
|
87 |
def __init__(self, callback) -> None:
|
|
|
123 |
"""Run on new LLM token. Only available when streaming is enabled."""
|
124 |
self.callback(token)
|
125 |
|
126 |
+
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any) -> Any:
|
127 |
+
"""Run when a chat model starts running."""
|
128 |
+
pass
|
129 |
+
|
130 |
|
131 |
class ModelType(Enum):
|
132 |
Unknown = -1
|
|
|
139 |
YuanAI = 6
|
140 |
Minimax = 7
|
141 |
ChuanhuAgent = 8
|
142 |
+
GooglePaLM = 9
|
143 |
+
LangchainChat = 10
|
144 |
|
145 |
@classmethod
|
146 |
def get_type(cls, model_name: str):
|
|
|
164 |
model_type = ModelType.Minimax
|
165 |
elif "川虎助理" in model_name_lower:
|
166 |
model_type = ModelType.ChuanhuAgent
|
167 |
+
elif "palm" in model_name_lower:
|
168 |
+
model_type = ModelType.GooglePaLM
|
169 |
+
elif "azure" or "api" in model_name_lower:
|
170 |
+
model_type = ModelType.LangchainChat
|
171 |
else:
|
172 |
model_type = ModelType.Unknown
|
173 |
return model_type
|
|
|
177 |
def __init__(
|
178 |
self,
|
179 |
model_name,
|
180 |
+
system_prompt=INITIAL_SYSTEM_PROMPT,
|
181 |
temperature=1.0,
|
182 |
top_p=1.0,
|
183 |
n_choices=1,
|
|
|
217 |
conversations are stored in self.history, with the most recent question, in OpenAI format
|
218 |
should return a generator, each time give the next word (str) in the answer
|
219 |
"""
|
220 |
+
logging.warning(
|
221 |
+
"stream predict not implemented, using at once predict instead")
|
222 |
response, _ = self.get_answer_at_once()
|
223 |
yield response
|
224 |
|
|
|
229 |
the answer (str)
|
230 |
total token count (int)
|
231 |
"""
|
232 |
+
logging.warning(
|
233 |
+
"at once predict not implemented, using stream predict instead")
|
234 |
response_iter = self.get_answer_stream_iter()
|
235 |
count = 0
|
236 |
for response in response_iter:
|
|
|
264 |
stream_iter = self.get_answer_stream_iter()
|
265 |
|
266 |
if display_append:
|
267 |
+
display_append = '\n\n<hr class="append-display no-in-raw" />' + display_append
|
268 |
for partial_text in stream_iter:
|
269 |
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
|
270 |
self.all_token_counts[-1] += 1
|
|
|
291 |
self.history[-2] = construct_user(fake_input)
|
292 |
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
|
293 |
if fake_input is not None:
|
294 |
+
self.all_token_counts[-1] += count_token(
|
295 |
+
construct_assistant(ai_reply))
|
296 |
else:
|
297 |
+
self.all_token_counts[-1] = total_token_count - \
|
298 |
+
sum(self.all_token_counts)
|
299 |
status_text = self.token_message()
|
300 |
return chatbot, status_text
|
301 |
|
|
|
319 |
from langchain.chat_models import ChatOpenAI
|
320 |
from langchain.callbacks import StdOutCallbackHandler
|
321 |
prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
|
322 |
+
PROMPT = PromptTemplate(
|
323 |
+
template=prompt_template, input_variables=["text"])
|
324 |
llm = ChatOpenAI()
|
325 |
+
chain = load_summarize_chain(
|
326 |
+
llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)
|
327 |
+
summary = chain({"input_documents": list(index.docstore.__dict__[
|
328 |
+
"_dict"].values())}, return_only_outputs=True)["output_text"]
|
329 |
print(i18n("总结") + f": {summary}")
|
330 |
chatbot.append([i18n("上传了")+str(len(files))+"个文件", summary])
|
331 |
return chatbot, status
|
|
|
346 |
msg = "索引获取成功,生成回答中……"
|
347 |
logging.info(msg)
|
348 |
with retrieve_proxy():
|
349 |
+
retriever = VectorStoreRetriever(vectorstore=index, search_type="similarity_score_threshold", search_kwargs={
|
350 |
+
"k": 6, "score_threshold": 0.5})
|
351 |
+
relevant_documents = retriever.get_relevant_documents(
|
352 |
+
real_inputs)
|
353 |
+
reference_results = [[d.page_content.strip("�"), os.path.basename(
|
354 |
+
d.metadata["source"])] for d in relevant_documents]
|
355 |
reference_results = add_source_numbers(reference_results)
|
356 |
display_append = add_details(reference_results)
|
357 |
display_append = "\n\n" + "".join(display_append)
|
|
|
374 |
reference_results.append([result['body'], result['href']])
|
375 |
display_append.append(
|
376 |
# f"{idx+1}. [{domain_name}]({result['href']})\n"
|
377 |
+
f"<a href=\"{result['href']}\" target=\"_blank\">{idx+1}. {result['title']}</a>"
|
378 |
)
|
379 |
reference_results = add_source_numbers(reference_results)
|
380 |
+
# display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
|
381 |
+
display_append = '<div class = "source-a">' + \
|
382 |
+
"".join(display_append) + '</div>'
|
383 |
real_inputs = (
|
384 |
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
|
385 |
.replace("{query}", real_inputs)
|
|
|
403 |
|
404 |
status_text = "开始生成回答……"
|
405 |
logging.info(
|
406 |
+
"用户" + f"{self.user_identifier}" + "的输入为:" +
|
407 |
+
colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
|
408 |
)
|
409 |
if should_check_token_count:
|
410 |
yield chatbot + [(inputs, "")], status_text
|
411 |
if reply_language == "跟随问题语言(不稳定)":
|
412 |
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
|
413 |
|
414 |
+
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(
|
415 |
+
real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
|
416 |
yield chatbot + [(fake_inputs, "")], status_text
|
417 |
|
418 |
if (
|
|
|
598 |
self.system_prompt = new_system_prompt
|
599 |
|
600 |
def set_key(self, new_access_key):
|
601 |
+
if "*" not in new_access_key:
|
602 |
+
self.api_key = new_access_key.strip()
|
603 |
+
msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
|
604 |
+
logging.info(msg)
|
605 |
+
return self.api_key, msg
|
606 |
+
else:
|
607 |
+
return gr.update(), gr.update()
|
608 |
|
609 |
def set_single_turn(self, new_single_turn):
|
610 |
self.single_turn = new_single_turn
|
|
|
613 |
self.history = []
|
614 |
self.all_token_counts = []
|
615 |
self.interrupted = False
|
616 |
+
pathlib.Path(os.path.join(HISTORY_DIR, self.user_identifier, new_auto_history_filename(
|
617 |
+
os.path.join(HISTORY_DIR, self.user_identifier)))).touch()
|
618 |
return [], self.token_message([0])
|
619 |
|
620 |
def delete_first_conversation(self):
|
|
|
657 |
|
658 |
def auto_save(self, chatbot):
|
659 |
         history_file_path = get_history_filepath(self.user_identifier)
+        save_file(history_file_path, self.system_prompt,
+                  self.history, chatbot, self.user_identifier)
 
     def export_markdown(self, filename, chatbot, user_name):
         if filename == "":
...
             filename = filename.name
         try:
             if "/" not in filename:
+                history_file_path = os.path.join(
+                    HISTORY_DIR, user_name, filename)
             else:
                 history_file_path = filename
             with open(history_file_path, "r", encoding="utf-8") as f:
...
             logging.info(f"没有找到对话历史记录 {filename}")
             return gr.update(), self.system_prompt, gr.update()
 
+    def delete_chat_history(self, filename, user_name):
+        if filename == "CANCELED":
+            return gr.update(), gr.update(), gr.update()
+        if filename == "":
+            return i18n("你没有选择任何对话历史"), gr.update(), gr.update()
+        if not filename.endswith(".json"):
+            filename += ".json"
+        if "/" not in filename:
+            history_file_path = os.path.join(HISTORY_DIR, user_name, filename)
+        else:
+            history_file_path = filename
+        try:
+            os.remove(history_file_path)
+            return i18n("删除对话历史成功"), get_history_names(False, user_name), []
+        except:
+            logging.info(f"删除对话历史失败 {history_file_path}")
+            return i18n("对话历史")+filename+i18n("已经被删除啦"), gr.update(), gr.update()
+
     def auto_load(self):
         if self.user_identifier == "":
             self.reset()
             return self.system_prompt, gr.update()
         history_file_path = get_history_filepath(self.user_identifier)
+        filename, system_prompt, chatbot = self.load_chat_history(
+            history_file_path, self.user_identifier)
         return system_prompt, chatbot
...
     def like(self):
         """like the last response, implement if needed
         """
...
         """dislike the last response, implement if needed
         """
         return gr.update()
+
+
+class Base_Chat_Langchain_Client(BaseLLMModel):
+    def __init__(self, model_name, user_name=""):
+        super().__init__(model_name, user=user_name)
+        self.need_api_key = False
+        self.model = self.setup_model()
+
+    def setup_model(self):
+        # inplement this to setup the model then return it
+        pass
+
+    def _get_langchain_style_history(self):
+        history = [SystemMessage(content=self.system_prompt)]
+        for i in self.history:
+            if i["role"] == "user":
+                history.append(HumanMessage(content=i["content"]))
+            elif i["role"] == "assistant":
+                history.append(AIMessage(content=i["content"]))
+        return history
+
+    def get_answer_at_once(self):
+        assert isinstance(
+            self.model, BaseChatModel), "model is not instance of LangChain BaseChatModel"
+        history = self._get_langchain_style_history()
+        response = self.model.generate(history)
+        return response.content, sum(response.content)
+
+    def get_answer_stream_iter(self):
+        it = CallbackToIterator()
+        assert isinstance(
+            self.model, BaseChatModel), "model is not instance of LangChain BaseChatModel"
+        history = self._get_langchain_style_history()
+
+        def thread_func():
+            self.model(messages=history, callbacks=[
+                       ChuanhuCallbackHandler(it.callback)])
+            it.finish()
+        t = Thread(target=thread_func)
+        t.start()
+        partial_text = ""
+        for value in it:
+            partial_text += value
+            yield partial_text
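The new `Base_Chat_Langchain_Client` base class above adapts any LangChain `BaseChatModel` to Chuanhu's model interface: a subclass only has to return a model from `setup_model()`, while streaming is handled by pushing tokens from `ChuanhuCallbackHandler` through `CallbackToIterator` on a background thread. A minimal sketch of such a subclass follows; the `ChatOpenAI` class and its arguments are illustrative assumptions, not part of this commit.

```python
# Hypothetical subclass showing how Base_Chat_Langchain_Client is meant to be
# extended. Any LangChain BaseChatModel returned by setup_model() would work;
# ChatOpenAI is used here purely as an example.
from langchain.chat_models import ChatOpenAI


class Example_Langchain_Client(Base_Chat_Langchain_Client):
    def setup_model(self):
        # streaming=True lets get_answer_stream_iter() receive tokens through
        # the ChuanhuCallbackHandler callback as they are generated
        return ChatOpenAI(model_name="gpt-3.5-turbo", streaming=True)
```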
modules/models/models.py
CHANGED
@@ -24,7 +24,7 @@ from ..presets import *
 from ..index_func import *
 from ..utils import *
 from .. import shared
-from ..config import retrieve_proxy, usage_limit
+from ..config import retrieve_proxy, usage_limit, sensitive_id
 from modules import config
 from .base_model import BaseLLMModel, ModelType
 
@@ -87,21 +87,22 @@ class OpenAIClient(BaseLLMModel):
             try:
                 usage_data = self._get_billing_data(usage_url)
             except Exception as e:
-                logging.error(f"获取API使用情况失败:" + str(e))
+                # logging.error(f"获取API使用情况失败: " + str(e))
+                if "Invalid authorization header" in str(e):
+                    return i18n("**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id")
+                elif "Incorrect API key provided: sess" in str(e):
+                    return i18n("**获取API使用情况失败**,sensitive_id错误或已过期")
                 return i18n("**获取API使用情况失败**")
             # rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100)
             rounded_usage = round(usage_data["total_usage"] / 100, 5)
             usage_percent = round(usage_data["total_usage"] / usage_limit, 2)
             # return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
-            return ""
-
-
-
-
-
-                </div>
-                <div style="display: flex; justify-content: space-between;"><span>${rounded_usage}</span><span>${usage_limit}</span></div>
-                """
+            return get_html("billing_info.html").format(
+                label = i18n("本月使用金额"),
+                usage_percent = usage_percent,
+                rounded_usage = rounded_usage,
+                usage_limit = usage_limit
+            )
         except requests.exceptions.ConnectTimeout:
             status_text = (
                 STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
@@ -179,9 +180,10 @@ class OpenAIClient(BaseLLMModel):
     def _refresh_header(self):
         self.headers = {
             "Content-Type": "application/json",
-            "Authorization": f"Bearer {
+            "Authorization": f"Bearer {sensitive_id}",
         }
 
+
     def _get_billing_data(self, billing_url):
         with retrieve_proxy():
             response = requests.get(
@@ -560,6 +562,7 @@ def get_model(
     try:
         if model_type == ModelType.OpenAI:
            logging.info(f"正在加载OpenAI模型: {model_name}")
+            access_key = os.environ.get("OPENAI_API_KEY", access_key)
             model = OpenAIClient(
                 model_name=model_name,
                 api_key=access_key,
@@ -610,16 +613,25 @@
         elif model_type == ModelType.ChuanhuAgent:
             from .ChuanhuAgent import ChuanhuAgent_Client
             model = ChuanhuAgent_Client(model_name, access_key, user_name=user_name)
+        elif model_type == ModelType.GooglePaLM:
+            from .Google_PaLM import Google_PaLM_Client
+            access_key = os.environ.get("GOOGLE_PALM_API_KEY", access_key)
+            model = Google_PaLM_Client(model_name, access_key, user_name=user_name)
+        elif model_type == ModelType.LangchainChat:
+            from .azure import Azure_OpenAI_Client
+            model = Azure_OpenAI_Client(model_name, user_name=user_name)
         elif model_type == ModelType.Unknown:
             raise ValueError(f"未知模型: {model_name}")
         logging.info(msg)
     except Exception as e:
-
+        import traceback
+        traceback.print_exc()
         msg = f"{STANDARD_ERROR_MSG}: {e}"
+    presudo_key = hide_middle_chars(access_key)
     if dont_change_lora_selector:
-        return model, msg, chatbot
+        return model, msg, chatbot, gr.update(), access_key, presudo_key
     else:
-        return model, msg, chatbot, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility)
+        return model, msg, chatbot, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility), access_key, presudo_key
 
 
 if __name__ == "__main__":
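The billing changes above split the credentials: chat requests keep using the regular API key (now overridable through the `OPENAI_API_KEY` environment variable), while the usage query authenticates with the new `sensitive_id` read from `config.json`. The sketch below shows how that configuration split looks when loaded; the `"openai_api_key"` key name is taken from `config_example.json` and should be treated as an assumption.

```python
# Illustrative only: read the two credentials the diff distinguishes between.
# "openai_api_key" drives chat completions; "sensitive_id" is only used by the
# billing/usage endpoint queried in billing_info().
import json

with open("config.json", encoding="utf-8") as f:
    cfg = json.load(f)

openai_api_key = cfg.get("openai_api_key", "")
sensitive_id = cfg.get("sensitive_id", "")
```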
modules/overwrites.py
CHANGED
@@ -80,8 +80,7 @@ with open("./assets/custom.js", "r", encoding="utf-8") as f, \
 def reload_javascript():
     print("Reloading javascript...")
     js = f'<script>{customJS}</script><script async>{externalScripts}</script>'
-
-    # js += """\"""
+    js += '<script async src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>'
     def template_response(*args, **kwargs):
         res = GradioTemplateResponseOriginal(*args, **kwargs)
         res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
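The one-line change to `reload_javascript()` above rides on the monkey-patching the function already performs: every Gradio page response gets the custom scripts, now including the marked.js CDN bundle, spliced in just before the closing `</html>` tag. The splice itself reduces to a plain byte-string replacement, sketched standalone below with a made-up sample page.

```python
# Standalone sketch of the injection pattern used by template_response().
body = b"<html><head></head><body>Gradio app</body></html>"
js = '<script async src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>'
patched = body.replace(b"</html>", f"{js}</html>".encode("utf8"))
assert patched.endswith(f"{js}</html>".encode("utf8"))
```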
modules/presets.py
CHANGED
@@ -60,7 +60,9 @@ ONLINE_MODELS = [
     "gpt-4-32k-0613",
     "川虎助理",
     "川虎助理 Pro",
+    "GooglePaLM",
     "xmchat",
+    "Azure OpenAI",
     "yuanai-1.0-base_10B",
     "yuanai-1.0-translate",
     "yuanai-1.0-dialog",
@@ -72,7 +74,9 @@ ONLINE_MODELS = [
 LOCAL_MODELS = [
     "chatglm-6b",
     "chatglm-6b-int4",
-    "chatglm-6b-int4-
+    "chatglm-6b-int4-ge",
+    "chatglm2-6b",
+    "chatglm2-6b-int4",
     "StableLM",
     "MOSS",
     "llama-7b-hf",
@@ -121,6 +125,7 @@ REPLY_LANGUAGES = [
     "Español",
     "Français",
     "Deutsch",
+    "한국어",
     "跟随问题语言(不稳定)"
 ]
 
@@ -222,7 +227,7 @@ small_and_beautiful_theme = gr.themes.Soft(
     # button_primary_background_fill_hover="*primary_400",
     # button_primary_border_color="*primary_500",
     button_primary_border_color_dark="*primary_600",
-    button_primary_text_color="
+    button_primary_text_color="white",
     button_primary_text_color_dark="white",
     button_secondary_background_fill="*neutral_100",
     button_secondary_background_fill_hover="*neutral_50",
modules/utils.py
CHANGED
@@ -5,6 +5,7 @@ import logging
 import json
 import os
 import datetime
+from datetime import timezone
 import hashlib
 import csv
 import requests
@@ -47,6 +48,9 @@ def set_key(current_model, *args):
 def load_chat_history(current_model, *args):
     return current_model.load_chat_history(*args)
 
+def delete_chat_history(current_model, *args):
+    return current_model.delete_chat_history(*args)
+
 def interrupt(current_model, *args):
     return current_model.interrupt(*args)
 
@@ -214,7 +218,10 @@ def convert_bot_before_marked(chat_message):
     non_code_parts = code_block_pattern.split(chat_message)[::2]
     result = []
 
-
+    hr_pattern = r'\n\n<hr class="append-display no-in-raw" />(.*?)'
+    hr_match = re.search(hr_pattern, chat_message, re.DOTALL)
+    clip_hr = chat_message[:hr_match.start()] if hr_match else chat_message
+    raw = f'<div class="raw-message hideM">{escape_markdown(clip_hr)}</div>'
     for non_code, code in zip(non_code_parts, code_blocks + [""]):
         if non_code.strip():
             result.append(non_code)
@@ -236,7 +243,7 @@ def escape_markdown(text):
     Escape Markdown special characters to HTML-safe equivalents.
     """
     escape_chars = {
-        ' ': ' ',
+        # ' ': ' ',
         '_': '_',
         '*': '*',
         '[': '[',
@@ -253,8 +260,11 @@ def escape_markdown(text):
         '`': '`',
         '>': '>',
         '<': '<',
-        '|': '|'
+        '|': '|',
+        '$': '$',
+        ':': ':',
     }
+    text = text.replace(' ', ' ')
     return ''.join(escape_chars.get(c, c) for c in text)
 
 
@@ -531,26 +541,55 @@ def run(command, desc=None, errdesc=None, custom_env=None, live=False):
         raise RuntimeError(message)
     return result.stdout.decode(encoding="utf8", errors="ignore")
 
-def 
+def commit_html():
     git = os.environ.get('GIT', "git")
-    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
     try:
         commit_hash = run(f"{git} rev-parse HEAD").strip()
     except Exception:
         commit_hash = "<none>"
     if commit_hash != "<none>":
         short_commit = commit_hash[0:7]
-        commit_info = f
+        commit_info = f'<a style="text-decoration:none;color:inherit" href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/commit/{short_commit}">{short_commit}</a>'
     else:
         commit_info = "unknown \U0001F615"
+    return commit_info
+
+def tag_html():
+    git = os.environ.get('GIT', "git")
+    try:
+        tag = run(f"{git} describe --tags --exact-match").strip()
+    except Exception:
+        tag = "<none>"
+    if tag != "<none>":
+        tag_info = f'<a style="text-decoration:none;color:inherit" href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/releases/tag/{tag}">{tag}</a>'
+    else:
+        tag_info = "unknown \U0001F615"
+    return tag_info
+
+def repo_html():
+    commit_version = commit_html()
+    tag_version = tag_html()
+    return tag_version if tag_version != "unknown \U0001F615" else commit_version
+
+def versions_html():
+    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
+    repo_version = repo_html()
     return f"""
     Python: <span title="{sys.version}">{python_version}</span>
      • 
     Gradio: {gr.__version__}
      • 
-    <a style="text-decoration:none;color:inherit" href="https://github.com/GaiZhenbiao/ChuanhuChatGPT">ChuanhuChat</a>: {
+    <a style="text-decoration:none;color:inherit" href="https://github.com/GaiZhenbiao/ChuanhuChatGPT">ChuanhuChat</a>: {repo_version}
     """
 
+def version_time():
+    git = os.environ.get('GIT', "git")
+    try:
+        commit_time = run(f"TZ=UTC {git} log -1 --format=%cd --date='format-local:%Y-%m-%dT%H:%M:%SZ'").strip()
+    except Exception:
+        commit_time = "unknown"
+    return commit_time
+
 def get_html(filename):
     path = os.path.join(shared.chuanhu_path, "assets", "html", filename)
     if os.path.exists(path):
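Among the utils changes above, the new lines in `convert_bot_before_marked()` clip the escaped raw copy of a bot message at the `append-display` `<hr>`, so anything Chuanhu appends after the answer (for example web-search references) stays out of the raw view. The clipping step in isolation looks like this; the sample message is made up.

```python
import re

# Made-up message with an appended block after the special <hr> marker.
msg = 'Answer text\n\n<hr class="append-display no-in-raw" />appended references'

hr_pattern = r'\n\n<hr class="append-display no-in-raw" />(.*?)'
hr_match = re.search(hr_pattern, msg, re.DOTALL)
clip_hr = msg[:hr_match.start()] if hr_match else msg

assert clip_hr == 'Answer text'  # only the answer survives in the raw copy
```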
readme/README_en.md
CHANGED
@@ -6,7 +6,7 @@
 <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
 <div align="center">
   <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
-    <img src="https://
+    <img src="https://github.com/GaiZhenbiao/ChuanhuChatGPT/assets/70903329/aca3a7ec-4f1d-4667-890c-a6f47bf08f63" alt="Logo" height="156">
   </a>
 
 <p align="center">
@@ -44,6 +44,23 @@
 </p>
 </div>
 
+## Supported LLM Models
+
+**LLM models via API**:
+
+- [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
+- [Google PaLM](https://developers.generativeai.google/products/palm)
+- [Inspur Yuan 1.0](https://air.inspur.com/home)
+- [MiniMax](https://api.minimax.chat/)
+- [XMChat](https://github.com/MILVLG/xmchat)
+
+**LLM models via local deployment**:
+
+- [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
+- [LLaMA](https://github.com/facebookresearch/llama)
+- [StableLM](https://github.com/Stability-AI/StableLM)
+- [MOSS](https://github.com/OpenLMLab/MOSS)
+
 ## Usage Tips
 
 - To better control the ChatGPT, use System Prompt.
@@ -51,11 +68,11 @@
 - To try again if the response is unsatisfactory, use `🔄 Regenerate` button.
 - To start a new line in the input box, press <kbd>Shift</kbd> + <kbd>Enter</kbd> keys.
 - To quickly switch between input history, press <kbd>↑</kbd> and <kbd>↓</kbd> key in the input box.
-- To deploy the program onto a server,
-- To get a public shared link,
+- To deploy the program onto a server, set `"server_name": "0.0.0.0", "server_port" <your port number>,` in `config.json`.
+- To get a public shared link, set `"share": true,` in `config.json`. Please be noted that the program must be running in order to be accessed via a public link.
 - To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
 
-## 
+## Quickstart
 
 ```shell
 git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
@@ -87,10 +104,6 @@ When you encounter problems, you should try manually pulling the latest changes
 ```
 pip install -r requirements.txt
 ```
-3. Update Gradio
-   ```
-   pip install gradio --upgrade --force-reinstall
-   ```
 
 Generally, you can solve most problems by following these steps.
 
readme/README_ja.md
CHANGED
@@ -6,7 +6,7 @@
 <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
 <div align="center">
   <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
-    <img src="https://
+    <img src="https://github.com/GaiZhenbiao/ChuanhuChatGPT/assets/70903329/aca3a7ec-4f1d-4667-890c-a6f47bf08f63" alt="Logo" height="156">
   </a>
 
 <p align="center">
@@ -44,17 +44,34 @@
 </p>
 </div>
 
+## サポートされている大規模言語モデル
+
+**APIを通じてアクセス可能な大規模言語モデル**:
+
+- [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
+- [Google PaLM](https://developers.generativeai.google/products/palm)
+- [Inspur Yuan 1.0](https://air.inspur.com/home)
+- [MiniMax](https://api.minimax.chat/)
+- [XMChat](https://github.com/MILVLG/xmchat)
+
+**ローカルに展開された大規模言語モデル**:
+
+- [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
+- [LLaMA](https://github.com/facebookresearch/llama)
+- [StableLM](https://github.com/Stability-AI/StableLM)
+- [MOSS](https://github.com/OpenLMLab/MOSS)
+
 ## 使う上でのTips
 
 - ChatGPTをより適切に制御するために、システムプロンプトを使用できます。
 - プロンプトテンプレートを使用するには、プロンプトテンプレートコレクションを選択し、ドロップダウンメニューから特定のプロンプトを選択。回答が不十分な場合は、`🔄再生成`ボタンを使って再試行します。
 - 入力ボックスで改行するには、<kbd>Shift</kbd> + <kbd>Enter</kbd>キーを押してください。
 - 入力履歴を素早く切り替えるには、入力ボックスで <kbd>↑</kbd>と<kbd>↓</kbd>キーを押す。
--
--
+- プログラムをサーバーに展開するには、`config.json` 内の `"server_name": "0.0.0.0", "server_port": <ポート番号>`を設定してください。
+- 共有リンクを取得するには、 `config.json` 内の `"share": true` を設定してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。
 - Hugging Face Spacesで使用する場合: より速く、より安全に利用するために、**Duplicate Space**を使用し、自分のスペースでプログラムを実行することをお勧めします。
 
-## 
+## クイックスタート
 
 ```shell
 git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
@@ -86,10 +103,6 @@ python ChuanhuChatbot.py
 ```
 pip install -r requirements.txt
 ```
-3. Gradioを更新
-   ```
-   pip install gradio --upgrade --force-reinstall
-   ```
 
 一般的に、以下の手順でほとんどの問題を解決することができます。
 
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
-gradio==3.
-gradio_client==0.2.
+gradio==3.36.1
+gradio_client==0.2.7
 pypinyin
 tiktoken
 socksio
@@ -16,10 +16,12 @@ commentjson
 openpyxl
 pandoc
 wolframalpha
-faiss-cpu
+faiss-cpu==1.7.4
 duckduckgo-search
 arxiv
 wikipedia
 google.generativeai
 openai
 unstructured
+google-api-python-client
+tabulate
run_Windows.bat
CHANGED
@@ -1,5 +1,24 @@
 @echo off
 echo Opening ChuanhuChatGPT...
 
-
-
+if not exist "%~dp0\ChuanhuChat\Scripts" (
+    echo Creating venv...
+    python -m venv ChuanhuChat
+
+    cd /d "%~dp0\ChuanhuChat\Scripts"
+    call activate.bat
+
+    cd /d "%~dp0"
+    pip install -r requirements.txt
+)
+
+goto :activate_venv
+
+:launch
+%PYTHON% ChuanhuChatbot.py %*
+pause
+
+:activate_venv
+set PYTHON="%~dp0\ChuanhuChat\Scripts\Python.exe"
+echo venv %PYTHON%
+goto :launch