Commit c67c8ad • committed by JohnSmith9982
1 Parent(s): 064635f

Upload 39 files
Files changed:
- ChuanhuChatbot.py +23 -5
- config.json +2 -0
- config_example.json +2 -0
- locale/en_US.json +1 -1
- locale/ja_JP.json +1 -1
- modules/base_model.py +18 -7
- modules/config.py +7 -5
- modules/llama_func.py +1 -1
- modules/models.py +70 -23
- modules/presets.py +0 -4
- modules/utils.py +26 -11
- modules/webui_locale.py +2 -2
- readme/README_en.md +1 -1
- readme/README_ja.md +1 -1
ChuanhuChatbot.py  CHANGED

@@ -67,13 +67,18 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 retryBtn = gr.Button(i18n("🔄 重新生成"))
 delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
 delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
+with gr.Row(visible=False) as like_dislike_area:
+    with gr.Column(min_width=20, scale=1):
+        likeBtn = gr.Button(i18n("👍"))
+    with gr.Column(min_width=20, scale=1):
+        dislikeBtn = gr.Button(i18n("👎"))

 with gr.Column():
     with gr.Column(min_width=50, scale=1):
         with gr.Tab(label=i18n("模型")):
             keyTxt = gr.Textbox(
                 show_label=True,
-                placeholder=f"
+                placeholder=f"Your API-key...",
                 value=hide_middle_chars(user_api_key.value),
                 type="password",
                 visible=not HIDE_MY_KEY,

@@ -101,7 +106,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
     multiselect=False,
     value=REPLY_LANGUAGES[0],
 )
-index_files = gr.Files(label=i18n("
+index_files = gr.Files(label=i18n("上传"), type="file")
 two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False))
 # TODO: 公式ocr
 # formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False))

@@ -269,6 +274,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:

 gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
 gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer")
+demo.load(refresh_ui_elements_on_load, [current_model, model_select_dropdown], [like_dislike_area], show_progress=False)
 chatgpt_predict_args = dict(
     fn=predict,
     inputs=[

@@ -331,7 +337,6 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
     outputs=[chatbot, status_display],
     show_progress=True,
 )
-emptyBtn.click(**reset_textbox_args)

 retryBtn.click(**start_outputing_args).then(
     retry,

@@ -361,6 +366,20 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
     show_progress=False
 )

+likeBtn.click(
+    like,
+    [current_model],
+    [status_display],
+    show_progress=False
+)
+
+dislikeBtn.click(
+    dislike,
+    [current_model],
+    [status_display],
+    show_progress=False
+)
+
 two_column.change(update_doc_config, [two_column], None)

 # LLM Models

@@ -368,6 +387,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 keyTxt.submit(**get_usage_args)
 single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
 model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display, lora_select_dropdown], show_progress=True)
+model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False)
 lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display], show_progress=True)

 # Template

@@ -443,9 +463,7 @@ demo.title = i18n("川虎Chat 🚀")
 if __name__ == "__main__":
     reload_javascript()
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
-        auth=auth_list if authflag else None,
         favicon_path="./assets/favicon.ico",
-        inbrowser=not dockerflag, # 禁止在docker下开启inbrowser
     )
     # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口
     # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码
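The new like/dislike controls use the standard Gradio pattern: the two buttons sit in a gr.Row that starts hidden, and handlers return gr.update(visible=...) to show or hide it when the selected model changes. A minimal standalone sketch of that pattern, assuming only that gradio is installed (component names and the "xmchat" check mirror the diff; the rest is illustrative):

import gradio as gr

def toggle_like_btn_visibility(selected_model_name):
    # Show the feedback row only for the model that supports feedback (here: "xmchat")
    return gr.update(visible=(selected_model_name == "xmchat"))

def like(current_model):
    # Stand-in for the real handler, which delegates to the current model object
    return "👍 feedback sent"

with gr.Blocks() as demo:
    model_select_dropdown = gr.Dropdown(["gpt-3.5-turbo", "xmchat"], value="gpt-3.5-turbo")
    with gr.Row(visible=False) as like_dislike_area:
        likeBtn = gr.Button("👍")
        dislikeBtn = gr.Button("👎")
    status_display = gr.Markdown()
    current_model = gr.State(None)

    # Returning gr.update(visible=...) into the Row output toggles the whole feedback area
    model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False)
    likeBtn.click(like, [current_model], [status_display], show_progress=False)

demo.launch()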
config.json  ADDED

@@ -0,0 +1,2 @@
+{
+}
config_example.json  CHANGED

@@ -2,6 +2,8 @@
     // 你的OpenAI API Key,一般必填,
     // 若缺省填为 "openai_api_key": "" 则必须再在图形界面中填入API Key
     "openai_api_key": "",
+    // 你的xmchat API Key,与OpenAI API Key不同
+    "xmchat_api_key": "",
     "language": "auto",
     // 如果使用代理,请取消注释下面的两行,并替换代理URL
     // "https_proxy": "http://127.0.0.1:1079",
locale/en_US.json  CHANGED

@@ -14,7 +14,7 @@
     "单轮对话": "Single-turn dialogue",
     "使用在线搜索": "Use online search",
     "选择回复语言(针对搜索&索引功能)": "Select reply language (for search & index)",
-    "上传索引文件": "Upload
+    "上传索引文件": "Upload",
     "双栏pdf": "Two-column pdf",
     "识别公式": "formula OCR",
     "在这里输入System Prompt...": "Type in System Prompt here...",
locale/ja_JP.json  CHANGED

@@ -14,7 +14,7 @@
     "单轮对话": "単発会話",
     "使用在线搜索": "オンライン検索を使用",
     "选择回复语言(针对搜索&索引功能)": "回答言語を選択(検索とインデックス機能に対して)",
-    "上传索引文件": "
+    "上传索引文件": "アップロード",
     "双栏pdf": "2カラムpdf",
     "识别公式": "formula OCR",
     "在这里输入System Prompt...": "System Promptを入力してください...",
modules/base_model.py  CHANGED

@@ -29,7 +29,7 @@ class ModelType(Enum):
     OpenAI = 0
     ChatGLM = 1
     LLaMA = 2
-
+    XMChat = 3

     @classmethod
     def get_type(cls, model_name: str):

@@ -42,7 +42,7 @@ class ModelType(Enum):
         elif "llama" in model_name_lower or "alpaca" in model_name_lower:
             model_type = ModelType.LLaMA
         elif "xmchat" in model_name_lower:
-            model_type = ModelType.
+            model_type = ModelType.XMChat
         else:
             model_type = ModelType.Unknown
         return model_type

@@ -201,7 +201,7 @@ class BaseLLMModel:
         msg = "索引获取成功,生成回答中……"
         logging.info(msg)
         if local_embedding or self.model_type != ModelType.OpenAI:
-            embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
+            embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
         else:
             embed_model = OpenAIEmbedding()
         # yield chatbot + [(inputs, "")], msg

@@ -245,10 +245,11 @@ class BaseLLMModel:
             domain_name = urllib3.util.parse_url(result["href"]).host
             reference_results.append([result["body"], result["href"]])
             display_append.append(
-                f"{idx+1}. [{domain_name}]({result['href']})\n"
+                # f"{idx+1}. [{domain_name}]({result['href']})\n"
+                f"<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n"
             )
         reference_results = add_source_numbers(reference_results)
-        display_append = "
+        display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
         real_inputs = (
             replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
             .replace("{query}", real_inputs)

@@ -463,9 +464,9 @@ class BaseLLMModel:

     def set_key(self, new_access_key):
         self.api_key = new_access_key.strip()
-        msg =
+        msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
         logging.info(msg)
-        return
+        return self.api_key, msg

     def set_single_turn(self, new_single_turn):
         self.single_turn = new_single_turn

@@ -548,3 +549,13 @@ class BaseLLMModel:
         except FileNotFoundError:
             logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
         return filename, self.system_prompt, chatbot
+
+    def like(self):
+        """like the last response, implement if needed
+        """
+        return gr.update()
+
+    def dislike(self):
+        """dislike the last response, implement if needed
+        """
+        return gr.update()
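In the web-search path above, the reference list is now rendered as an HTML ordered list instead of numbered Markdown links. A small self-contained sketch of that formatting step, using made-up search results in place of the app's real ones:

import urllib3

# Made-up results standing in for the search hits the app collects
results = [
    {"href": "https://example.org/a", "body": "First result"},
    {"href": "https://example.com/b", "body": "Second result"},
]

display_append = []
for result in results:
    domain_name = urllib3.util.parse_url(result["href"]).host
    # Each reference becomes an <li> linking to its source, opened in a new tab
    display_append.append(
        f"<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n"
    )
display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
print(display_append)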
modules/config.py  CHANGED

@@ -32,9 +32,8 @@ if os.path.exists("config.json"):
 else:
     config = {}

-
-language = os.environ.get("
-
+lang_config = config.get("language", "auto")
+language = os.environ.get("default_ui_lang", lang_config)

 if os.path.exists("api_key.txt"):
     logging.info("检测到api_key.txt文件,正在进行迁移...")

@@ -66,8 +65,11 @@ if os.environ.get("dockerrun") == "yes":
     dockerflag = True

 ## 处理 api-key 以及 允许的用户列表
-my_api_key = config.get("openai_api_key", "")
-my_api_key = os.environ.get("
+my_api_key = config.get("openai_api_key", "")
+my_api_key = os.environ.get("my_api_key", my_api_key)
+
+xmchat_api_key = config.get("xmchat_api_key", "")
+os.environ["XMCHAT_API_KEY"] = xmchat_api_key

 ## 多账户机制
 multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制
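The configuration handling keeps a simple precedence: a built-in default is overridden by config.json, which in turn can be overridden by an environment variable; the new xmchat_api_key is then exported through the environment so the model layer can read it later. A minimal sketch of that precedence, using the same file and key names as the diff:

import json
import os

# config.json is optional; fall back to an empty dict just like the app does
config = {}
if os.path.exists("config.json"):
    with open("config.json", "r", encoding="utf-8") as f:
        config = json.load(f)

lang_config = config.get("language", "auto")               # default -> config.json
language = os.environ.get("default_ui_lang", lang_config)  # config.json -> env override

xmchat_api_key = config.get("xmchat_api_key", "")
os.environ["XMCHAT_API_KEY"] = xmchat_api_key               # read back later when the model is built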
modules/llama_func.py  CHANGED

@@ -134,7 +134,7 @@ def construct_index(
     try:
         documents = get_documents(file_src)
         if local_embedding:
-            embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
+            embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
         else:
             embed_model = OpenAIEmbedding()
         logging.info("构建索引中……")
modules/models.py  CHANGED

@@ -9,6 +9,9 @@ import sys
 import requests
 import urllib3
 import platform
+import base64
+from io import BytesIO
+from PIL import Image

 from tqdm import tqdm
 import colorama

@@ -96,6 +99,8 @@ class OpenAIClient(BaseLLMModel):
             status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
             return status_text
         except Exception as e:
+            import traceback
+            traceback.print_exc()
             logging.error(i18n("获取API使用情况失败:") + str(e))
             return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG

@@ -204,6 +209,11 @@ class OpenAIClient(BaseLLMModel):
         if error_msg:
             raise Exception(error_msg)

+    def set_key(self, new_access_key):
+        ret = super().set_key(new_access_key)
+        self._refresh_header()
+        return ret
+

 class ChatGLM_Client(BaseLLMModel):
     def __init__(self, model_name) -> None:

@@ -328,15 +338,6 @@ class LLaMA_Client(BaseLLMModel):
             data_args=data_args,
             pipeline_args=pipeline_args,
         )
-        # Chats
-        # model_name = model_args.model_name_or_path
-        # if model_args.lora_model_path is not None:
-        #     model_name += f" + {model_args.lora_model_path}"
-
-        # context = (
-        #     "You are a helpful assistant who follows the given instructions"
-        #     " unconditionally."
-        # )

     def _get_llama_style_input(self):
         history = []

@@ -391,7 +392,7 @@ class LLaMA_Client(BaseLLMModel):
         yield partial_text


-class XMBot_Client(BaseLLMModel):
+class XMChat(BaseLLMModel):
     def __init__(self, api_key):
         super().__init__(model_name="xmchat")
         self.api_key = api_key

@@ -401,36 +402,77 @@ class XMBot_Client(BaseLLMModel):
         self.image_path = None
         self.xm_history = []
         self.url = "https://xmbot.net/web"
+        self.last_conv_id = None

     def reset(self):
         self.session_id = str(uuid.uuid4())
+        self.last_conv_id = None
         return [], "已重置"

-    def
-
+    def image_to_base64(self, image_path):
+        # 打开并加载图片
+        img = Image.open(image_path)
+
+        # 获取图片的宽度和高度
+        width, height = img.size
+
+        # 计算压缩比例,以确保最长边小于4096像素
+        max_dimension = 2048
+        scale_ratio = min(max_dimension / width, max_dimension / height)
+
+        if scale_ratio < 1:
+            # 按压缩比例调整图片大小
+            new_width = int(width * scale_ratio)
+            new_height = int(height * scale_ratio)
+            img = img.resize((new_width, new_height), Image.ANTIALIAS)
+
+        # 将图片转换为jpg格式的二进制数据
+        buffer = BytesIO()
+        if img.mode == "RGBA":
+            img = img.convert("RGB")
+        img.save(buffer, format='JPEG')
+        binary_image = buffer.getvalue()
+
+        # 对二进制数据进行Base64编码
+        base64_image = base64.b64encode(binary_image).decode('utf-8')
+
+        return base64_image

+    def try_read_image(self, filepath):
         def is_image_file(filepath):
             # 判断文件是否为图片
             valid_image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"]
             file_extension = os.path.splitext(filepath)[1].lower()
             return file_extension in valid_image_extensions

-        def read_image_as_bytes(filepath):
-            # 读取图片文件并返回比特流
-            with open(filepath, "rb") as f:
-                image_bytes = f.read()
-            return image_bytes
-
         if is_image_file(filepath):
             logging.info(f"读取图片文件: {filepath}")
-            image_bytes =
-            base64_encoded_image = base64.b64encode(image_bytes).decode()
-            self.image_bytes = base64_encoded_image
+            self.image_bytes = self.image_to_base64(filepath)
             self.image_path = filepath
         else:
             self.image_bytes = None
             self.image_path = None

+    def like(self):
+        if self.last_conv_id is None:
+            return "点赞失败,你还没发送过消息"
+        data = {
+            "uuid": self.last_conv_id,
+            "appraise": "good"
+        }
+        response = requests.post(self.url, json=data)
+        return "👍点赞成功,,感谢反馈~"
+
+    def dislike(self):
+        if self.last_conv_id is None:
+            return "点踩失败,你还没发送过消息"
+        data = {
+            "uuid": self.last_conv_id,
+            "appraise": "bad"
+        }
+        response = requests.post(self.url, json=data)
+        return "👎点踩成功,感谢反馈~"
+
     def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
         fake_inputs = real_inputs
         display_append = ""

@@ -448,6 +490,8 @@ class XMBot_Client(BaseLLMModel):
             chatbot = chatbot + [((self.image_path,), None)]
             if self.image_bytes is not None:
                 logging.info("使用图片作为输入")
+                # XMChat的一轮对话中实际上只能处理一张图片
+                self.reset()
                 conv_id = str(uuid.uuid4())
                 data = {
                     "user_id": self.api_key,

@@ -464,6 +508,7 @@ class XMBot_Client(BaseLLMModel):
     def get_answer_at_once(self):
         question = self.history[-1]["content"]
         conv_id = str(uuid.uuid4())
+        self.last_conv_id = conv_id
         data = {
             "user_id": self.api_key,
             "session_id": self.session_id,

@@ -528,8 +573,10 @@ def get_model(
         else:
             msg += f" + {lora_model_path}"
         model = LLaMA_Client(model_name, lora_model_path)
-    elif model_type == ModelType.
-
+    elif model_type == ModelType.XMChat:
+        if os.environ.get("XMCHAT_API_KEY") != "":
+            access_key = os.environ.get("XMCHAT_API_KEY")
+            model = XMChat(api_key=access_key)
     elif model_type == ModelType.Unknown:
         raise ValueError(f"未知模型: {model_name}")
     logging.info(msg)
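XMChat.image_to_base64 above prepares an uploaded image before it is sent to the API: downscale so the longest side stays within a limit, drop the alpha channel, re-encode as JPEG in memory, and Base64-encode the bytes. A standalone sketch of the same steps; note that Image.ANTIALIAS, used in the commit, has since been removed from Pillow, and Image.LANCZOS is the equivalent filter in current releases:

import base64
from io import BytesIO
from PIL import Image

def image_to_base64(image_path: str, max_dimension: int = 2048) -> str:
    img = Image.open(image_path)
    width, height = img.size

    # Downscale only if the image exceeds the size limit
    scale_ratio = min(max_dimension / width, max_dimension / height)
    if scale_ratio < 1:
        img = img.resize((int(width * scale_ratio), int(height * scale_ratio)), Image.LANCZOS)

    # JPEG has no alpha channel, so convert RGBA images first
    if img.mode == "RGBA":
        img = img.convert("RGB")

    # Encode to JPEG in memory, then Base64-encode the bytes
    buffer = BytesIO()
    img.save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")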
modules/presets.py  CHANGED

@@ -75,12 +75,8 @@ LOCAL_MODELS = [
     "chatglm-6b-int4",
     "chatglm-6b-int4-qe",
     "llama-7b-hf",
-    "llama-7b-hf-int4",
-    "llama-7b-hf-int8",
     "llama-13b-hf",
-    "llama-13b-hf-int4",
     "llama-30b-hf",
-    "llama-30b-hf-int4",
     "llama-65b-hf"
 ]

modules/utils.py  CHANGED

@@ -113,6 +113,12 @@ def set_single_turn(current_model, *args):
 def handle_file_upload(current_model, *args):
     return current_model.handle_file_upload(*args)

+def like(current_model, *args):
+    return current_model.like(*args)
+
+def dislike(current_model, *args):
+    return current_model.dislike(*args)
+

 def count_token(message):
     encoding = tiktoken.get_encoding("cl100k_base")

@@ -451,11 +457,11 @@ Error code: {result.returncode}""")
     result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
     if result.returncode != 0:
         message = f"""{errdesc or 'Error running command'}.
-Command: {command}
-Error code: {result.returncode}
-stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
-stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
-"""
+Command: {command}
+Error code: {result.returncode}
+stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
+stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
+"""
         raise RuntimeError(message)
     return result.stdout.decode(encoding="utf8", errors="ignore")

@@ -472,12 +478,12 @@ def versions_html():
     else:
         commit_info = "unknown \U0001F615"
     return f"""
-Python: <span title="{sys.version}">{python_version}</span>
-
-Gradio: {gr.__version__}
-
-Commit: {commit_info}
-"""
+Python: <span title="{sys.version}">{python_version}</span>
+•
+Gradio: {gr.__version__}
+•
+Commit: {commit_info}
+"""

 def add_source_numbers(lst, source_name = "Source", use_source = True):
     if use_source:

@@ -531,3 +537,12 @@ def get_last_day_of_month(any_day):
 def get_model_source(model_name, alternative_source):
     if model_name == "gpt2-medium":
         return "https://huggingface.co/gpt2-medium"
+
+def refresh_ui_elements_on_load(current_model, selected_model_name):
+    return toggle_like_btn_visibility(selected_model_name)
+
+def toggle_like_btn_visibility(selected_model_name):
+    if selected_model_name == "xmchat":
+        return gr.update(visible=True)
+    else:
+        return gr.update(visible=False)
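like and dislike are added as thin module-level wrappers, mirroring the existing handle_file_upload pattern: the Gradio callback receives the current model object as its first input and simply forwards to the method, so each model class decides what feedback means (the base class is a no-op, XMChat posts it upstream). A minimal sketch of that dispatch, assuming the app keeps the model in a gr.State (DummyModel here is purely illustrative):

import gradio as gr

class DummyModel:
    # Stand-in for a BaseLLMModel subclass
    def like(self):
        return "👍 recorded"

def like(current_model, *args):
    # Module-level wrapper: forward to whatever model is currently loaded
    return current_model.like(*args)

with gr.Blocks() as demo:
    current_model = gr.State(DummyModel())
    status_display = gr.Markdown()
    likeBtn = gr.Button("👍")
    likeBtn.click(like, [current_model], [status_display], show_progress=False)

demo.launch()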
modules/webui_locale.py  CHANGED

@@ -9,8 +9,8 @@ class I18nAuto:
             config = json.load(f)
         else:
             config = {}
-
-        language = os.environ.get("
+        lang_config = config.get("language", "auto")
+        language = os.environ.get("default_ui_lang", lang_config)
         if language == "auto":
             language = locale.getdefaultlocale()[0] # get the language code of the system (ex. zh_CN)
         self.language_map = {}
readme/README_en.md  CHANGED

@@ -122,6 +122,6 @@ More information could be found in our [wiki](https://github.com/GaiZhenbiao/Chu

 🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~

-<a href="https://www.buymeacoffee.com/ChuanhuChat"
+<a href="https://www.buymeacoffee.com/ChuanhuChat" ><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=ChuanhuChat&button_colour=219d53&font_colour=ffffff&font_family=Poppins&outline_colour=ffffff&coffee_colour=FFDD00" alt="Buy Me A Coffee" width="250"></a>

 <img width="250" alt="image" src="https://user-images.githubusercontent.com/51039745/226920291-e8ec0b0a-400f-4c20-ac13-dafac0c3aeeb.JPG">
readme/README_ja.md  CHANGED

@@ -121,6 +121,6 @@ python ChuanhuChatbot.py

 🐯 この企画が役に立ったら、遠慮なくコーラかコーヒーでもおごってください〜。

-<a href="https://www.buymeacoffee.com/ChuanhuChat"
+<a href="https://www.buymeacoffee.com/ChuanhuChat" ><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=ChuanhuChat&button_colour=219d53&font_colour=ffffff&font_family=Poppins&outline_colour=ffffff&coffee_colour=FFDD00" alt="Buy Me A Coffee" width="250"></a>

 <img width="250" alt="image" src="https://user-images.githubusercontent.com/51039745/226920291-e8ec0b0a-400f-4c20-ac13-dafac0c3aeeb.JPG">