diff --git a/CITATION.cff b/CITATION.cff
index c1b2475a4d12546ffe61d3d2530e954cc43a0563..ea3e1503a4aff8e954bb36b1bba6370f81f239f6 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -1,5 +1,5 @@
cff-version: 1.2.0
-title: ChuanhuChatGPT
+title: Chuanhu Chat
message: >-
If you use this software, please cite it using these
metadata.
@@ -13,8 +13,8 @@ authors:
orcid: https://orcid.org/0009-0005-0357-272X
repository-code: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
url: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
-abstract: Provided a light and easy to use interface for ChatGPT API
+abstract: This software provides a light and easy-to-use interface for ChatGPT API and many LLMs.
license: GPL-3.0
-commit: bd0034c37e5af6a90bd9c2f7dd073f6cd27c61af
-version: '20230405'
-date-released: '2023-04-05'
+commit: c6c08bc62ef80e37c8be52f65f9b6051a7eea1fa
+version: '20230709'
+date-released: '2023-07-09'
diff --git a/ChuanhuChatbot.py b/ChuanhuChatbot.py
index 890e5c7ec70f26a0452ded3e33cd56f488819932..d498359af5c02037247406830672bcbbdbb7006b 100644
--- a/ChuanhuChatbot.py
+++ b/ChuanhuChatbot.py
@@ -1,8 +1,11 @@
# -*- coding:utf-8 -*-
-import os
import logging
-import sys
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
+)
+import colorama
import gradio as gr
from modules import config
@@ -10,6 +13,9 @@ from modules.config import *
from modules.utils import *
from modules.presets import *
from modules.overwrites import *
+from modules.webui import *
+from modules.repo import *
+from modules.train_func import *
from modules.models.models import get_model
logging.getLogger("httpx").setLevel(logging.WARNING)
@@ -17,13 +23,13 @@ logging.getLogger("httpx").setLevel(logging.WARNING)
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages
gr.Chatbot.postprocess = postprocess
-with open("assets/custom.css", "r", encoding="utf-8") as f:
- customCSS = f.read()
+# with open("web_assets/css/ChuanhuChat.css", "r", encoding="utf-8") as f:
+# ChuanhuChatCSS = f.read()
def create_new_model():
return get_model(model_name = MODELS[DEFAULT_MODEL], access_key = my_api_key)[0]
-with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
+with gr.Blocks(theme=small_and_beautiful_theme) as demo:
user_name = gr.State("")
promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
user_question = gr.State("")
@@ -34,31 +40,45 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
topic = gr.State(i18n("未命名对话历史记录"))
with gr.Row():
- gr.HTML(CHUANHU_TITLE, elem_id="app_title")
- status_display = gr.Markdown(get_geoip(), elem_id="status_display")
- with gr.Row(elem_id="float_display"):
- user_info = gr.Markdown(value="getting user info...", elem_id="user_info")
-
- with gr.Row().style(equal_height=True):
+ gr.HTML(CHUANHU_TITLE, elem_id="app-title")
+ status_display = gr.Markdown(get_geoip(), elem_id="status-display")
+ with gr.Row(elem_id="float-display"):
+ user_info = gr.Markdown(value="getting user info...", elem_id="user-info")
+ config_info = gr.HTML(get_html("config_info.html").format(bot_avatar=config.bot_avatar, user_avatar=config.user_avatar), visible=False, elem_id="config-info")
+ update_info = gr.HTML(get_html("update.html").format(
+ current_version=repo_tag_html(),
+ version_time=version_time(),
+ cancel_btn=i18n("取消"),
+ update_btn=i18n("更新"),
+ seenew_btn=i18n("详情"),
+ ok_btn=i18n("好"),
+ ), visible=check_update)
+
+ with gr.Row(equal_height=True):
with gr.Column(scale=5):
with gr.Row():
- chatbot = gr.Chatbot(label="Chuanhu Chat", elem_id="chuanhu_chatbot").style(height="100%")
+ chatbot = gr.Chatbot(label="Chuanhu Chat", elem_id="chuanhu-chatbot", latex_delimiters=latex_delimiters_set, height=700)
with gr.Row():
with gr.Column(min_width=225, scale=12):
user_input = gr.Textbox(
- elem_id="user_input_tb",
- show_label=False, placeholder=i18n("在这里输入")
- ).style(container=False)
+ elem_id="user-input-tb",
+ show_label=False, placeholder=i18n("在这里输入"),
+ container=False
+ )
with gr.Column(min_width=42, scale=1):
- submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn")
- cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn")
- with gr.Row():
- emptyBtn = gr.Button(
- i18n("🧹 新的对话"), elem_id="empty_btn"
- )
- retryBtn = gr.Button(i18n("🔄 重新生成"))
- delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
- delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
+ submitBtn = gr.Button(value="", variant="primary", elem_id="submit-btn")
+ cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel-btn")
+ with gr.Row(elem_id="chatbot-buttons"):
+ with gr.Column(min_width=120, scale=1):
+ emptyBtn = gr.Button(
+ i18n("🧹 新的对话"), elem_id="empty-btn"
+ )
+ with gr.Column(min_width=120, scale=1):
+ retryBtn = gr.Button(i18n("🔄 重新生成"))
+ with gr.Column(min_width=120, scale=1):
+ delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
+ with gr.Column(min_width=120, scale=1):
+ delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
with gr.Row(visible=False) as like_dislike_area:
with gr.Column(min_width=20, scale=1):
likeBtn = gr.Button(i18n("👍"))
@@ -77,9 +97,9 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
label="API-Key",
)
if multi_api_key:
- usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", elem_classes="insert_block")
+ usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage-display", elem_classes="insert-block", visible=show_api_billing)
else:
- usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", elem_classes="insert_block")
+ usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage-display", elem_classes="insert-block", visible=show_api_billing)
model_select_dropdown = gr.Dropdown(
label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL], interactive=True
)
@@ -87,15 +107,15 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False
)
with gr.Row():
- single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False)
- use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False)
+ single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False, elem_classes="switch-checkbox")
+ use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False, elem_classes="switch-checkbox")
language_select_dropdown = gr.Dropdown(
label=i18n("选择回复语言(针对搜索&索引功能)"),
choices=REPLY_LANGUAGES,
multiselect=False,
value=REPLY_LANGUAGES[0],
)
- index_files = gr.Files(label=i18n("上传"), type="file")
+ index_files = gr.Files(label=i18n("上传"), type="file", elem_id="upload-index-file")
two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False))
summarize_btn = gr.Button(i18n("总结"))
# TODO: 公式ocr
@@ -107,8 +127,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
placeholder=i18n("在这里输入System Prompt..."),
label="System prompt",
value=INITIAL_SYSTEM_PROMPT,
- lines=10,
- ).style(container=False)
+ lines=10
+ )
with gr.Accordion(label=i18n("加载Prompt模板"), open=True):
with gr.Column():
with gr.Row():
@@ -118,7 +138,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
choices=get_template_names(plain=True),
multiselect=False,
value=get_template_names(plain=True)[0],
- ).style(container=False)
+ container=False,
+ )
with gr.Column(scale=1):
templateRefreshBtn = gr.Button(i18n("🔄 刷新"))
with gr.Row():
@@ -129,7 +150,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
get_template_names(plain=True)[0], mode=1
),
multiselect=False,
- ).style(container=False)
+ container=False,
+ )
with gr.Tab(label=i18n("保存/加载")):
with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True):
@@ -139,10 +161,14 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
historyFileSelectDropdown = gr.Dropdown(
label=i18n("从列表中加载对话"),
choices=get_history_names(plain=True),
- multiselect=False
+ multiselect=False,
+ container=False,
)
- with gr.Column(scale=1):
- historyRefreshBtn = gr.Button(i18n("🔄 刷新"))
+ with gr.Row():
+ with gr.Column(min_width=42, scale=1):
+ historyRefreshBtn = gr.Button(i18n("🔄 刷新"))
+ with gr.Column(min_width=42, scale=1):
+ historyDeleteBtn = gr.Button(i18n("🗑️ 删除"))
with gr.Row():
with gr.Column(scale=6):
saveFileName = gr.Textbox(
@@ -150,7 +176,9 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
label=i18n("设置保存文件名"),
value=i18n("对话历史记录"),
- ).style(container=True)
+ elem_classes="no-container"
+ # container=False,
+ )
with gr.Column(scale=1):
saveHistoryBtn = gr.Button(i18n("💾 保存对话"))
exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown"))
@@ -159,12 +187,32 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
with gr.Column():
downloadFile = gr.File(interactive=True)
+ with gr.Tab(label=i18n("微调")):
+ openai_train_status = gr.Markdown(label=i18n("训练状态"), value=i18n("在这里[查看使用介绍](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/%E4%BD%BF%E7%94%A8%E6%95%99%E7%A8%8B#%E5%BE%AE%E8%B0%83-gpt-35)"))
+
+ with gr.Tab(label=i18n("准备数据集")):
+ dataset_preview_json = gr.JSON(label=i18n("数据集预览"), readonly=True)
+ dataset_selection = gr.Files(label = i18n("选择数据集"), file_types=[".xlsx", ".jsonl"], file_count="single")
+ upload_to_openai_btn = gr.Button(i18n("上传到OpenAI"), variant="primary", interactive=False)
+
+ with gr.Tab(label=i18n("训练")):
+ openai_ft_file_id = gr.Textbox(label=i18n("文件ID"), value="", lines=1, placeholder=i18n("上传到 OpenAI 后自动填充"))
+ openai_ft_suffix = gr.Textbox(label=i18n("模型名称后缀"), value="", lines=1, placeholder=i18n("可选,用于区分不同的模型"))
+ openai_train_epoch_slider = gr.Slider(label=i18n("训练轮数(Epochs)"), minimum=1, maximum=100, value=3, step=1, interactive=True)
+ openai_start_train_btn = gr.Button(i18n("开始训练"), variant="primary", interactive=False)
+
+ with gr.Tab(label=i18n("状态")):
+ openai_status_refresh_btn = gr.Button(i18n("刷新状态"))
+ openai_cancel_all_jobs_btn = gr.Button(i18n("取消所有任务"))
+ add_to_models_btn = gr.Button(i18n("添加训练好的模型到模型列表"), interactive=False)
+
with gr.Tab(label=i18n("高级")):
- gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置"))
- gr.HTML(get_html("appearance_switcher.html").format(label=i18n("切换亮暗色主题")), elem_classes="insert_block")
+ gr.HTML(get_html("appearance_switcher.html").format(label=i18n("切换亮暗色主题")), elem_classes="insert-block")
use_streaming_checkbox = gr.Checkbox(
- label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION
+ label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION, elem_classes="switch-checkbox"
)
+ checkUpdateBtn = gr.Button(i18n("🔄 检查更新..."), visible=check_update)
+ gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️"), elem_id="advanced-warning")
with gr.Accordion(i18n("参数"), open=False):
temperature_slider = gr.Slider(
minimum=-0,
@@ -192,7 +240,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
)
stop_sequence_txt = gr.Textbox(
show_label=True,
- placeholder=i18n("在这里输入停止符,用英文逗号隔开..."),
+ placeholder=i18n("停止符,用英文逗号隔开..."),
label="stop",
value="",
lines=1,
@@ -244,25 +292,36 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
lines=1,
)
- with gr.Accordion(i18n("网络设置"), open=False, visible=False):
+ with gr.Accordion(i18n("网络参数"), open=False):
+ gr.Markdown(i18n("---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置"), elem_id="netsetting-warning")
+ default_btn = gr.Button(i18n("🔙 恢复默认网络设置"))
+ # 网络代理
+ proxyTxt = gr.Textbox(
+ show_label=True,
+ placeholder=i18n("未设置代理..."),
+ label=i18n("代理地址"),
+ value=config.http_proxy,
+ lines=1,
+ interactive=False,
+ # container=False,
+ elem_classes="view-only-textbox no-container",
+ )
+ # changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
+
# 优先展示自定义的api_host
apihostTxt = gr.Textbox(
show_label=True,
- placeholder=i18n("在这里输入API-Host..."),
- label="API-Host",
+ placeholder="api.openai.com",
+ label="OpenAI API-Host",
value=config.api_host or shared.API_HOST,
lines=1,
+ interactive=False,
+ # container=False,
+ elem_classes="view-only-textbox no-container",
)
- changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址"))
- proxyTxt = gr.Textbox(
- show_label=True,
- placeholder=i18n("在这里输入代理地址..."),
- label=i18n("代理地址(示例:http://127.0.0.1:10809)"),
- value="",
- lines=2,
- )
- changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
- default_btn = gr.Button(i18n("🔙 恢复默认设置"))
+ # changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址"))
+ updateChuanhuBtn = gr.Button(visible=False, elem_classes="invisible-btn", elem_id="update-chuanhu-btn")
+
gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer")
@@ -323,6 +382,10 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
outputs=[saveFileName, systemPromptTxt, chatbot]
)
+ refresh_history_args = dict(
+ fn=get_history_names, inputs=[gr.State(False), user_name], outputs=[historyFileSelectDropdown]
+ )
+
# Chatbot
cancelBtn.click(interrupt, [current_model], [])
@@ -341,6 +404,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
inputs=[current_model],
outputs=[chatbot, status_display],
show_progress=True,
+ _js='clearChatbot',
)
retryBtn.click(**start_outputing_args).then(
@@ -391,7 +455,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then(**get_usage_args)
keyTxt.submit(**get_usage_args)
single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
- model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown], show_progress=True, api_name="get_model")
+ model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown, user_api_key, keyTxt], show_progress=True, api_name="get_model")
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False)
lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot], show_progress=True)
@@ -425,10 +489,23 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
downloadFile,
show_progress=True,
)
- historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown])
+ historyRefreshBtn.click(**refresh_history_args)
+ historyDeleteBtn.click(delete_chat_history, [current_model, historyFileSelectDropdown, user_name], [status_display, historyFileSelectDropdown, chatbot], _js='(a,b,c)=>{return showConfirmationDialog(a, b, c);}')
historyFileSelectDropdown.change(**load_history_from_file_args)
downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name], [saveFileName, systemPromptTxt, chatbot])
+ # Train
+ dataset_selection.upload(handle_dataset_selection, dataset_selection, [dataset_preview_json, upload_to_openai_btn, openai_train_status])
+ dataset_selection.clear(handle_dataset_clear, [], [dataset_preview_json, upload_to_openai_btn])
+ upload_to_openai_btn.click(upload_to_openai, [dataset_selection], [openai_ft_file_id, openai_train_status], show_progress=True)
+
+ openai_ft_file_id.change(lambda x: gr.update(interactive=True) if len(x) > 0 else gr.update(interactive=False), [openai_ft_file_id], [openai_start_train_btn])
+ openai_start_train_btn.click(start_training, [openai_ft_file_id, openai_ft_suffix, openai_train_epoch_slider], [openai_train_status])
+
+ openai_status_refresh_btn.click(get_training_status, [], [openai_train_status, add_to_models_btn])
+ add_to_models_btn.click(add_to_models, [], [model_select_dropdown, openai_train_status], show_progress=True)
+ openai_cancel_all_jobs_btn.click(cancel_all_jobs, [], [openai_train_status], show_progress=True)
+
# Advanced
max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None)
temperature_slider.change(set_temperature, [current_model, temperature_slider], None)
@@ -444,15 +521,24 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
default_btn.click(
reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
)
- changeAPIURLBtn.click(
- change_api_host,
- [apihostTxt],
- [status_display],
- show_progress=True,
- )
- changeProxyBtn.click(
- change_proxy,
- [proxyTxt],
+ # changeAPIURLBtn.click(
+ # change_api_host,
+ # [apihostTxt],
+ # [status_display],
+ # show_progress=True,
+ # )
+ # changeProxyBtn.click(
+ # change_proxy,
+ # [proxyTxt],
+ # [status_display],
+ # show_progress=True,
+ # )
+ checkUpdateBtn.click(fn=None, _js='manualCheckUpdate')
+
+ # Invisible elements
+ updateChuanhuBtn.click(
+ update_chuanhu,
+ [],
[status_display],
show_progress=True,
)
@@ -469,5 +555,5 @@ if __name__ == "__main__":
reload_javascript()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
blocked_paths=["config.json"],
- favicon_path="./assets/favicon.ico"
+ favicon_path="./web_assets/favicon.ico",
)
diff --git a/Dockerfile b/Dockerfile
index 335c2dba28ba8c365de9306858462a59dea25f28..85d5045d5316ac160277af1e7d60afa823c0f953 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,15 +1,18 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
+FROM python:3.9-slim-buster as builder
+RUN apt-get update \
+ && apt-get install -y build-essential \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
COPY requirements.txt .
COPY requirements_advanced.txt .
-RUN pip install --user -r requirements.txt
-# RUN pip install --user -r requirements_advanced.txt
+RUN pip install --user --no-cache-dir -r requirements.txt
+# RUN pip install --user --no-cache-dir -r requirements_advanced.txt
-FROM python:3.9
-MAINTAINER iskoldt
+FROM python:3.9-slim-buster
+LABEL maintainer="iskoldt"
COPY --from=builder /root/.local /root/.local
ENV PATH=/root/.local/bin:$PATH
COPY . /app
WORKDIR /app
-ENV dockerrun yes
-CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
+ENV dockerrun=yes
+CMD python3 -u ChuanhuChatbot.py 2>&1 | tee /var/log/application.log
diff --git a/config_example.json b/config_example.json
index 7998f24524eb7d80d1fc8b7048ab64f4dacdd974..0b77caefbb39ef08d6a53b3b40ee67bb8a3b1576 100644
--- a/config_example.json
+++ b/config_example.json
@@ -1,24 +1,60 @@
{
- // 你的OpenAI API Key,一般必填,
- // 若缺省填为 "openai_api_key": "" 则必须再在图形界面中填入API Key
- "openai_api_key": "",
- // 你的xmchat API Key,与OpenAI API Key不同
- "xmchat_api_key": "",
- "language": "auto",
- // 如果使用代理,请取消注释下面的两行,并替换代理URL
- // "https_proxy": "http://127.0.0.1:1079",
- // "http_proxy": "http://127.0.0.1:1079",
+ // 各配置具体说明,见 [https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#配置-configjson]
+
+ //== API 配置 ==
+ "openai_api_key": "", // 你的 OpenAI API Key,一般必填,若空缺则需在图形界面中填入API Key
+ "google_palm_api_key": "", // 你的 Google PaLM API Key,用于 Google PaLM 对话模型
+ "xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
+ "minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
+ "minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
+ "midjourney_proxy_api_base": "https://xxx/mj", // 你的 https://github.com/novicezk/midjourney-proxy 代理地址
+ "midjourney_proxy_api_secret": "", // 你的 MidJourney Proxy API Secret,用于鉴权访问 api,可选
+ "midjourney_discord_proxy_url": "", // 你的 MidJourney Discord Proxy URL,用于对生成对图进行反代,可选
+ "midjourney_temp_folder": "./tmp", // 你的 MidJourney 临时文件夹,用于存放生成的图片,填空则关闭自动下载切图(直接显示MJ的四宫格图)
+
+
+ //== Azure ==
+ "openai_api_type": "openai", // 可选项:azure, openai
+ "azure_openai_api_key": "", // 你的 Azure OpenAI API Key,用于 Azure OpenAI 对话模型
+ "azure_openai_api_base_url": "", // 你的 Azure Base URL
+ "azure_openai_api_version": "2023-05-15", // 你的 Azure OpenAI API 版本
+ "azure_deployment_name": "", // 你的 Azure OpenAI Chat 模型 Deployment 名称
+ "azure_embedding_deployment_name": "", // 你的 Azure OpenAI Embedding 模型 Deployment 名称
+ "azure_embedding_model_name": "text-embedding-ada-002", // 你的 Azure OpenAI Embedding 模型名称
+
+ //== 基础配置 ==
+ "language": "auto", // 界面语言,可选"auto", "zh-CN", "en-US", "ja-JP", "ko-KR", "sv-SE"
"users": [], // 用户列表,[[用户名1, 密码1], [用户名2, 密码2], ...]
"local_embedding": false, //是否在本地编制索引
+ "hide_history_when_not_logged_in": false, //未登录情况下是否不展示对话历史
+ "check_update": true, //是否启用检查更新
"default_model": "gpt-3.5-turbo", // 默认模型
+ "bot_avatar": "default", // 机器人头像,可填写图片链接、Data URL (base64),或者"none"(不显示头像)
+ "user_avatar": "default", // 用户头像,可填写图片链接、Data URL (base64),或者"none"(不显示头像)
+
+ //== API 用量 ==
+ "show_api_billing": false, //是否显示OpenAI API用量(启用需要填写sensitive_id)
+ "sensitive_id": "", // 你 OpenAI 账户的 Sensitive ID,用于查询 API 用量
+ "usage_limit": 120, // 该 OpenAI API Key 的当月限额,单位:美元,用于计算百分比和显示上限
+ "legacy_api_usage": false, // 是否使用旧版 API 用量查询接口(OpenAI现已关闭该接口,但是如果你在使用第三方 API,第三方可能仍然支持此接口)
+
+ //== 川虎助理设置 ==
+ "default_chuanhu_assistant_model": "gpt-4", //川虎助理使用的模型,可选gpt-3.5-turbo或者gpt-4等
+ "GOOGLE_CSE_ID": "", //谷歌搜索引擎ID,用于川虎助理Pro模式,获取方式请看 https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search
+ "GOOGLE_API_KEY": "", //谷歌API Key,用于川虎助理Pro模式
+ "WOLFRAM_ALPHA_APPID": "", //Wolfram Alpha API Key,用于川虎助理Pro模式,获取方式请看 https://products.wolframalpha.com/api/
+ "SERPAPI_API_KEY": "", //SerpAPI API Key,用于川虎助理Pro模式,获取方式请看 https://serpapi.com/
+
+ //== 文档处理与显示 ==
+ "latex_option": "default", // LaTeX 公式渲染策略,可选"default", "strict", "all"或者"disabled"
"advance_docs": {
"pdf": {
- // 是否认为PDF是双栏的
- "two_column": false,
- // 是否使用OCR识别PDF中的公式
- "formula_ocr": true
+ "two_column": false, // 是否认为PDF是双栏的
+ "formula_ocr": true // 是否使用OCR识别PDF中的公式
}
},
+
+ //== 高级配置 ==
// 是否多个API Key轮换使用
"multi_api_key": false,
"api_key_list": [
@@ -26,7 +62,12 @@
"sk-xxxxxxxxxxxxxxxxxxxxxxxx2",
"sk-xxxxxxxxxxxxxxxxxxxxxxxx3"
],
- // 如果使用自定义端口、自定义ip,请取消注释并替换对应内容
+ // 自定义OpenAI API Base
+ // "openai_api_base": "https://api.openai.com",
+ // 自定义使用代理(请替换代理URL)
+ // "https_proxy": "http://127.0.0.1:1079",
+ // "http_proxy": "http://127.0.0.1:1079",
+ // 自定义端口、自定义ip(请替换对应内容)
// "server_name": "0.0.0.0",
// "server_port": 7860,
// 如果要share到gradio,设置为true
diff --git a/locale/en_US.json b/locale/en_US.json
index 09f00893344b0b587c4a384f3bcf6d48064e5fa0..17a5aa618ee8e1c4425a7ce69e1d86adfbd24b6c 100644
--- a/locale/en_US.json
+++ b/locale/en_US.json
@@ -32,24 +32,33 @@
"📝 导出为Markdown": "📝 Export as Markdown",
"默认保存于history文件夹": "Default save in history folder",
"高级": "Advanced",
- "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ Caution: Changes require care. ⚠️\n\nIf unable to use, restore default settings.",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ Caution: Changes require care. ⚠️",
"参数": "Parameters",
- "在这里输入停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
+ "停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
"用于定位滥用行为": "Used to locate abuse",
"用户名": "Username",
- "网络设置": "Network Settings",
"在这里输入API-Host...": "Type in API-Host here...",
"🔄 切换API地址": "🔄 Switch API Address",
- "在这里输入代理地址...": "Type in proxy address here...",
- "代理地址(示例:http://127.0.0.1:10809)": "Proxy address (example: http://127.0.0.1:10809)",
+ "未设置代理...": "No proxy...",
+ "代理地址": "Proxy address",
"🔄 设置代理地址": "🔄 Set Proxy Address",
- "🔙 恢复默认设置": "🔙 Restore Default Settings",
+ "🔙 恢复默认网络设置": "🔙 Reset Network Settings",
+ "🔄 检查更新...": "🔄 Check for Update...",
+ "取消": "Cancel",
+ "更新": "Update",
+ "详情": "Details",
+ "好": "OK",
+ "更新成功,请重启本程序": "Updated successfully, please restart this program",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "Update failed, please try [manually updating](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)",
"川虎Chat 🚀": "Chuanhu Chat 🚀",
"开始实时传输回答……": "Start streaming output...",
"Token 计数: ": "Token Count: ",
- ",本次对话累计消耗了 ": ",Total cost for this dialogue is ",
+ ",本次对话累计消耗了 ": ", Total cost for this dialogue is ",
"**获取API使用情况失败**": "**Failed to get API usage**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**Failed to get API usage**, correct sensitive_id needed in `config.json`",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**Failed to get API usage**, wrong or expired sensitive_id",
"**本月使用金额** ": "**Monthly usage** ",
+ "本月使用金额": "Monthly usage",
"获取API使用情况失败:": "Failed to get API usage:",
"API密钥更改为了": "The API key is changed to",
"JSON解析错误,收到的内容: ": "JSON parsing error, received content: ",
@@ -64,10 +73,15 @@
"API key为空,请检查是否输入正确。": "API key is empty, check whether it is entered correctly.",
"请输入对话内容。": "Enter the content of the conversation.",
"账单信息不适用": "Billing information is not applicable",
- "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "developor: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) and [明昭MZhao](https://space.bilibili.com/24807452)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Developed by Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) and [Keldos](https://github.com/Keldos-Li)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
"切换亮暗色主题": "Switch light/dark theme",
"您的IP区域:未知。": "Your IP region: Unknown.",
"获取IP地理位置失败。原因:": "Failed to get IP location. Reason: ",
"。你仍然可以使用聊天功能。": ". You can still use the chat function.",
- "您的IP区域:": "Your IP region: "
+ "您的IP区域:": "Your IP region: ",
+ "总结": "Summarize",
+ "生成内容总结中……": "Generating content summary...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Due to the following reasons, Google refuses to provide an answer to PaLM: \n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ To ensure the security of API-Key, please modify the network settings in the configuration file `config.json`.",
+ "网络参数": "Network parameters"
}
diff --git a/locale/ja_JP.json b/locale/ja_JP.json
index 1acbe7103ef01beb81a8039a77981af8fa31e402..db8fb8441bb669848c5eec4644d5b3e8d814060a 100644
--- a/locale/ja_JP.json
+++ b/locale/ja_JP.json
@@ -32,24 +32,33 @@
"📝 导出为Markdown": "📝 Markdownでエクスポート",
"默认保存于history文件夹": "デフォルトでhistoryフォルダに保存されます",
"高级": "Advanced",
- "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ 変更には慎重に ⚠️\n\nもし動作しない場合は、デフォルト設定に戻してください。",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ 変更には慎重に ⚠️",
"参数": "パラメータ",
- "在这里输入停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
+ "停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
"用于定位滥用行为": "不正行為を特定するために使用されます",
"用户名": "ユーザー名",
- "网络设置": "ネットワーク設定",
"在这里输入API-Host...": "API-Hostを入力してください...",
"🔄 切换API地址": "🔄 APIアドレスを切り替え",
- "在这里输入代理地址...": "プロキシアドレスを入力してください...",
- "代理地址(示例:http://127.0.0.1:10809)": "プロキシアドレス(例:http://127.0.0.1:10809)",
+ "未设置代理...": "代理が設定されていません...",
+ "代理地址": "プロキシアドレス",
"🔄 设置代理地址": "🔄 プロキシアドレスを設定",
- "🔙 恢复默认设置": "🔙 デフォルト設定に戻す",
+ "🔙 恢复默认网络设置": "🔙 ネットワーク設定のリセット",
+ "🔄 检查更新...": "🔄 アップデートをチェック...",
+ "取消": "キャンセル",
+ "更新": "アップデート",
+ "详情": "詳細",
+ "好": "はい",
+ "更新成功,请重启本程序": "更新が成功しました、このプログラムを再起動してください",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "更新に失敗しました、[手動での更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)をお試しください。",
"川虎Chat 🚀": "川虎Chat 🚀",
"开始实时传输回答……": "ストリーム出力開始……",
"Token 计数: ": "Token数: ",
",本次对话累计消耗了 ": ", 今の会話で消費合計 ",
"**获取API使用情况失败**": "**API使用状況の取得に失敗しました**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**API使用状況の取得に失敗しました**、`config.json`に正しい`sensitive_id`を入力する必要があります",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**API使用状況の取得に失敗しました**、sensitive_idが間違っているか、期限切れです",
"**本月使用金额** ": "**今月の使用料金** ",
+ "本月使用金额": "今月の使用料金",
"获取API使用情况失败:": "API使用状況の取得に失敗しました:",
"API密钥更改为了": "APIキーが変更されました",
"JSON解析错误,收到的内容: ": "JSON解析エラー、受信内容: ",
@@ -64,10 +73,15 @@
"API key为空,请检查是否输入正确。": "APIキーが入力されていません。正しく入力されているか確認してください。",
"请输入对话内容。": "会話内容を入力してください。",
"账单信息不适用": "課金情報は対象外です",
- "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452) と [Keldos](https://github.com/Keldos-Li)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
"切换亮暗色主题": "テーマの明暗切替",
"您的IP区域:未知。": "あなたのIPアドレス地域:不明",
"获取IP地理位置失败。原因:": "IPアドレス地域の取得に失敗しました。理由:",
"。你仍然可以使用聊天功能。": "。あなたはまだチャット機能を使用できます。",
- "您的IP区域:": "あなたのIPアドレス地域:"
-}
\ No newline at end of file
+ "您的IP区域:": "あなたのIPアドレス地域:",
+ "总结": "要約する",
+ "生成内容总结中……": "コンテンツ概要を生成しています...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Googleは以下の理由から、PaLMの回答を返すことを拒否しています:\n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ APIキーの安全性を確保するために、`config.json`ファイルでネットワーク設定を変更してください。",
+ "网络参数": "ネットワークパラメータ"
+}
diff --git a/locale/ko_KR.json b/locale/ko_KR.json
new file mode 100644
index 0000000000000000000000000000000000000000..a7f45732eeae5b65930a078c0b326c9659abd270
--- /dev/null
+++ b/locale/ko_KR.json
@@ -0,0 +1,89 @@
+{
+ "未命名对话历史记录": "이름없는 대화 기록",
+ "在这里输入": "여기에 입력하세요",
+ "🧹 新的对话": "🧹 새로운 대화",
+ "🔄 重新生成": "🔄 재생성",
+ "🗑️ 删除最旧对话": "🗑️ 가장 오래된 대화 삭제",
+ "🗑️ 删除最新对话": "🗑️ 최신 대화 삭제",
+ "🗑️ 删除": "🗑️ 삭제",
+ "模型": "LLM 모델",
+ "多账号模式已开启,无需输入key,可直接开始对话": "다중 계정 모드가 활성화되어 있으므로 키를 입력할 필요가 없이 바로 대화를 시작할 수 있습니다",
+ "**发送消息** 或 **提交key** 以显示额度": "**메세지를 전송** 하거나 **Key를 입력**하여 크레딧 표시",
+ "选择模型": "모델 선택",
+ "选择LoRA模型": "LoRA 모델 선택",
+ "实时传输回答": "실시간 전송",
+ "单轮对话": "단일 대화",
+ "使用在线搜索": "온라인 검색 사용",
+ "选择回复语言(针对搜索&索引功能)": "답장 언어 선택 (검색 & 인덱스용)",
+ "上传索引文件": "업로드",
+ "双栏pdf": "2-column pdf",
+ "识别公式": "formula OCR",
+ "在这里输入System Prompt...": "여기에 시스템 프롬프트를 입력하세요...",
+ "加载Prompt模板": "프롬프트 템플릿 불러오기",
+ "选择Prompt模板集合文件": "프롬프트 콜렉션 파일 선택",
+ "🔄 刷新": "🔄 새로고침",
+ "从Prompt模板中加载": "프롬프트 템플릿에서 불러오기",
+ "保存/加载": "저장/불러오기",
+ "保存/加载对话历史记录": "대화 기록 저장/불러오기",
+ "从列表中加载对话": "리스트에서 대화 불러오기",
+ "设置文件名: 默认为.json,可选为.md": "파일 이름 설정: 기본값: .json, 선택: .md",
+ "设置保存文件名": "저장 파일명 설정",
+ "对话历史记录": "대화 기록",
+ "💾 保存对话": "💾 대화 저장",
+ "📝 导出为Markdown": "📝 마크다운으로 내보내기",
+ "默认保存于history文件夹": "히스토리 폴더에 기본 저장",
+ "高级": "고급",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ 주의: 변경시 주의하세요. ⚠️",
+ "参数": "파라미터들",
+ "停止符,用英文逗号隔开...": "여기에 정지 토큰 입력, ','로 구분됨...",
+ "用于定位滥用行为": "악용 사례 파악에 활용됨",
+ "用户名": "사용자 이름",
+ "在这里输入API-Host...": "여기에 API host를 입력하세요...",
+ "🔄 切换API地址": "🔄 API 주소 변경",
+ "未设置代理...": "프록시가 설정되지 않았습니다...",
+ "代理地址": "프록시 주소",
+ "🔄 设置代理地址": "🔄 프록시 주소 설정",
+ "🔙 恢复默认网络设置": "🔙 네트워크 설정 초기화",
+ "🔄 检查更新...": "🔄 업데이트 확인...",
+ "取消": "취소",
+ "更新": "업데이트",
+ "详情": "상세",
+ "好": "예",
+ "更新成功,请重启本程序": "업데이트 성공, 이 프로그램을 재시작 해주세요",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "업데이트 실패, [수동 업데이트](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)를 시도하십시오",
+ "川虎Chat 🚀": "Chuanhu Chat 🚀",
+ "开始实时传输回答……": "실시간 응답 출력 시작...",
+ "Token 计数: ": "토큰 수: ",
+ ",本次对话累计消耗了 ": ",이 대화의 전체 비용은 ",
+ "**获取API使用情况失败**": "**API 사용량 가져오기 실패**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**API 사용량 가져오기 실패**. `config.json`에 올바른 `sensitive_id`를 입력해야 합니다",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**API 사용량 가져오기 실패**. sensitive_id가 잘못되었거나 만료되었습니다",
+ "**本月使用金额** ": "**이번 달 사용금액** ",
+ "本月使用金额": "이번 달 사용금액",
+ "获取API使用情况失败:": "API 사용량 가져오기 실패:",
+ "API密钥更改为了": "API 키가 변경되었습니다.",
+ "JSON解析错误,收到的内容: ": "JSON 파싱 에러, 응답: ",
+ "模型设置为了:": "설정된 모델: ",
+ "☹️发生了错误:": "☹️에러: ",
+ "获取对话时发生错误,请查看后台日志": "대화를 가져오는 중 에러가 발생했습니다. 백그라운드 로그를 확인하세요",
+ "请检查网络连接,或者API-Key是否有效。": "네트워크 연결 또는 API키가 유효한지 확인하세요",
+ "连接超时,无法获取对话。": "연결 시간 초과, 대화를 가져올 수 없습니다.",
+ "读取超时,无法获取对话。": "읽기 시간 초과, 대화를 가져올 수 없습니다.",
+ "代理错误,无法获取对话。": "프록시 에러, 대화를 가져올 수 없습니다.",
+ "SSL错误,无法获取对话。": "SSL 에러, 대화를 가져올 수 없습니다.",
+ "API key为空,请检查是否输入正确。": "API 키가 비어 있습니다. 올바르게 입력되었는지 확인하십세요.",
+ "请输入对话内容。": "대화 내용을 입력하세요.",
+ "账单信息不适用": "청구 정보를 가져올 수 없습니다",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "제작: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452), [Keldos](https://github.com/Keldos-Li)\n\n최신 코드 다운로드: [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "切换亮暗色主题": "라이트/다크 테마 전환",
+ "您的IP区域:未知。": "IP 지역: 알 수 없음.",
+ "获取IP地理位置失败。原因:": "다음과 같은 이유로 IP 위치를 가져올 수 없습니다. 이유: ",
+ "。你仍然可以使用聊天功能。": ". 채팅 기능을 계속 사용할 수 있습니다.",
+ "您的IP区域:": "당신의 IP 지역: ",
+ "总结": "요약",
+ "生成内容总结中……": "콘텐츠 요약 생성중...",
+ "上传": "업로드",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "구글은 다음과 같은 이유로 인해 PaLM의 응답을 거부합니다: \n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ API-Key의 안전을 보장하기 위해 네트워크 설정을 `config.json` 구성 파일에서 수정해주세요.",
+ "网络参数": "네트워크 매개변수"
+}
diff --git a/locale/sv-SE.json b/locale/sv-SE.json
new file mode 100644
index 0000000000000000000000000000000000000000..4d3c9627fd967724fceac2a55aaff6b434b70c1b
--- /dev/null
+++ b/locale/sv-SE.json
@@ -0,0 +1,87 @@
+{
+ "未命名对话历史记录": "Onämnd Dialoghistorik",
+ "在这里输入": "Skriv in här",
+ "🧹 新的对话": "🧹 Ny Dialog",
+ "🔄 重新生成": "🔄 Regenerera",
+ "🗑️ 删除最旧对话": "🗑️ Ta bort äldsta dialogen",
+ "🗑️ 删除最新对话": "🗑️ Ta bort senaste dialogen",
+ "模型": "Modell",
+ "多账号模式已开启,无需输入key,可直接开始对话": "Flerkontoläge är aktiverat, ingen nyckel behövs, du kan starta dialogen direkt",
+ "**发送消息** 或 **提交key** 以显示额度": "**Skicka meddelande** eller **Skicka in nyckel** för att visa kredit",
+ "选择模型": "Välj Modell",
+ "选择LoRA模型": "Välj LoRA Modell",
+ "实时传输回答": "Strömmande utdata",
+ "单轮对话": "Enkel dialog",
+ "使用在线搜索": "Använd online-sökning",
+ "选择回复语言(针对搜索&索引功能)": "Välj svarspråk (för sök- och indexfunktion)",
+ "上传索引文件": "Ladda upp",
+ "双栏pdf": "Två-kolumns pdf",
+ "识别公式": "Formel OCR",
+ "在这里输入System Prompt...": "Skriv in System Prompt här...",
+ "加载Prompt模板": "Ladda Prompt-mall",
+ "选择Prompt模板集合文件": "Välj Prompt-mall Samlingsfil",
+ "🔄 刷新": "🔄 Uppdatera",
+ "从Prompt模板中加载": "Ladda från Prompt-mall",
+ "保存/加载": "Spara/Ladda",
+ "保存/加载对话历史记录": "Spara/Ladda Dialoghistorik",
+ "从列表中加载对话": "Ladda dialog från lista",
+ "设置文件名: 默认为.json,可选为.md": "Ställ in filnamn: standard är .json, valfritt är .md",
+ "设置保存文件名": "Ställ in sparfilnamn",
+ "对话历史记录": "Dialoghistorik",
+ "💾 保存对话": "💾 Spara Dialog",
+ "📝 导出为Markdown": "📝 Exportera som Markdown",
+ "默认保存于history文件夹": "Sparas som standard i mappen history",
+ "高级": "Avancerat",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ Var försiktig med ändringar. ⚠️",
+ "参数": "Parametrar",
+ "停止符,用英文逗号隔开...": "Skriv in stopptecken här, separerade med kommatecken...",
+ "用于定位滥用行为": "Används för att lokalisera missbruk",
+ "用户名": "Användarnamn",
+ "在这里输入API-Host...": "Skriv in API-Host här...",
+ "🔄 切换API地址": "🔄 Byt API-adress",
+ "未设置代理...": "Ingen proxy inställd...",
+ "代理地址": "Proxyadress",
+ "🔄 设置代理地址": "🔄 Ställ in Proxyadress",
+ "🔙 恢复网络默认设置": "🔙 Återställ Nätverksinställningar",
+ "🔄 检查更新...": "🔄 Sök efter uppdateringar...",
+ "取消": "Avbryt",
+ "更新": "Uppdatera",
+ "详情": "Detaljer",
+ "好": "OK",
+ "更新成功,请重启本程序": "Uppdaterat framgångsrikt, starta om programmet",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "Uppdateringen misslyckades, prova att [uppdatera manuellt](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)",
+ "川虎Chat 🚀": "Chuanhu Chat 🚀",
+ "开始实时传输回答……": "Börjar strömma utdata...",
+ "Token 计数: ": "Tokenräkning: ",
+ ",本次对话累计消耗了 ": ", Total kostnad för denna dialog är ",
+ "**获取API使用情况失败**": "**Misslyckades med att hämta API-användning**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**Misslyckades med att hämta API-användning**, korrekt sensitive_id behövs i `config.json`",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**Misslyckades med att hämta API-användning**, felaktig eller utgången sensitive_id",
+ "**本月使用金额** ": "**Månadens användning** ",
+ "本月使用金额": "Månadens användning",
+ "获取API使用情况失败:": "Misslyckades med att hämta API-användning:",
+ "API密钥更改为了": "API-nyckeln har ändrats till",
+ "JSON解析错误,收到的内容: ": "JSON-tolkningsfel, mottaget innehåll: ",
+ "模型设置为了:": "Modellen är inställd på: ",
+ "☹️发生了错误:": "☹️Fel: ",
+ "获取对话时发生错误,请查看后台日志": "Ett fel uppstod när dialogen hämtades, kontrollera bakgrundsloggen",
+ "请检查网络连接,或者API-Key是否有效。": "Kontrollera nätverksanslutningen eller om API-nyckeln är giltig.",
+ "连接超时,无法获取对话。": "Anslutningen tog för lång tid, kunde inte hämta dialogen.",
+ "读取超时,无法获取对话。": "Läsningen tog för lång tid, kunde inte hämta dialogen.",
+ "代理错误,无法获取对话。": "Proxyfel, kunde inte hämta dialogen.",
+ "SSL错误,无法获取对话。": "SSL-fel, kunde inte hämta dialogen.",
+ "API key为空,请检查是否输入正确。": "API-nyckeln är tom, kontrollera om den är korrekt inmatad.",
+ "请输入对话内容。": "Ange dialoginnehåll.",
+ "账单信息不适用": "Faktureringsinformation är inte tillämplig",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Utvecklad av Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) och [Keldos](https://github.com/Keldos-Li)\n\nLadda ner senaste koden från [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "切换亮暗色主题": "Byt ljus/mörk tema",
+ "您的IP区域:未知。": "Din IP-region: Okänd.",
+ "获取IP地理位置失败。原因:": "Misslyckades med att hämta IP-plats. Orsak: ",
+ "。你仍然可以使用聊天功能。": ". Du kan fortfarande använda chattfunktionen.",
+ "您的IP区域:": "Din IP-region: ",
+ "总结": "Sammanfatta",
+ "生成内容总结中……": "Genererar innehållssammanfattning...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "På grund av följande skäl vägrar Google att ge ett svar till PaLM: \n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ För att säkerställa säkerheten för API-nyckeln, vänligen ändra nätverksinställningarna i konfigurationsfilen `config.json`.",
+ "网络参数": "Nätverksparametrar"
+}
diff --git a/modules/.DS_Store b/modules/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..b568306d92f15378a29b2420f594020c5ea7d5fa
Binary files /dev/null and b/modules/.DS_Store differ
diff --git a/modules/__pycache__/config.cpython-311.pyc b/modules/__pycache__/config.cpython-311.pyc
index 814b6c81b46088e3da8bf033612613d118bcafc3..9530da3815908ec3787cd4abe832c5a7616b51cd 100644
Binary files a/modules/__pycache__/config.cpython-311.pyc and b/modules/__pycache__/config.cpython-311.pyc differ
diff --git a/modules/__pycache__/config.cpython-39.pyc b/modules/__pycache__/config.cpython-39.pyc
index 74c61920214d19c533ec6eaa6d1243f91937bc7d..4e2d78243a1c2f7b4b0633bfcdcdb6379e1943f3 100644
Binary files a/modules/__pycache__/config.cpython-39.pyc and b/modules/__pycache__/config.cpython-39.pyc differ
diff --git a/modules/__pycache__/index_func.cpython-311.pyc b/modules/__pycache__/index_func.cpython-311.pyc
index 637f2271c8683f759fb8a253b19ce9589b50074a..a993b842af381300d650ebfe736cc5865225fead 100644
Binary files a/modules/__pycache__/index_func.cpython-311.pyc and b/modules/__pycache__/index_func.cpython-311.pyc differ
diff --git a/modules/__pycache__/index_func.cpython-39.pyc b/modules/__pycache__/index_func.cpython-39.pyc
index e5331c8816453dbb4fae6e6061cd2c2a4214194a..b97db89c58f233333a9eb6bf72fd871bb3cc4a29 100644
Binary files a/modules/__pycache__/index_func.cpython-39.pyc and b/modules/__pycache__/index_func.cpython-39.pyc differ
diff --git a/modules/__pycache__/overwrites.cpython-311.pyc b/modules/__pycache__/overwrites.cpython-311.pyc
index 4cbf7c22204cf40a1b62baf83da402791763c404..cdf5755ea2443f0f2b64f1da6ee85be078b76a7c 100644
Binary files a/modules/__pycache__/overwrites.cpython-311.pyc and b/modules/__pycache__/overwrites.cpython-311.pyc differ
diff --git a/modules/__pycache__/overwrites.cpython-39.pyc b/modules/__pycache__/overwrites.cpython-39.pyc
index f31912fa0b49ee69112454b36ed33d8546ff9d1b..3d54035e7c8937f1d0fae198be3a2c862468e026 100644
Binary files a/modules/__pycache__/overwrites.cpython-39.pyc and b/modules/__pycache__/overwrites.cpython-39.pyc differ
diff --git a/modules/__pycache__/pdf_func.cpython-311.pyc b/modules/__pycache__/pdf_func.cpython-311.pyc
index c5225f07eca638916ed5da2c5e1d248d29432300..e2b10156a9940c6f0c470fb86682fcc574e5a80c 100644
Binary files a/modules/__pycache__/pdf_func.cpython-311.pyc and b/modules/__pycache__/pdf_func.cpython-311.pyc differ
diff --git a/modules/__pycache__/presets.cpython-311.pyc b/modules/__pycache__/presets.cpython-311.pyc
index 417a5228acc399992d06f44baab1ecd2a0e2f393..666229ab5bb4b74e06b75326f0bf67a5990cb398 100644
Binary files a/modules/__pycache__/presets.cpython-311.pyc and b/modules/__pycache__/presets.cpython-311.pyc differ
diff --git a/modules/__pycache__/presets.cpython-39.pyc b/modules/__pycache__/presets.cpython-39.pyc
index d4c24f132f0874d16073a80681cab1c26631ba79..8ea74040f3b8124051ba6565d1a733dd3546cee4 100644
Binary files a/modules/__pycache__/presets.cpython-39.pyc and b/modules/__pycache__/presets.cpython-39.pyc differ
diff --git a/modules/__pycache__/repo.cpython-311.pyc b/modules/__pycache__/repo.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8619438e55aa7d7af505f298ca8602d8ba21e0b2
Binary files /dev/null and b/modules/__pycache__/repo.cpython-311.pyc differ
diff --git a/modules/__pycache__/shared.cpython-311.pyc b/modules/__pycache__/shared.cpython-311.pyc
index 0916f22b230a897be92f6535bbca83fe7f53e86f..14621e716a8d3758c252ae7ccc4c42451b4e3d13 100644
Binary files a/modules/__pycache__/shared.cpython-311.pyc and b/modules/__pycache__/shared.cpython-311.pyc differ
diff --git a/modules/__pycache__/shared.cpython-39.pyc b/modules/__pycache__/shared.cpython-39.pyc
index 2c616f792e6e67d427badfd73c06edaf8796c9db..049e6cf0ee5f24ca3aa5346b9f5f810f37b0a025 100644
Binary files a/modules/__pycache__/shared.cpython-39.pyc and b/modules/__pycache__/shared.cpython-39.pyc differ
diff --git a/modules/__pycache__/train_func.cpython-311.pyc b/modules/__pycache__/train_func.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b79271fc649a7635932ab60408e4aadcd143fd1f
Binary files /dev/null and b/modules/__pycache__/train_func.cpython-311.pyc differ
diff --git a/modules/__pycache__/utils.cpython-311.pyc b/modules/__pycache__/utils.cpython-311.pyc
index 17298f3a636270dcf70303a286df9b1605f841a8..5ddb1e91c08c0aa62e5c31b3afd29c07e64008e0 100644
Binary files a/modules/__pycache__/utils.cpython-311.pyc and b/modules/__pycache__/utils.cpython-311.pyc differ
diff --git a/modules/__pycache__/utils.cpython-39.pyc b/modules/__pycache__/utils.cpython-39.pyc
index d81ba43da611907a5693a9a7e363c459d9191195..f4cbd0c64bbe16dd098fb92346f36a16ad64833d 100644
Binary files a/modules/__pycache__/utils.cpython-39.pyc and b/modules/__pycache__/utils.cpython-39.pyc differ
diff --git a/modules/__pycache__/webui.cpython-311.pyc b/modules/__pycache__/webui.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e027ee26faf0e36cce97a84afaa5ef05a4df603
Binary files /dev/null and b/modules/__pycache__/webui.cpython-311.pyc differ
diff --git a/modules/config.py b/modules/config.py
index c9224996dd7056508519be8cbe906746f362abb0..77f8bc62737cb08ae38a9345ae8dc420fb643f25 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -11,11 +11,11 @@ from . import presets
__all__ = [
"my_api_key",
+ "sensitive_id",
"authflag",
"auth_list",
"dockerflag",
"retrieve_proxy",
- "log_level",
"advance_docs",
"update_doc_config",
"usage_limit",
@@ -23,8 +23,11 @@ __all__ = [
"server_name",
"server_port",
"share",
+ "check_update",
+ "latex_delimiters_set",
"hide_history_when_not_logged_in",
- "default_chuanhu_assistant_model"
+ "default_chuanhu_assistant_model",
+ "show_api_billing"
]
# 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低)
@@ -35,10 +38,22 @@ if os.path.exists("config.json"):
else:
config = {}
+
+def load_config_to_environ(key_list):
+ global config
+ for key in key_list:
+ if key in config:
+ os.environ[key.upper()] = os.environ.get(key.upper(), config[key])
+
+
lang_config = config.get("language", "auto")
language = os.environ.get("LANGUAGE", lang_config)
-hide_history_when_not_logged_in = config.get("hide_history_when_not_logged_in", False)
+hide_history_when_not_logged_in = config.get(
+ "hide_history_when_not_logged_in", False)
+check_update = config.get("check_update", True)
+show_api_billing = config.get("show_api_billing", False)
+show_api_billing = bool(os.environ.get("SHOW_API_BILLING", show_api_billing))
if os.path.exists("api_key.txt"):
logging.info("检测到api_key.txt文件,正在进行迁移...")
@@ -52,26 +67,44 @@ if os.path.exists("auth.json"):
logging.info("检测到auth.json文件,正在进行迁移...")
auth_list = []
with open("auth.json", "r", encoding='utf-8') as f:
- auth = json.load(f)
- for _ in auth:
- if auth[_]["username"] and auth[_]["password"]:
- auth_list.append((auth[_]["username"], auth[_]["password"]))
- else:
- logging.error("请检查auth.json文件中的用户名和密码!")
- sys.exit(1)
+ auth = json.load(f)
+ for _ in auth:
+ if auth[_]["username"] and auth[_]["password"]:
+ auth_list.append((auth[_]["username"], auth[_]["password"]))
+ else:
+ logging.error("请检查auth.json文件中的用户名和密码!")
+ sys.exit(1)
config["users"] = auth_list
os.rename("auth.json", "auth(deprecated).json")
with open("config.json", "w", encoding='utf-8') as f:
json.dump(config, f, indent=4, ensure_ascii=False)
-## 处理docker if we are running in Docker
+# 处理docker if we are running in Docker
dockerflag = config.get("dockerflag", False)
if os.environ.get("dockerrun") == "yes":
dockerflag = True
-## 处理 api-key 以及 允许的用户列表
+# 处理 api-key 以及 允许的用户列表
my_api_key = config.get("openai_api_key", "")
my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
+os.environ["OPENAI_API_KEY"] = my_api_key
+os.environ["OPENAI_EMBEDDING_API_KEY"] = my_api_key
+
+if config.get("legacy_api_usage", False):
+ sensitive_id = my_api_key
+else:
+ sensitive_id = config.get("sensitive_id", "")
+ sensitive_id = os.environ.get("SENSITIVE_ID", sensitive_id)
+
+# 模型配置
+if "extra_models" in config:
+ presets.MODELS.extend(config["extra_models"])
+ logging.info(f"已添加额外的模型:{config['extra_models']}")
+
+google_palm_api_key = config.get("google_palm_api_key", "")
+google_palm_api_key = os.environ.get(
+ "GOOGLE_PALM_API_KEY", google_palm_api_key)
+os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key
xmchat_api_key = config.get("xmchat_api_key", "")
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
@@ -81,11 +114,23 @@ os.environ["MINIMAX_API_KEY"] = minimax_api_key
minimax_group_id = config.get("minimax_group_id", "")
os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
+midjourney_proxy_api_base = config.get("midjourney_proxy_api_base", "")
+os.environ["MIDJOURNEY_PROXY_API_BASE"] = midjourney_proxy_api_base
+midjourney_proxy_api_secret = config.get("midjourney_proxy_api_secret", "")
+os.environ["MIDJOURNEY_PROXY_API_SECRET"] = midjourney_proxy_api_secret
+midjourney_discord_proxy_url = config.get("midjourney_discord_proxy_url", "")
+os.environ["MIDJOURNEY_DISCORD_PROXY_URL"] = midjourney_discord_proxy_url
+midjourney_temp_folder = config.get("midjourney_temp_folder", "")
+os.environ["MIDJOURNEY_TEMP_FOLDER"] = midjourney_temp_folder
+
+load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
+ "azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
+
usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120))
-## 多账户机制
-multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制
+# 多账户机制
+multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制
if multi_api_key:
api_key_list = config.get("api_key_list", [])
if len(api_key_list) == 0:
@@ -93,21 +138,26 @@ if multi_api_key:
sys.exit(1)
shared.state.set_api_key_queue(api_key_list)
-auth_list = config.get("users", []) # 实际上是使用者的列表
+auth_list = config.get("users", []) # 实际上是使用者的列表
authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度
# 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配
-api_host = os.environ.get("OPENAI_API_BASE", config.get("openai_api_base", None))
+api_host = os.environ.get(
+ "OPENAI_API_BASE", config.get("openai_api_base", None))
if api_host is not None:
shared.state.set_api_host(api_host)
+ os.environ["OPENAI_API_BASE"] = f"{api_host}/v1"
+ logging.info(f"OpenAI API Base set to: {os.environ['OPENAI_API_BASE']}")
-default_chuanhu_assistant_model = config.get("default_chuanhu_assistant_model", "gpt-3.5-turbo")
+default_chuanhu_assistant_model = config.get(
+ "default_chuanhu_assistant_model", "gpt-3.5-turbo")
for x in ["GOOGLE_CSE_ID", "GOOGLE_API_KEY", "WOLFRAM_ALPHA_APPID", "SERPAPI_API_KEY"]:
if config.get(x, None) is not None:
os.environ[x] = config[x]
+
@contextmanager
-def retrieve_openai_api(api_key = None):
+def retrieve_openai_api(api_key=None):
old_api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key is None:
os.environ["OPENAI_API_KEY"] = my_api_key
@@ -117,24 +167,20 @@ def retrieve_openai_api(api_key = None):
yield api_key
os.environ["OPENAI_API_KEY"] = old_api_key
-## 处理log
-log_level = config.get("log_level", "INFO")
-logging.basicConfig(
- level=log_level,
- format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
-)
-## 处理代理:
-http_proxy = config.get("http_proxy", "")
-https_proxy = config.get("https_proxy", "")
-http_proxy = os.environ.get("HTTP_PROXY", http_proxy)
-https_proxy = os.environ.get("HTTPS_PROXY", https_proxy)
+
+# 处理代理:
+http_proxy = os.environ.get("HTTP_PROXY", "")
+https_proxy = os.environ.get("HTTPS_PROXY", "")
+http_proxy = config.get("http_proxy", http_proxy)
+https_proxy = config.get("https_proxy", https_proxy)
# 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错
os.environ["HTTP_PROXY"] = ""
os.environ["HTTPS_PROXY"] = ""
-local_embedding = config.get("local_embedding", False) # 是否使用本地embedding
+local_embedding = config.get("local_embedding", False) # 是否使用本地embedding
+
@contextmanager
def retrieve_proxy(proxy=None):
@@ -151,22 +197,62 @@ def retrieve_proxy(proxy=None):
old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
os.environ["HTTP_PROXY"] = http_proxy
os.environ["HTTPS_PROXY"] = https_proxy
- yield http_proxy, https_proxy # return new proxy
+ yield http_proxy, https_proxy # return new proxy
# return old proxy
os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var
-## 处理advance docs
+# 处理latex options
+user_latex_option = config.get("latex_option", "default")
+if user_latex_option == "default":
+ latex_delimiters_set = [
+ {"left": "$$", "right": "$$", "display": True},
+ {"left": "$", "right": "$", "display": False},
+ {"left": "\\(", "right": "\\)", "display": False},
+ {"left": "\\[", "right": "\\]", "display": True},
+ ]
+elif user_latex_option == "strict":
+ latex_delimiters_set = [
+ {"left": "$$", "right": "$$", "display": True},
+ {"left": "\\(", "right": "\\)", "display": False},
+ {"left": "\\[", "right": "\\]", "display": True},
+ ]
+elif user_latex_option == "all":
+ latex_delimiters_set = [
+ {"left": "$$", "right": "$$", "display": True},
+ {"left": "$", "right": "$", "display": False},
+ {"left": "\\(", "right": "\\)", "display": False},
+ {"left": "\\[", "right": "\\]", "display": True},
+ {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
+ {"left": "\\begin{align}", "right": "\\end{align}", "display": True},
+ {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
+ {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
+ {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
+ ]
+elif user_latex_option == "disabled":
+ latex_delimiters_set = []
+else:
+ latex_delimiters_set = [
+ {"left": "$$", "right": "$$", "display": True},
+ {"left": "$", "right": "$", "display": False},
+ {"left": "\\(", "right": "\\)", "display": False},
+ {"left": "\\[", "right": "\\]", "display": True},
+ ]
+
+# 处理advance docs
advance_docs = defaultdict(lambda: defaultdict(dict))
advance_docs.update(config.get("advance_docs", {}))
+
+
def update_doc_config(two_column_pdf):
global advance_docs
advance_docs["pdf"]["two_column"] = two_column_pdf
logging.info(f"更新后的文件参数为:{advance_docs}")
-## 处理gradio.launch参数
+
+# 处理gradio.launch参数
server_name = config.get("server_name", None)
server_port = config.get("server_port", None)
if server_name is None:
@@ -188,3 +274,7 @@ except ValueError:
pass
share = config.get("share", False)
+
+# avatar
+bot_avatar = config.get("bot_avatar", "default")
+user_avatar = config.get("user_avatar", "default")
\ No newline at end of file
diff --git a/modules/index_func.py b/modules/index_func.py
index 09f792eb9df4d55d8bb1c172a9d07d7c41541266..ac128668c2920b6b4b945e0de3dcd745fe141200 100644
--- a/modules/index_func.py
+++ b/modules/index_func.py
@@ -1,7 +1,7 @@
import os
import logging
-import colorama
+import hashlib
import PyPDF2
from tqdm import tqdm
@@ -10,19 +10,6 @@ from modules.utils import *
from modules.config import local_embedding
-def get_index_name(file_src):
- file_paths = [x.name for x in file_src]
- file_paths.sort(key=lambda x: os.path.basename(x))
-
- md5_hash = hashlib.md5()
- for file_path in file_paths:
- with open(file_path, "rb") as f:
- while chunk := f.read(8192):
- md5_hash.update(chunk)
-
- return md5_hash.hexdigest()
-
-
def get_documents(file_src):
from langchain.schema import Document
from langchain.text_splitter import TokenTextSplitter
@@ -47,11 +34,12 @@ def get_documents(file_src):
pdftext = parse_pdf(filepath, two_column).text
except:
pdftext = ""
- with open(filepath, "rb", encoding="utf-8") as pdfFileObj:
+ with open(filepath, "rb") as pdfFileObj:
pdfReader = PyPDF2.PdfReader(pdfFileObj)
for page in tqdm(pdfReader.pages):
pdftext += page.extract_text()
- texts = [Document(page_content=pdftext, metadata={"source": filepath})]
+ texts = [Document(page_content=pdftext,
+ metadata={"source": filepath})]
elif file_type == ".docx":
logging.debug("Loading Word...")
from langchain.document_loaders import UnstructuredWordDocumentLoader
@@ -72,7 +60,8 @@ def get_documents(file_src):
text_list = excel_to_string(filepath)
texts = []
for elem in text_list:
- texts.append(Document(page_content=elem, metadata={"source": filepath}))
+ texts.append(Document(page_content=elem,
+ metadata={"source": filepath}))
else:
logging.debug("Loading text file...")
from langchain.document_loaders import TextLoader
@@ -111,14 +100,20 @@ def construct_index(
embedding_limit = None if embedding_limit == 0 else embedding_limit
separator = " " if separator == "" else separator
- index_name = get_index_name(file_src)
+ index_name = get_file_hash(file_src)
index_path = f"./index/{index_name}"
if local_embedding:
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
- embeddings = HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2")
+ embeddings = HuggingFaceEmbeddings(
+ model_name="sentence-transformers/distiluse-base-multilingual-cased-v2")
else:
from langchain.embeddings import OpenAIEmbeddings
- embeddings = OpenAIEmbeddings(openai_api_base=os.environ.get("OPENAI_API_BASE", None), openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key))
+ if os.environ.get("OPENAI_API_TYPE", "openai") == "openai":
+ embeddings = OpenAIEmbeddings(openai_api_base=os.environ.get(
+ "OPENAI_API_BASE", None), openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key))
+ else:
+ embeddings = OpenAIEmbeddings(deployment=os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"], openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
+ model=os.environ["AZURE_EMBEDDING_MODEL_NAME"], openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"], openai_api_type="azure")
if os.path.exists(index_path):
logging.info("找到了缓存的索引文件,加载中……")
return FAISS.load_local(index_path, embeddings)
diff --git a/modules/models/Google_PaLM.py b/modules/models/Google_PaLM.py
new file mode 100644
index 0000000000000000000000000000000000000000..79ca042e228b25546600e4258a0b75790e25bb52
--- /dev/null
+++ b/modules/models/Google_PaLM.py
@@ -0,0 +1,26 @@
+from .base_model import BaseLLMModel
+import google.generativeai as palm
+
+class Google_PaLM_Client(BaseLLMModel):
+ def __init__(self, model_name, api_key, user_name="") -> None:
+ super().__init__(model_name=model_name, user=user_name)
+ self.api_key = api_key
+
+ def _get_palm_style_input(self):
+ new_history = []
+ for item in self.history:
+ if item["role"] == "user":
+ new_history.append({'author': '1', 'content': item["content"]})
+ else:
+ new_history.append({'author': '0', 'content': item["content"]})
+ return new_history
+
+ def get_answer_at_once(self):
+ palm.configure(api_key=self.api_key)
+ messages = self._get_palm_style_input()
+ response = palm.chat(context=self.system_prompt, messages=messages, temperature=self.temperature, top_p=self.top_p)
+ if response.last is not None:
+ return response.last, len(response.last)
+ else:
+ reasons = '\n\n'.join(reason['reason'].name for reason in response.filters)
+ return "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n" + reasons, 0
\ No newline at end of file
diff --git a/modules/models/__pycache__/ChuanhuAgent.cpython-311.pyc b/modules/models/__pycache__/ChuanhuAgent.cpython-311.pyc
index d4433e7511f215299b6860e7e018885b0fb4d48f..52ddd9f380549cd59f30b5f64838cc1802ca55ec 100644
Binary files a/modules/models/__pycache__/ChuanhuAgent.cpython-311.pyc and b/modules/models/__pycache__/ChuanhuAgent.cpython-311.pyc differ
diff --git a/modules/models/__pycache__/Google_PaLM.cpython-311.pyc b/modules/models/__pycache__/Google_PaLM.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed75107609d5740e72e85029fee7bf9d492fc841
Binary files /dev/null and b/modules/models/__pycache__/Google_PaLM.cpython-311.pyc differ
diff --git a/modules/models/__pycache__/azure.cpython-311.pyc b/modules/models/__pycache__/azure.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e551727c93687624924c67029e32057c8ceecfc3
Binary files /dev/null and b/modules/models/__pycache__/azure.cpython-311.pyc differ
diff --git a/modules/models/__pycache__/base_model.cpython-311.pyc b/modules/models/__pycache__/base_model.cpython-311.pyc
index 351b91a086cfacf0d9677f176ceb54ce5668058c..e24b400d5a0b82cda92b099df8df18940afa66b4 100644
Binary files a/modules/models/__pycache__/base_model.cpython-311.pyc and b/modules/models/__pycache__/base_model.cpython-311.pyc differ
diff --git a/modules/models/__pycache__/base_model.cpython-39.pyc b/modules/models/__pycache__/base_model.cpython-39.pyc
index 5cd38dcb76a41f29fc2d86f586c411480b65eda2..eef2894f3d10ee39351925e8da7952ad7f462fe2 100644
Binary files a/modules/models/__pycache__/base_model.cpython-39.pyc and b/modules/models/__pycache__/base_model.cpython-39.pyc differ
diff --git a/modules/models/__pycache__/models.cpython-311.pyc b/modules/models/__pycache__/models.cpython-311.pyc
index a1e63fb18cb6ef430173b8d71e575b4dd7da8c5f..67918c0d0fed25631b30819c4271bb73ae63ae92 100644
Binary files a/modules/models/__pycache__/models.cpython-311.pyc and b/modules/models/__pycache__/models.cpython-311.pyc differ
diff --git a/modules/models/__pycache__/models.cpython-39.pyc b/modules/models/__pycache__/models.cpython-39.pyc
index 16fc4cbf158d5d39c01c2bf33a0d7f011765ed34..41fc2f8be62153f20564b31afc5d163dd1131e2d 100644
Binary files a/modules/models/__pycache__/models.cpython-39.pyc and b/modules/models/__pycache__/models.cpython-39.pyc differ
diff --git a/modules/models/azure.py b/modules/models/azure.py
new file mode 100644
index 0000000000000000000000000000000000000000..42cddfbda8cc74e40e114ee4bed46a2f9ff74ce9
--- /dev/null
+++ b/modules/models/azure.py
@@ -0,0 +1,17 @@
+from langchain.chat_models import AzureChatOpenAI
+import os
+
+from .base_model import Base_Chat_Langchain_Client
+
+# load_config_to_environ(["azure_openai_api_key", "azure_api_base_url", "azure_openai_api_version", "azure_deployment_name"])
+
+class Azure_OpenAI_Client(Base_Chat_Langchain_Client):
+ def setup_model(self):
+ # implement this to set up the model, then return it
+ return AzureChatOpenAI(
+ openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"],
+ openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
+ deployment_name=os.environ["AZURE_DEPLOYMENT_NAME"],
+ openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
+ openai_api_type="azure",
+ )
\ No newline at end of file
diff --git a/modules/models/base_model.py b/modules/models/base_model.py
index 0c703b6750cbea953bbe8e97a806473831035c0a..fa94579d725dbf9d739d58fc17b35bc2248c7fcd 100644
--- a/modules/models/base_model.py
+++ b/modules/models/base_model.py
@@ -29,6 +29,8 @@ from langchain.input import print_text
from langchain.schema import AgentAction, AgentFinish, LLMResult
from threading import Thread, Condition
from collections import deque
+from langchain.chat_models.base import BaseChatModel
+from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage
from ..presets import *
from ..index_func import *
@@ -36,6 +38,7 @@ from ..utils import *
from .. import shared
from ..config import retrieve_proxy
+
class CallbackToIterator:
def __init__(self):
self.queue = deque()
@@ -52,7 +55,8 @@ class CallbackToIterator:
def __next__(self):
with self.cond:
- while not self.queue and not self.finished: # Wait for a value to be added to the queue.
+ # Wait for a value to be added to the queue.
+ while not self.queue and not self.finished:
self.cond.wait()
if not self.queue:
raise StopIteration()
@@ -63,6 +67,7 @@ class CallbackToIterator:
self.finished = True
self.cond.notify() # Wake up the generator if it's waiting.
+
def get_action_description(text):
match = re.search('```(.*?)```', text, re.S)
json_text = match.group(1)
@@ -72,10 +77,11 @@ def get_action_description(text):
action_name = json_dict['action']
action_input = json_dict['action_input']
if action_name != "Final Answer":
- return f'
{action_name}: {action_input}
' + return f'{action_name}: {action_input}\n\n
' else: return "" + class ChuanhuCallbackHandler(BaseCallbackHandler): def __init__(self, callback) -> None: @@ -117,6 +123,10 @@ class ChuanhuCallbackHandler(BaseCallbackHandler): """Run on new LLM token. Only available when streaming is enabled.""" self.callback(token) + def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any) -> Any: + """Run when a chat model starts running.""" + pass + class ModelType(Enum): Unknown = -1 @@ -129,6 +139,9 @@ class ModelType(Enum): YuanAI = 6 Minimax = 7 ChuanhuAgent = 8 + GooglePaLM = 9 + LangchainChat = 10 + Midjourney = 11 @classmethod def get_type(cls, model_name: str): @@ -152,6 +165,12 @@ class ModelType(Enum): model_type = ModelType.Minimax elif "川虎助理" in model_name_lower: model_type = ModelType.ChuanhuAgent + elif "palm" in model_name_lower: + model_type = ModelType.GooglePaLM + elif "midjourney" in model_name_lower: + model_type = ModelType.Midjourney + elif "azure" in model_name_lower or "api" in model_name_lower: + model_type = ModelType.LangchainChat else: model_type = ModelType.Unknown return model_type @@ -161,7 +180,7 @@ class BaseLLMModel: def __init__( self, model_name, - system_prompt="", + system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, n_choices=1, @@ -201,7 +220,8 @@ class BaseLLMModel: conversations are stored in self.history, with the most recent question, in OpenAI format should return a generator, each time give the next word (str) in the answer """ - logging.warning("stream predict not implemented, using at once predict instead") + logging.warning( + "stream predict not implemented, using at once predict instead") response, _ = self.get_answer_at_once() yield response @@ -212,7 +232,8 @@ class BaseLLMModel: the answer (str) total token count (int) """ - logging.warning("at once predict not implemented, using stream predict instead") + logging.warning( + "at once predict not implemented, using stream predict instead") response_iter = 
self.get_answer_stream_iter() count = 0 for response in response_iter: @@ -246,7 +267,8 @@ class BaseLLMModel: stream_iter = self.get_answer_stream_iter() if display_append: - display_append = "