diff --git a/CITATION.cff b/CITATION.cff
index c1b2475a4d12546ffe61d3d2530e954cc43a0563..ea3e1503a4aff8e954bb36b1bba6370f81f239f6 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -1,5 +1,5 @@
cff-version: 1.2.0
-title: ChuanhuChatGPT
+title: Chuanhu Chat
message: >-
If you use this software, please cite it using these
metadata.
@@ -13,8 +13,8 @@ authors:
orcid: https://orcid.org/0009-0005-0357-272X
repository-code: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
url: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
-abstract: Provided a light and easy to use interface for ChatGPT API
+abstract: This software provides a light and easy-to-use interface for ChatGPT API and many LLMs.
license: GPL-3.0
-commit: bd0034c37e5af6a90bd9c2f7dd073f6cd27c61af
-version: '20230405'
-date-released: '2023-04-05'
+commit: c6c08bc62ef80e37c8be52f65f9b6051a7eea1fa
+version: '20230709'
+date-released: '2023-07-09'
diff --git a/ChuanhuChatbot.py b/ChuanhuChatbot.py
index 69b99adc583455cf39e580313d8e261b988845ef..d498359af5c02037247406830672bcbbdbb7006b 100644
--- a/ChuanhuChatbot.py
+++ b/ChuanhuChatbot.py
@@ -1,8 +1,11 @@
# -*- coding:utf-8 -*-
-import os
import logging
-import sys
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
+)
+import colorama
import gradio as gr
from modules import config
@@ -12,6 +15,7 @@ from modules.presets import *
from modules.overwrites import *
from modules.webui import *
from modules.repo import *
+from modules.train_func import *
from modules.models.models import get_model
logging.getLogger("httpx").setLevel(logging.WARNING)
@@ -40,6 +44,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
status_display = gr.Markdown(get_geoip(), elem_id="status-display")
with gr.Row(elem_id="float-display"):
user_info = gr.Markdown(value="getting user info...", elem_id="user-info")
+ config_info = gr.HTML(get_html("config_info.html").format(bot_avatar=config.bot_avatar, user_avatar=config.user_avatar), visible=False, elem_id="config-info")
update_info = gr.HTML(get_html("update.html").format(
current_version=repo_tag_html(),
version_time=version_time(),
@@ -63,13 +68,17 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
with gr.Column(min_width=42, scale=1):
submitBtn = gr.Button(value="", variant="primary", elem_id="submit-btn")
cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel-btn")
- with gr.Row():
- emptyBtn = gr.Button(
- i18n("🧹 新的对话"), elem_id="empty-btn"
- )
- retryBtn = gr.Button(i18n("🔄 重新生成"))
- delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
- delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
+ with gr.Row(elem_id="chatbot-buttons"):
+ with gr.Column(min_width=120, scale=1):
+ emptyBtn = gr.Button(
+ i18n("🧹 新的对话"), elem_id="empty-btn"
+ )
+ with gr.Column(min_width=120, scale=1):
+ retryBtn = gr.Button(i18n("🔄 重新生成"))
+ with gr.Column(min_width=120, scale=1):
+ delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
+ with gr.Column(min_width=120, scale=1):
+ delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
with gr.Row(visible=False) as like_dislike_area:
with gr.Column(min_width=20, scale=1):
likeBtn = gr.Button(i18n("👍"))
@@ -178,6 +187,25 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
with gr.Column():
downloadFile = gr.File(interactive=True)
+ with gr.Tab(label=i18n("微调")):
+ openai_train_status = gr.Markdown(label=i18n("训练状态"), value=i18n("在这里[查看使用介绍](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/%E4%BD%BF%E7%94%A8%E6%95%99%E7%A8%8B#%E5%BE%AE%E8%B0%83-gpt-35)"))
+
+ with gr.Tab(label=i18n("准备数据集")):
+ dataset_preview_json = gr.JSON(label=i18n("数据集预览"), readonly=True)
+ dataset_selection = gr.Files(label = i18n("选择数据集"), file_types=[".xlsx", ".jsonl"], file_count="single")
+ upload_to_openai_btn = gr.Button(i18n("上传到OpenAI"), variant="primary", interactive=False)
+
+ with gr.Tab(label=i18n("训练")):
+ openai_ft_file_id = gr.Textbox(label=i18n("文件ID"), value="", lines=1, placeholder=i18n("上传到 OpenAI 后自动填充"))
+ openai_ft_suffix = gr.Textbox(label=i18n("模型名称后缀"), value="", lines=1, placeholder=i18n("可选,用于区分不同的模型"))
+ openai_train_epoch_slider = gr.Slider(label=i18n("训练轮数(Epochs)"), minimum=1, maximum=100, value=3, step=1, interactive=True)
+ openai_start_train_btn = gr.Button(i18n("开始训练"), variant="primary", interactive=False)
+
+ with gr.Tab(label=i18n("状态")):
+ openai_status_refresh_btn = gr.Button(i18n("刷新状态"))
+ openai_cancel_all_jobs_btn = gr.Button(i18n("取消所有任务"))
+ add_to_models_btn = gr.Button(i18n("添加训练好的模型到模型列表"), interactive=False)
+
with gr.Tab(label=i18n("高级")):
gr.HTML(get_html("appearance_switcher.html").format(label=i18n("切换亮暗色主题")), elem_classes="insert-block")
use_streaming_checkbox = gr.Checkbox(
@@ -292,9 +320,9 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
elem_classes="view-only-textbox no-container",
)
# changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址"))
-
updateChuanhuBtn = gr.Button(visible=False, elem_classes="invisible-btn", elem_id="update-chuanhu-btn")
+
gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer")
@@ -376,7 +404,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
inputs=[current_model],
outputs=[chatbot, status_display],
show_progress=True,
- _js='clearHistoryHtml',
+ _js='clearChatbot',
)
retryBtn.click(**start_outputing_args).then(
@@ -466,6 +494,18 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
historyFileSelectDropdown.change(**load_history_from_file_args)
downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name], [saveFileName, systemPromptTxt, chatbot])
+ # Train
+ dataset_selection.upload(handle_dataset_selection, dataset_selection, [dataset_preview_json, upload_to_openai_btn, openai_train_status])
+ dataset_selection.clear(handle_dataset_clear, [], [dataset_preview_json, upload_to_openai_btn])
+ upload_to_openai_btn.click(upload_to_openai, [dataset_selection], [openai_ft_file_id, openai_train_status], show_progress=True)
+
+ openai_ft_file_id.change(lambda x: gr.update(interactive=True) if len(x) > 0 else gr.update(interactive=False), [openai_ft_file_id], [openai_start_train_btn])
+ openai_start_train_btn.click(start_training, [openai_ft_file_id, openai_ft_suffix, openai_train_epoch_slider], [openai_train_status])
+
+ openai_status_refresh_btn.click(get_training_status, [], [openai_train_status, add_to_models_btn])
+ add_to_models_btn.click(add_to_models, [], [model_select_dropdown, openai_train_status], show_progress=True)
+ openai_cancel_all_jobs_btn.click(cancel_all_jobs, [], [openai_train_status], show_progress=True)
+
# Advanced
max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None)
temperature_slider.change(set_temperature, [current_model, temperature_slider], None)
@@ -513,4 +553,7 @@ demo.title = i18n("川虎Chat 🚀")
if __name__ == "__main__":
reload_javascript()
- demo.queue(concurrency_count=CONCURRENT_COUNT).launch()
+ demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
+ blocked_paths=["config.json"],
+ favicon_path="./web_assets/favicon.ico",
+ )
diff --git a/Dockerfile b/Dockerfile
index 335c2dba28ba8c365de9306858462a59dea25f28..85d5045d5316ac160277af1e7d60afa823c0f953 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,15 +1,18 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
+FROM python:3.9-slim-buster as builder
+RUN apt-get update \
+ && apt-get install -y build-essential \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
COPY requirements.txt .
COPY requirements_advanced.txt .
-RUN pip install --user -r requirements.txt
-# RUN pip install --user -r requirements_advanced.txt
+RUN pip install --user --no-cache-dir -r requirements.txt
+# RUN pip install --user --no-cache-dir -r requirements_advanced.txt
-FROM python:3.9
-MAINTAINER iskoldt
+FROM python:3.9-slim-buster
+LABEL maintainer="iskoldt"
COPY --from=builder /root/.local /root/.local
ENV PATH=/root/.local/bin:$PATH
COPY . /app
WORKDIR /app
-ENV dockerrun yes
-CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
+ENV dockerrun=yes
+CMD ["python3", "-u", "ChuanhuChatbot.py","2>&1", "|", "tee", "/var/log/application.log"]
diff --git a/config_example.json b/config_example.json
index b49e61913778f1d8c69414b9ac0b6bbb27cf5626..0b77caefbb39ef08d6a53b3b40ee67bb8a3b1576 100644
--- a/config_example.json
+++ b/config_example.json
@@ -7,6 +7,11 @@
"xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
"minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
"minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
+ "midjourney_proxy_api_base": "https://xxx/mj", // 你的 https://github.com/novicezk/midjourney-proxy 代理地址
+ "midjourney_proxy_api_secret": "", // 你的 MidJourney Proxy API Secret,用于鉴权访问 api,可选
+ "midjourney_discord_proxy_url": "", // 你的 MidJourney Discord Proxy URL,用于对生成对图进行反代,可选
+ "midjourney_temp_folder": "./tmp", // 你的 MidJourney 临时文件夹,用于存放生成的图片,填空则关闭自动下载切图(直接显示MJ的四宫格图)
+
//== Azure ==
"openai_api_type": "openai", // 可选项:azure, openai
@@ -24,6 +29,8 @@
"hide_history_when_not_logged_in": false, //未登录情况下是否不展示对话历史
"check_update": true, //是否启用检查更新
"default_model": "gpt-3.5-turbo", // 默认模型
+ "bot_avatar": "default", // 机器人头像,可填写图片链接、Data URL (base64),或者"none"(不显示头像)
+ "user_avatar": "default", // 用户头像,可填写图片链接、Data URL (base64),或者"none"(不显示头像)
//== API 用量 ==
"show_api_billing": false, //是否显示OpenAI API用量(启用需要填写sensitive_id)
diff --git a/locale/en_US.json b/locale/en_US.json
index 09f00893344b0b587c4a384f3bcf6d48064e5fa0..17a5aa618ee8e1c4425a7ce69e1d86adfbd24b6c 100644
--- a/locale/en_US.json
+++ b/locale/en_US.json
@@ -32,24 +32,33 @@
"📝 导出为Markdown": "📝 Export as Markdown",
"默认保存于history文件夹": "Default save in history folder",
"高级": "Advanced",
- "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ Caution: Changes require care. ⚠️\n\nIf unable to use, restore default settings.",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ Caution: Changes require care. ⚠️",
"参数": "Parameters",
- "在这里输入停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
+ "停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
"用于定位滥用行为": "Used to locate abuse",
"用户名": "Username",
- "网络设置": "Network Settings",
"在这里输入API-Host...": "Type in API-Host here...",
"🔄 切换API地址": "🔄 Switch API Address",
- "在这里输入代理地址...": "Type in proxy address here...",
- "代理地址(示例:http://127.0.0.1:10809)": "Proxy address (example: http://127.0.0.1:10809)",
+ "未设置代理...": "No proxy...",
+ "代理地址": "Proxy address",
"🔄 设置代理地址": "🔄 Set Proxy Address",
- "🔙 恢复默认设置": "🔙 Restore Default Settings",
+ "🔙 恢复默认网络设置": "🔙 Reset Network Settings",
+ "🔄 检查更新...": "🔄 Check for Update...",
+ "取消": "Cancel",
+ "更新": "Update",
+ "详情": "Details",
+ "好": "OK",
+ "更新成功,请重启本程序": "Updated successfully, please restart this program",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "Update failed, please try [manually updating](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)",
"川虎Chat 🚀": "Chuanhu Chat 🚀",
"开始实时传输回答……": "Start streaming output...",
"Token 计数: ": "Token Count: ",
- ",本次对话累计消耗了 ": ",Total cost for this dialogue is ",
+ ",本次对话累计消耗了 ": ", Total cost for this dialogue is ",
"**获取API使用情况失败**": "**Failed to get API usage**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**Failed to get API usage**, correct sensitive_id needed in `config.json`",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**Failed to get API usage**, wrong or expired sensitive_id",
"**本月使用金额** ": "**Monthly usage** ",
+ "本月使用金额": "Monthly usage",
"获取API使用情况失败:": "Failed to get API usage:",
"API密钥更改为了": "The API key is changed to",
"JSON解析错误,收到的内容: ": "JSON parsing error, received content: ",
@@ -64,10 +73,15 @@
"API key为空,请检查是否输入正确。": "API key is empty, check whether it is entered correctly.",
"请输入对话内容。": "Enter the content of the conversation.",
"账单信息不适用": "Billing information is not applicable",
- "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "developor: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) and [明昭MZhao](https://space.bilibili.com/24807452)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Developed by Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) and [Keldos](https://github.com/Keldos-Li)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
"切换亮暗色主题": "Switch light/dark theme",
"您的IP区域:未知。": "Your IP region: Unknown.",
"获取IP地理位置失败。原因:": "Failed to get IP location. Reason: ",
"。你仍然可以使用聊天功能。": ". You can still use the chat function.",
- "您的IP区域:": "Your IP region: "
+ "您的IP区域:": "Your IP region: ",
+ "总结": "Summarize",
+ "生成内容总结中……": "Generating content summary...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Due to the following reasons, Google refuses to provide an answer to PaLM: \n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ To ensure the security of API-Key, please modify the network settings in the configuration file `config.json`.",
+ "网络参数": "Network parameter"
}
diff --git a/locale/ja_JP.json b/locale/ja_JP.json
index 1acbe7103ef01beb81a8039a77981af8fa31e402..db8fb8441bb669848c5eec4644d5b3e8d814060a 100644
--- a/locale/ja_JP.json
+++ b/locale/ja_JP.json
@@ -32,24 +32,33 @@
"📝 导出为Markdown": "📝 Markdownでエクスポート",
"默认保存于history文件夹": "デフォルトでhistoryフォルダに保存されます",
"高级": "Advanced",
- "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ 変更には慎重に ⚠️\n\nもし動作しない場合は、デフォルト設定に戻してください。",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ 変更には慎重に ⚠️",
"参数": "パラメータ",
- "在这里输入停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
+ "停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
"用于定位滥用行为": "不正行為を特定するために使用されます",
"用户名": "ユーザー名",
- "网络设置": "ネットワーク設定",
"在这里输入API-Host...": "API-Hostを入力してください...",
"🔄 切换API地址": "🔄 APIアドレスを切り替え",
- "在这里输入代理地址...": "プロキシアドレスを入力してください...",
- "代理地址(示例:http://127.0.0.1:10809)": "プロキシアドレス(例:http://127.0.0.1:10809)",
+ "未设置代理...": "代理が設定されていません...",
+ "代理地址": "プロキシアドレス",
"🔄 设置代理地址": "🔄 プロキシアドレスを設定",
- "🔙 恢复默认设置": "🔙 デフォルト設定に戻す",
+ "🔙 恢复默认网络设置": "🔙 ネットワーク設定のリセット",
+ "🔄 检查更新...": "🔄 アップデートをチェック...",
+ "取消": "キャンセル",
+ "更新": "アップデート",
+ "详情": "詳細",
+ "好": "はい",
+ "更新成功,请重启本程序": "更新が成功しました、このプログラムを再起動してください",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "更新に失敗しました、[手動での更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)をお試しください。",
"川虎Chat 🚀": "川虎Chat 🚀",
"开始实时传输回答……": "ストリーム出力開始……",
"Token 计数: ": "Token数: ",
",本次对话累计消耗了 ": ", 今の会話で消費合計 ",
"**获取API使用情况失败**": "**API使用状況の取得に失敗しました**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**API使用状況の取得に失敗しました**、`config.json`に正しい`sensitive_id`を入力する必要があります",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**API使用状況の取得に失敗しました**、sensitive_idが間違っているか、期限切れです",
"**本月使用金额** ": "**今月の使用料金** ",
+ "本月使用金额": "今月の使用料金",
"获取API使用情况失败:": "API使用状況の取得に失敗しました:",
"API密钥更改为了": "APIキーが変更されました",
"JSON解析错误,收到的内容: ": "JSON解析エラー、受信内容: ",
@@ -64,10 +73,15 @@
"API key为空,请检查是否输入正确。": "APIキーが入力されていません。正しく入力されているか確認してください。",
"请输入对话内容。": "会話内容を入力してください。",
"账单信息不适用": "課金情報は対象外です",
- "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452) と [Keldos](https://github.com/Keldos-Li)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
"切换亮暗色主题": "テーマの明暗切替",
"您的IP区域:未知。": "あなたのIPアドレス地域:不明",
"获取IP地理位置失败。原因:": "IPアドレス地域の取得に失敗しました。理由:",
"。你仍然可以使用聊天功能。": "。あなたはまだチャット機能を使用できます。",
- "您的IP区域:": "あなたのIPアドレス地域:"
-}
\ No newline at end of file
+ "您的IP区域:": "あなたのIPアドレス地域:",
+ "总结": "要約する",
+ "生成内容总结中……": "コンテンツ概要を生成しています...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Googleは以下の理由から、PaLMの回答を返すことを拒否しています:\n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ APIキーの安全性を確保するために、`config.json`ファイルでネットワーク設定を変更してください。",
+ "网络参数": "ネットワークパラメータ"
+}
diff --git a/locale/ko_KR.json b/locale/ko_KR.json
new file mode 100644
index 0000000000000000000000000000000000000000..a7f45732eeae5b65930a078c0b326c9659abd270
--- /dev/null
+++ b/locale/ko_KR.json
@@ -0,0 +1,89 @@
+{
+ "未命名对话历史记录": "이름없는 대화 기록",
+ "在这里输入": "여기에 입력하세요",
+ "🧹 新的对话": "🧹 새로운 대화",
+ "🔄 重新生成": "🔄 재생성",
+ "🗑️ 删除最旧对话": "🗑️ 가장 오래된 대화 삭제",
+ "🗑️ 删除最新对话": "🗑️ 최신 대화 삭제",
+ "🗑️ 删除": "🗑️ 삭제",
+ "模型": "LLM 모델",
+ "多账号模式已开启,无需输入key,可直接开始对话": "다중 계정 모드가 활성화되어 있으므로 키를 입력할 필요가 없이 바로 대화를 시작할 수 있습니다",
+ "**发送消息** 或 **提交key** 以显示额度": "**메세지를 전송** 하거나 **Key를 입력**하여 크레딧 표시",
+ "选择模型": "모델 선택",
+ "选择LoRA模型": "LoRA 모델 선택",
+ "实时传输回答": "실시간 전송",
+ "单轮对话": "단일 대화",
+ "使用在线搜索": "온라인 검색 사용",
+ "选择回复语言(针对搜索&索引功能)": "답장 언어 선택 (검색 & 인덱스용)",
+ "上传索引文件": "업로드",
+ "双栏pdf": "2-column pdf",
+ "识别公式": "formula OCR",
+ "在这里输入System Prompt...": "여기에 시스템 프롬프트를 입력하세요...",
+ "加载Prompt模板": "프롬프트 템플릿 불러오기",
+ "选择Prompt模板集合文件": "프롬프트 콜렉션 파일 선택",
+ "🔄 刷新": "🔄 새로고침",
+ "从Prompt模板中加载": "프롬프트 템플릿에서 불러오기",
+ "保存/加载": "저장/불러오기",
+ "保存/加载对话历史记录": "대화 기록 저장/불러오기",
+ "从列表中加载对话": "리스트에서 대화 불러오기",
+ "设置文件名: 默认为.json,可选为.md": "파일 이름 설정: 기본값: .json, 선택: .md",
+ "设置保存文件名": "저장 파일명 설정",
+ "对话历史记录": "대화 기록",
+ "💾 保存对话": "💾 대화 저장",
+ "📝 导出为Markdown": "📝 마크다운으로 내보내기",
+ "默认保存于history文件夹": "히스토리 폴더에 기본 저장",
+ "高级": "고급",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ 주의: 변경시 주의하세요. ⚠️",
+ "参数": "파라미터들",
+ "停止符,用英文逗号隔开...": "여기에 정지 토큰 입력, ','로 구분됨...",
+ "用于定位滥用行为": "악용 사례 파악에 활용됨",
+ "用户名": "사용자 이름",
+ "在这里输入API-Host...": "여기에 API host를 입력하세요...",
+ "🔄 切换API地址": "🔄 API 주소 변경",
+ "未设置代理...": "대리인이 설정되지 않았습니다...",
+ "代理地址": "프록시 주소",
+ "🔄 设置代理地址": "🔄 프록시 주소 설정",
+ "🔙 恢复默认网络设置": "🔙 네트워크 설정 초기화",
+ "🔄 检查更新...": "🔄 업데이트 확인...",
+ "取消": "취소",
+ "更新": "업데이트",
+ "详情": "상세",
+ "好": "예",
+ "更新成功,请重启本程序": "업데이트 성공, 이 프로그램을 재시작 해주세요",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "업데이트 실패, [수동 업데이트](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)를 시도하십시오",
+ "川虎Chat 🚀": "Chuanhu Chat 🚀",
+ "开始实时传输回答……": "실시간 응답 출력 시작...",
+ "Token 计数: ": "토큰 수: ",
+ ",本次对话累计消耗了 ": ",이 대화의 전체 비용은 ",
+ "**获取API使用情况失败**": "**API 사용량 가져오기 실패**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**API 사용량 가져오기 실패**. `config.json`에 올바른 `sensitive_id`를 입력해야 합니다",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**API 사용량 가져오기 실패**. sensitive_id가 잘못되었거나 만료되었습니다",
+ "**本月使用金额** ": "**이번 달 사용금액** ",
+ "本月使用金额": "이번 달 사용금액",
+ "获取API使用情况失败:": "API 사용량 가져오기 실패:",
+ "API密钥更改为了": "API 키가 변경되었습니다.",
+ "JSON解析错误,收到的内容: ": "JSON 파싱 에러, 응답: ",
+ "模型设置为了:": "설정된 모델: ",
+ "☹️发生了错误:": "☹️에러: ",
+ "获取对话时发生错误,请查看后台日志": "대화를 가져오는 중 에러가 발생했습니다. 백그라운드 로그를 확인하세요",
+ "请检查网络连接,或者API-Key是否有效。": "네트워크 연결 또는 API키가 유효한지 확인하세요",
+ "连接超时,无法获取对话。": "연결 시간 초과, 대화를 가져올 수 없습니다.",
+ "读取超时,无法获取对话。": "읽기 시간 초과, 대화를 가져올 수 없습니다.",
+ "代理错误,无法获取对话。": "프록시 에러, 대화를 가져올 수 없습니다.",
+ "SSL错误,无法获取对话。": "SSL 에러, 대화를 가져올 수 없습니다.",
+ "API key为空,请检查是否输入正确。": "API 키가 비어 있습니다. 올바르게 입력되었는지 확인하십세요.",
+ "请输入对话内容。": "대화 내용을 입력하세요.",
+ "账单信息不适用": "청구 정보를 가져올 수 없습니다",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "제작: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452), [Keldos](https://github.com/Keldos-Li)\n\n최신 코드 다운로드: [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "切换亮暗色主题": "라이트/다크 테마 전환",
+ "您的IP区域:未知。": "IP 지역: 알 수 없음.",
+ "获取IP地理位置失败。原因:": "다음과 같은 이유로 IP 위치를 가져올 수 없습니다. 이유: ",
+ "。你仍然可以使用聊天功能。": ". 채팅 기능을 계속 사용할 수 있습니다.",
+ "您的IP区域:": "당신의 IP 지역: ",
+ "总结": "요약",
+ "生成内容总结中……": "콘텐츠 요약 생성중...",
+ "上传": "업로드",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "구글은 다음과 같은 이유로 인해 PaLM의 응답을 거부합니다: \n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ API-Key의 안전을 보장하기 위해 네트워크 설정을 `config.json` 구성 파일에서 수정해주세요.",
+ "网络参数": "네트워크 매개변수"
+}
diff --git a/locale/sv-SE.json b/locale/sv-SE.json
new file mode 100644
index 0000000000000000000000000000000000000000..4d3c9627fd967724fceac2a55aaff6b434b70c1b
--- /dev/null
+++ b/locale/sv-SE.json
@@ -0,0 +1,87 @@
+{
+ "未命名对话历史记录": "Onämnd Dialoghistorik",
+ "在这里输入": "Skriv in här",
+ "🧹 新的对话": "🧹 Ny Dialog",
+ "🔄 重新生成": "🔄 Regenerera",
+ "🗑️ 删除最旧对话": "🗑️ Ta bort äldsta dialogen",
+ "🗑️ 删除最新对话": "🗑️ Ta bort senaste dialogen",
+ "模型": "Modell",
+ "多账号模式已开启,无需输入key,可直接开始对话": "Flerkontoläge är aktiverat, ingen nyckel behövs, du kan starta dialogen direkt",
+ "**发送消息** 或 **提交key** 以显示额度": "**Skicka meddelande** eller **Skicka in nyckel** för att visa kredit",
+ "选择模型": "Välj Modell",
+ "选择LoRA模型": "Välj LoRA Modell",
+ "实时传输回答": "Strömmande utdata",
+ "单轮对话": "Enkel dialog",
+ "使用在线搜索": "Använd online-sökning",
+ "选择回复语言(针对搜索&索引功能)": "Välj svarspråk (för sök- och indexfunktion)",
+ "上传索引文件": "Ladda upp",
+ "双栏pdf": "Två-kolumns pdf",
+ "识别公式": "Formel OCR",
+ "在这里输入System Prompt...": "Skriv in System Prompt här...",
+ "加载Prompt模板": "Ladda Prompt-mall",
+ "选择Prompt模板集合文件": "Välj Prompt-mall Samlingsfil",
+ "🔄 刷新": "🔄 Uppdatera",
+ "从Prompt模板中加载": "Ladda från Prompt-mall",
+ "保存/加载": "Spara/Ladda",
+ "保存/加载对话历史记录": "Spara/Ladda Dialoghistorik",
+ "从列表中加载对话": "Ladda dialog från lista",
+ "设置文件名: 默认为.json,可选为.md": "Ställ in filnamn: standard är .json, valfritt är .md",
+ "设置保存文件名": "Ställ in sparfilnamn",
+ "对话历史记录": "Dialoghistorik",
+ "💾 保存对话": "💾 Spara Dialog",
+ "📝 导出为Markdown": "📝 Exportera som Markdown",
+ "默认保存于history文件夹": "Sparas som standard i mappen history",
+ "高级": "Avancerat",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ Var försiktig med ändringar. ⚠️",
+ "参数": "Parametrar",
+ "停止符,用英文逗号隔开...": "Skriv in stopptecken här, separerade med kommatecken...",
+ "用于定位滥用行为": "Används för att lokalisera missbruk",
+ "用户名": "Användarnamn",
+ "在这里输入API-Host...": "Skriv in API-Host här...",
+ "🔄 切换API地址": "🔄 Byt API-adress",
+ "未设置代理...": "Inte inställd proxy...",
+ "代理地址": "Proxyadress",
+ "🔄 设置代理地址": "🔄 Ställ in Proxyadress",
+ "🔙 恢复网络默认设置": "🔙 Återställ Nätverksinställningar",
+ "🔄 检查更新...": "🔄 Sök efter uppdateringar...",
+ "取消": "Avbryt",
+ "更新": "Uppdatera",
+ "详情": "Detaljer",
+ "好": "OK",
+ "更新成功,请重启本程序": "Uppdaterat framgångsrikt, starta om programmet",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "Uppdateringen misslyckades, prova att [uppdatera manuellt](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)",
+ "川虎Chat 🚀": "Chuanhu Chat 🚀",
+ "开始实时传输回答……": "Börjar strömma utdata...",
+ "Token 计数: ": "Tokenräkning: ",
+ ",本次对话累计消耗了 ": ", Total kostnad för denna dialog är ",
+ "**获取API使用情况失败**": "**Misslyckades med att hämta API-användning**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**Misslyckades med att hämta API-användning**, korrekt sensitive_id behövs i `config.json`",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**Misslyckades med att hämta API-användning**, felaktig eller utgången sensitive_id",
+ "**本月使用金额** ": "**Månadens användning** ",
+ "本月使用金额": "Månadens användning",
+ "获取API使用情况失败:": "Misslyckades med att hämta API-användning:",
+ "API密钥更改为了": "API-nyckeln har ändrats till",
+ "JSON解析错误,收到的内容: ": "JSON-tolkningsfel, mottaget innehåll: ",
+ "模型设置为了:": "Modellen är inställd på: ",
+ "☹️发生了错误:": "☹️Fel: ",
+ "获取对话时发生错误,请查看后台日志": "Ett fel uppstod när dialogen hämtades, kontrollera bakgrundsloggen",
+ "请检查网络连接,或者API-Key是否有效。": "Kontrollera nätverksanslutningen eller om API-nyckeln är giltig.",
+ "连接超时,无法获取对话。": "Anslutningen tog för lång tid, kunde inte hämta dialogen.",
+ "读取超时,无法获取对话。": "Läsningen tog för lång tid, kunde inte hämta dialogen.",
+ "代理错误,无法获取对话。": "Proxyfel, kunde inte hämta dialogen.",
+ "SSL错误,无法获取对话。": "SSL-fel, kunde inte hämta dialogen.",
+ "API key为空,请检查是否输入正确。": "API-nyckeln är tom, kontrollera om den är korrekt inmatad.",
+ "请输入对话内容。": "Ange dialoginnehåll.",
+ "账单信息不适用": "Faktureringsinformation är inte tillämplig",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Utvecklad av Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) och [Keldos](https://github.com/Keldos-Li)\n\nLadda ner senaste koden från [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "切换亮暗色主题": "Byt ljus/mörk tema",
+ "您的IP区域:未知。": "Din IP-region: Okänd.",
+ "获取IP地理位置失败。原因:": "Misslyckades med att hämta IP-plats. Orsak: ",
+ "。你仍然可以使用聊天功能。": ". Du kan fortfarande använda chattfunktionen.",
+ "您的IP区域:": "Din IP-region: ",
+ "总结": "Sammanfatta",
+ "生成内容总结中……": "Genererar innehållssammanfattning...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "På grund av följande skäl vägrar Google att ge ett svar till PaLM: \n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ För att säkerställa säkerheten för API-nyckeln, vänligen ändra nätverksinställningarna i konfigurationsfilen `config.json`.",
+ "网络参数": "nätverksparametrar"
+}
diff --git a/modules/.DS_Store b/modules/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..b568306d92f15378a29b2420f594020c5ea7d5fa
Binary files /dev/null and b/modules/.DS_Store differ
diff --git a/modules/__pycache__/__init__.cpython-311.pyc b/modules/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..46566f61c6af9157586ea50da720489694853c2b
Binary files /dev/null and b/modules/__pycache__/__init__.cpython-311.pyc differ
diff --git a/modules/__pycache__/__init__.cpython-39.pyc b/modules/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab338d9b6416a67e830a0e71a8cd4f2880a31e6a
Binary files /dev/null and b/modules/__pycache__/__init__.cpython-39.pyc differ
diff --git a/modules/__pycache__/base_model.cpython-311.pyc b/modules/__pycache__/base_model.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0ae3c38679c88598195b896675fecf3489b89a2
Binary files /dev/null and b/modules/__pycache__/base_model.cpython-311.pyc differ
diff --git a/modules/__pycache__/base_model.cpython-39.pyc b/modules/__pycache__/base_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..063f1d071d5db438946e86861ec42002f62377fc
Binary files /dev/null and b/modules/__pycache__/base_model.cpython-39.pyc differ
diff --git a/modules/__pycache__/config.cpython-311.pyc b/modules/__pycache__/config.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9530da3815908ec3787cd4abe832c5a7616b51cd
Binary files /dev/null and b/modules/__pycache__/config.cpython-311.pyc differ
diff --git a/modules/__pycache__/config.cpython-39.pyc b/modules/__pycache__/config.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e2d78243a1c2f7b4b0633bfcdcdb6379e1943f3
Binary files /dev/null and b/modules/__pycache__/config.cpython-39.pyc differ
diff --git a/modules/__pycache__/index_func.cpython-311.pyc b/modules/__pycache__/index_func.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a993b842af381300d650ebfe736cc5865225fead
Binary files /dev/null and b/modules/__pycache__/index_func.cpython-311.pyc differ
diff --git a/modules/__pycache__/index_func.cpython-39.pyc b/modules/__pycache__/index_func.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b97db89c58f233333a9eb6bf72fd871bb3cc4a29
Binary files /dev/null and b/modules/__pycache__/index_func.cpython-39.pyc differ
diff --git a/modules/__pycache__/llama_func.cpython-311.pyc b/modules/__pycache__/llama_func.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee57f7edea1355fb65ea3c899096f97aaa08f787
Binary files /dev/null and b/modules/__pycache__/llama_func.cpython-311.pyc differ
diff --git a/modules/__pycache__/llama_func.cpython-39.pyc b/modules/__pycache__/llama_func.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..315a04cbad9b518cc4ce20fb779b122df3bb0723
Binary files /dev/null and b/modules/__pycache__/llama_func.cpython-39.pyc differ
diff --git a/modules/__pycache__/models.cpython-311.pyc b/modules/__pycache__/models.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98f75e79e72daaf3ea535ce8e053af260bb07132
Binary files /dev/null and b/modules/__pycache__/models.cpython-311.pyc differ
diff --git a/modules/__pycache__/models.cpython-39.pyc b/modules/__pycache__/models.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef9a42bab10bacee11cde3d7040967eeecee7538
Binary files /dev/null and b/modules/__pycache__/models.cpython-39.pyc differ
diff --git a/modules/__pycache__/overwrites.cpython-311.pyc b/modules/__pycache__/overwrites.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cdf5755ea2443f0f2b64f1da6ee85be078b76a7c
Binary files /dev/null and b/modules/__pycache__/overwrites.cpython-311.pyc differ
diff --git a/modules/__pycache__/overwrites.cpython-39.pyc b/modules/__pycache__/overwrites.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d54035e7c8937f1d0fae198be3a2c862468e026
Binary files /dev/null and b/modules/__pycache__/overwrites.cpython-39.pyc differ
diff --git a/modules/__pycache__/pdf_func.cpython-311.pyc b/modules/__pycache__/pdf_func.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5225f07eca638916ed5da2c5e1d248d29432300
Binary files /dev/null and b/modules/__pycache__/pdf_func.cpython-311.pyc differ
diff --git a/modules/__pycache__/pdf_func.cpython-39.pyc b/modules/__pycache__/pdf_func.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..931258c9879426ce84f1d5f9b086e797dbfb4e45
Binary files /dev/null and b/modules/__pycache__/pdf_func.cpython-39.pyc differ
diff --git a/modules/__pycache__/presets.cpython-311.pyc b/modules/__pycache__/presets.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..666229ab5bb4b74e06b75326f0bf67a5990cb398
Binary files /dev/null and b/modules/__pycache__/presets.cpython-311.pyc differ
diff --git a/modules/__pycache__/presets.cpython-39.pyc b/modules/__pycache__/presets.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ea74040f3b8124051ba6565d1a733dd3546cee4
Binary files /dev/null and b/modules/__pycache__/presets.cpython-39.pyc differ
diff --git a/modules/__pycache__/repo.cpython-311.pyc b/modules/__pycache__/repo.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8619438e55aa7d7af505f298ca8602d8ba21e0b2
Binary files /dev/null and b/modules/__pycache__/repo.cpython-311.pyc differ
diff --git a/modules/__pycache__/shared.cpython-311.pyc b/modules/__pycache__/shared.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14621e716a8d3758c252ae7ccc4c42451b4e3d13
Binary files /dev/null and b/modules/__pycache__/shared.cpython-311.pyc differ
diff --git a/modules/__pycache__/shared.cpython-39.pyc b/modules/__pycache__/shared.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..049e6cf0ee5f24ca3aa5346b9f5f810f37b0a025
Binary files /dev/null and b/modules/__pycache__/shared.cpython-39.pyc differ
diff --git a/modules/__pycache__/train_func.cpython-311.pyc b/modules/__pycache__/train_func.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b79271fc649a7635932ab60408e4aadcd143fd1f
Binary files /dev/null and b/modules/__pycache__/train_func.cpython-311.pyc differ
diff --git a/modules/__pycache__/utils.cpython-311.pyc b/modules/__pycache__/utils.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ddb1e91c08c0aa62e5c31b3afd29c07e64008e0
Binary files /dev/null and b/modules/__pycache__/utils.cpython-311.pyc differ
diff --git a/modules/__pycache__/utils.cpython-39.pyc b/modules/__pycache__/utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4cbd0c64bbe16dd098fb92346f36a16ad64833d
Binary files /dev/null and b/modules/__pycache__/utils.cpython-39.pyc differ
diff --git a/modules/__pycache__/webui.cpython-311.pyc b/modules/__pycache__/webui.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e027ee26faf0e36cce97a84afaa5ef05a4df603
Binary files /dev/null and b/modules/__pycache__/webui.cpython-311.pyc differ
diff --git a/modules/__pycache__/webui_locale.cpython-311.pyc b/modules/__pycache__/webui_locale.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab640496a0d0de06686f01c53791021657202e00
Binary files /dev/null and b/modules/__pycache__/webui_locale.cpython-311.pyc differ
diff --git a/modules/__pycache__/webui_locale.cpython-39.pyc b/modules/__pycache__/webui_locale.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33f3f2670e677f3e5e53664ae1c549ff47021c99
Binary files /dev/null and b/modules/__pycache__/webui_locale.cpython-39.pyc differ
diff --git a/modules/config.py b/modules/config.py
index 115312dd2ec4e0bd99eb8b5869b2f0aeed649039..77f8bc62737cb08ae38a9345ae8dc420fb643f25 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -16,7 +16,6 @@ __all__ = [
"auth_list",
"dockerflag",
"retrieve_proxy",
- "log_level",
"advance_docs",
"update_doc_config",
"usage_limit",
@@ -92,10 +91,15 @@ os.environ["OPENAI_API_KEY"] = my_api_key
os.environ["OPENAI_EMBEDDING_API_KEY"] = my_api_key
if config.get("legacy_api_usage", False):
+ sensitive_id = my_api_key
+else:
sensitive_id = config.get("sensitive_id", "")
sensitive_id = os.environ.get("SENSITIVE_ID", sensitive_id)
-else:
- sensitive_id = my_api_key
+
+# 模型配置
+if "extra_models" in config:
+ presets.MODELS.extend(config["extra_models"])
+ logging.info(f"已添加额外的模型:{config['extra_models']}")
google_palm_api_key = config.get("google_palm_api_key", "")
google_palm_api_key = os.environ.get(
@@ -110,6 +114,15 @@ os.environ["MINIMAX_API_KEY"] = minimax_api_key
minimax_group_id = config.get("minimax_group_id", "")
os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
+midjourney_proxy_api_base = config.get("midjourney_proxy_api_base", "")
+os.environ["MIDJOURNEY_PROXY_API_BASE"] = midjourney_proxy_api_base
+midjourney_proxy_api_secret = config.get("midjourney_proxy_api_secret", "")
+os.environ["MIDJOURNEY_PROXY_API_SECRET"] = midjourney_proxy_api_secret
+midjourney_discord_proxy_url = config.get("midjourney_discord_proxy_url", "")
+os.environ["MIDJOURNEY_DISCORD_PROXY_URL"] = midjourney_discord_proxy_url
+midjourney_temp_folder = config.get("midjourney_temp_folder", "")
+os.environ["MIDJOURNEY_TEMP_FOLDER"] = midjourney_temp_folder
+
load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
"azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
@@ -155,12 +168,6 @@ def retrieve_openai_api(api_key=None):
os.environ["OPENAI_API_KEY"] = old_api_key
-# 处理log
-log_level = config.get("log_level", "INFO")
-logging.basicConfig(
- level=log_level,
- format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
-)
# 处理代理:
http_proxy = os.environ.get("HTTP_PROXY", "")
@@ -267,3 +274,7 @@ except ValueError:
pass
share = config.get("share", False)
+
+# avatar
+bot_avatar = config.get("bot_avatar", "default")
+user_avatar = config.get("user_avatar", "default")
\ No newline at end of file
diff --git a/modules/index_func.py b/modules/index_func.py
index b03a3c48911c8184e2701fbac44157b98ad3e582..ac128668c2920b6b4b945e0de3dcd745fe141200 100644
--- a/modules/index_func.py
+++ b/modules/index_func.py
@@ -1,7 +1,7 @@
import os
import logging
-import colorama
+import hashlib
import PyPDF2
from tqdm import tqdm
@@ -10,19 +10,6 @@ from modules.utils import *
from modules.config import local_embedding
-def get_index_name(file_src):
- file_paths = [x.name for x in file_src]
- file_paths.sort(key=lambda x: os.path.basename(x))
-
- md5_hash = hashlib.md5()
- for file_path in file_paths:
- with open(file_path, "rb") as f:
- while chunk := f.read(8192):
- md5_hash.update(chunk)
-
- return md5_hash.hexdigest()
-
-
def get_documents(file_src):
from langchain.schema import Document
from langchain.text_splitter import TokenTextSplitter
@@ -113,7 +100,7 @@ def construct_index(
embedding_limit = None if embedding_limit == 0 else embedding_limit
separator = " " if separator == "" else separator
- index_name = get_index_name(file_src)
+ index_name = get_file_hash(file_src)
index_path = f"./index/{index_name}"
if local_embedding:
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
diff --git a/modules/models/__pycache__/base_model.cpython-311.pyc b/modules/models/__pycache__/base_model.cpython-311.pyc
index eb010cd48966b90367e94deabb7ccb09aaeb2acd..799c9a956d1024af11711cff478b77d53f80165c 100644
Binary files a/modules/models/__pycache__/base_model.cpython-311.pyc and b/modules/models/__pycache__/base_model.cpython-311.pyc differ
diff --git a/modules/models/__pycache__/models.cpython-311.pyc b/modules/models/__pycache__/models.cpython-311.pyc
index cdc7e79c88e69609a763b847330f58ff9a703976..67918c0d0fed25631b30819c4271bb73ae63ae92 100644
Binary files a/modules/models/__pycache__/models.cpython-311.pyc and b/modules/models/__pycache__/models.cpython-311.pyc differ
diff --git a/modules/models/base_model.py b/modules/models/base_model.py
index 27df86767ff761dae6bc04397b507625fb615896..fa94579d725dbf9d739d58fc17b35bc2248c7fcd 100644
--- a/modules/models/base_model.py
+++ b/modules/models/base_model.py
@@ -141,6 +141,7 @@ class ModelType(Enum):
ChuanhuAgent = 8
GooglePaLM = 9
LangchainChat = 10
+ Midjourney = 11
@classmethod
def get_type(cls, model_name: str):
@@ -166,7 +167,9 @@ class ModelType(Enum):
model_type = ModelType.ChuanhuAgent
elif "palm" in model_name_lower:
model_type = ModelType.GooglePaLM
- elif "azure" or "api" in model_name_lower:
+ elif "midjourney" in model_name_lower:
+ model_type = ModelType.Midjourney
+ elif "azure" in model_name_lower or "api" in model_name_lower:
model_type = ModelType.LangchainChat
else:
model_type = ModelType.Unknown
diff --git a/modules/models/midjourney.py b/modules/models/midjourney.py
new file mode 100644
index 0000000000000000000000000000000000000000..65a560fc2427aad735d227d4d25b61b72f3ace5a
--- /dev/null
+++ b/modules/models/midjourney.py
@@ -0,0 +1,385 @@
+import base64
+import io
+import json
+import logging
+import pathlib
+import time
+import tempfile
+import os
+
+from datetime import datetime
+
+import requests
+import tiktoken
+from PIL import Image
+
+from modules.config import retrieve_proxy
+from modules.models.models import XMChat
+
+mj_proxy_api_base = os.getenv("MIDJOURNEY_PROXY_API_BASE")
+mj_discord_proxy_url = os.getenv("MIDJOURNEY_DISCORD_PROXY_URL")
+mj_temp_folder = os.getenv("MIDJOURNEY_TEMP_FOLDER")
+
+
+class Midjourney_Client(XMChat):
+
+ class FetchDataPack:
+ """
+ A class to store data for current fetching data from Midjourney API
+ """
+
+ action: str # current action, e.g. "IMAGINE", "UPSCALE", "VARIATION"
+ prefix_content: str # prefix content, task description and process hint
+ task_id: str # task id
+ start_time: float # task start timestamp
+ timeout: int # task timeout in seconds
+ finished: bool # whether the task is finished
+ prompt: str # prompt for the task
+
+ def __init__(self, action, prefix_content, task_id, timeout=900):
+ self.action = action
+ self.prefix_content = prefix_content
+ self.task_id = task_id
+ self.start_time = time.time()
+ self.timeout = timeout
+ self.finished = False
+
+ def __init__(self, model_name, api_key, user_name=""):
+ super().__init__(api_key, user_name)
+ self.model_name = model_name
+ self.history = []
+ self.api_key = api_key
+ self.headers = {
+ "Content-Type": "application/json",
+ "mj-api-secret": f"{api_key}"
+ }
+ self.proxy_url = mj_proxy_api_base
+ self.command_splitter = "::"
+
+ if mj_temp_folder:
+ temp = "./tmp"
+ if user_name:
+ temp = os.path.join(temp, user_name)
+ if not os.path.exists(temp):
+ os.makedirs(temp)
+ self.temp_path = tempfile.mkdtemp(dir=temp)
+ logging.info("mj temp folder: " + self.temp_path)
+ else:
+ self.temp_path = None
+
+ def use_mj_self_proxy_url(self, img_url):
+ """
+ replace discord cdn url with mj self proxy url
+ """
+ return img_url.replace(
+ "https://cdn.discordapp.com/",
+ mj_discord_proxy_url and mj_discord_proxy_url or "https://cdn.discordapp.com/"
+ )
+
+ def split_image(self, image_url):
+ """
+ when enabling temp dir, split image into 4 parts
+ """
+ with retrieve_proxy():
+ image_bytes = requests.get(image_url).content
+ img = Image.open(io.BytesIO(image_bytes))
+ width, height = img.size
+ # calculate half width and height
+ half_width = width // 2
+ half_height = height // 2
+ # create coordinates (top-left x, top-left y, bottom-right x, bottom-right y)
+ coordinates = [(0, 0, half_width, half_height),
+ (half_width, 0, width, half_height),
+ (0, half_height, half_width, height),
+ (half_width, half_height, width, height)]
+
+ images = [img.crop(c) for c in coordinates]
+ return images
+
+ def auth_mj(self):
+ """
+ auth midjourney api
+ """
+ # TODO: check if secret is valid
+ return {'status': 'ok'}
+
+ def request_mj(self, path: str, action: str, data: str, retries=3):
+ """
+ request midjourney api
+ """
+ mj_proxy_url = self.proxy_url
+ if mj_proxy_url is None or not (mj_proxy_url.startswith("http://") or mj_proxy_url.startswith("https://")):
+ raise Exception('please set MIDJOURNEY_PROXY_API_BASE in ENV or in config.json')
+
+ auth_ = self.auth_mj()
+ if auth_.get('error'):
+ raise Exception('auth not set')
+
+ fetch_url = f"{mj_proxy_url}/{path}"
+ # logging.info(f"[MJ Proxy] {action} {fetch_url} params: {data}")
+
+ for _ in range(retries):
+ try:
+ with retrieve_proxy():
+ res = requests.request(method=action, url=fetch_url, headers=self.headers, data=data)
+ break
+ except Exception as e:
+ print(e)
+
+ if res.status_code != 200:
+ raise Exception(f'{res.status_code} - {res.content}')
+
+ return res
+
+ def fetch_status(self, fetch_data: FetchDataPack):
+ """
+ fetch status of current task
+ """
+ if fetch_data.start_time + fetch_data.timeout < time.time():
+ fetch_data.finished = True
+ return "任务超时,请检查 dc 输出。描述:" + fetch_data.prompt
+
+ time.sleep(3)
+ status_res = self.request_mj(f"task/{fetch_data.task_id}/fetch", "GET", '')
+ status_res_json = status_res.json()
+ if not (200 <= status_res.status_code < 300):
+            raise Exception("任务状态获取失败:" + (status_res_json.get(
+                'error') or status_res_json.get('description') or '未知错误'))
+ else:
+ fetch_data.finished = False
+ if status_res_json['status'] == "SUCCESS":
+ content = status_res_json['imageUrl']
+ fetch_data.finished = True
+ elif status_res_json['status'] == "FAILED":
+ content = status_res_json['failReason'] or '未知原因'
+ fetch_data.finished = True
+ elif status_res_json['status'] == "NOT_START":
+ content = f'任务未开始,已等待 {time.time() - fetch_data.start_time:.2f} 秒'
+ elif status_res_json['status'] == "IN_PROGRESS":
+ content = '任务正在运行'
+ if status_res_json.get('progress'):
+ content += f",进度:{status_res_json['progress']}"
+ elif status_res_json['status'] == "SUBMITTED":
+ content = '任务已提交处理'
+ elif status_res_json['status'] == "FAILURE":
+ fetch_data.finished = True
+                return "任务处理失败,原因:" + (status_res_json['failReason'] or '未知原因')
+ else:
+ content = status_res_json['status']
+ if fetch_data.finished:
+ img_url = self.use_mj_self_proxy_url(status_res_json['imageUrl'])
+ if fetch_data.action == "DESCRIBE":
+ return f"\n{status_res_json['prompt']}"
+ time_cost_str = f"\n\n{fetch_data.action} 花费时间:{time.time() - fetch_data.start_time:.2f} 秒"
+ upscale_str = ""
+ variation_str = ""
+ if fetch_data.action in ["IMAGINE", "UPSCALE", "VARIATION"]:
+ upscale = [f'/mj UPSCALE{self.command_splitter}{i+1}{self.command_splitter}{fetch_data.task_id}'
+ for i in range(4)]
+ upscale_str = '\n放大图片:\n\n' + '\n\n'.join(upscale)
+ variation = [f'/mj VARIATION{self.command_splitter}{i+1}{self.command_splitter}{fetch_data.task_id}'
+ for i in range(4)]
+ variation_str = '\n图片变体:\n\n' + '\n\n'.join(variation)
+ if self.temp_path and fetch_data.action in ["IMAGINE", "VARIATION"]:
+ try:
+ images = self.split_image(img_url)
+ # save images to temp path
+ for i in range(4):
+ images[i].save(pathlib.Path(self.temp_path) / f"{fetch_data.task_id}_{i}.png")
+ img_str = '\n'.join(
+ [f"![{fetch_data.task_id}](/file={self.temp_path}/{fetch_data.task_id}_{i}.png)"
+ for i in range(4)])
+ return fetch_data.prefix_content + f"{time_cost_str}\n\n{img_str}{upscale_str}{variation_str}"
+ except Exception as e:
+ logging.error(e)
+ return fetch_data.prefix_content + \
+ f"{time_cost_str}[![{fetch_data.task_id}]({img_url})]({img_url}){upscale_str}{variation_str}"
+ else:
+ content = f"**任务状态:** [{(datetime.now()).strftime('%Y-%m-%d %H:%M:%S')}] - {content}"
+ content += f"\n\n花费时间:{time.time() - fetch_data.start_time:.2f} 秒"
+ if status_res_json['status'] == 'IN_PROGRESS' and status_res_json.get('imageUrl'):
+ img_url = status_res_json.get('imageUrl')
+ return f"{content}\n[![{fetch_data.task_id}]({img_url})]({img_url})"
+ return content
+ return None
+
+ def handle_file_upload(self, files, chatbot, language):
+ """
+ handle file upload
+ """
+ if files:
+ for file in files:
+ if file.name:
+ logging.info(f"尝试读取图像: {file.name}")
+ self.try_read_image(file.name)
+ if self.image_path is not None:
+ chatbot = chatbot + [((self.image_path,), None)]
+ if self.image_bytes is not None:
+ logging.info("使用图片作为输入")
+ return None, chatbot, None
+
+ def reset(self):
+ self.image_bytes = None
+ self.image_path = None
+ return [], "已重置"
+
+ def get_answer_at_once(self):
+ content = self.history[-1]['content']
+ answer = self.get_help()
+
+ if not content.lower().startswith("/mj"):
+ return answer, len(content)
+
+ prompt = content[3:].strip()
+ action = "IMAGINE"
+ first_split_index = prompt.find(self.command_splitter)
+ if first_split_index > 0:
+ action = prompt[:first_split_index]
+ if action not in ["IMAGINE", "DESCRIBE", "UPSCALE",
+ # "VARIATION", "BLEND", "REROLL"
+ ]:
+ raise Exception("任务提交失败:未知的任务类型")
+ else:
+ action_index = None
+ action_use_task_id = None
+ if action in ["VARIATION", "UPSCALE", "REROLL"]:
+ action_index = int(prompt[first_split_index + 2:first_split_index + 3])
+ action_use_task_id = prompt[first_split_index + 5:]
+
+ try:
+ res = None
+ if action == "IMAGINE":
+ data = {
+ "prompt": prompt
+ }
+ if self.image_bytes is not None:
+ data["base64"] = 'data:image/png;base64,' + self.image_bytes
+ res = self.request_mj("submit/imagine", "POST",
+ json.dumps(data))
+ elif action == "DESCRIBE":
+ res = self.request_mj("submit/describe", "POST",
+ json.dumps({"base64": 'data:image/png;base64,' + self.image_bytes}))
+ elif action == "BLEND":
+ res = self.request_mj("submit/blend", "POST", json.dumps(
+ {"base64Array": [self.image_bytes, self.image_bytes]}))
+ elif action in ["UPSCALE", "VARIATION", "REROLL"]:
+ res = self.request_mj(
+ "submit/change", "POST",
+ json.dumps({"action": action, "index": action_index, "taskId": action_use_task_id}))
+ res_json = res.json()
+ if not (200 <= res.status_code < 300) or (res_json['code'] not in [1, 22]):
+ answer = "任务提交失败:" + res_json.get('error', res_json.get('description', '未知错误'))
+ else:
+ task_id = res_json['result']
+ prefix_content = f"**画面描述:** {prompt}\n**任务ID:** {task_id}\n"
+
+ fetch_data = Midjourney_Client.FetchDataPack(
+ action=action,
+ prefix_content=prefix_content,
+ task_id=task_id,
+ )
+ fetch_data.prompt = prompt
+ while not fetch_data.finished:
+ answer = self.fetch_status(fetch_data)
+ except Exception as e:
+            logging.error("submit failed: %s", e)
+            answer = "任务提交错误:" + (str(e.args[0]) if e.args else '未知错误')
+
+        return answer, len(tiktoken.get_encoding("cl100k_base").encode(content))
+
+ def get_answer_stream_iter(self):
+ content = self.history[-1]['content']
+ answer = self.get_help()
+
+ if not content.lower().startswith("/mj"):
+ yield answer
+ return
+
+ prompt = content[3:].strip()
+ action = "IMAGINE"
+ first_split_index = prompt.find(self.command_splitter)
+ if first_split_index > 0:
+ action = prompt[:first_split_index]
+ if action not in ["IMAGINE", "DESCRIBE", "UPSCALE",
+ "VARIATION", "BLEND", "REROLL"
+ ]:
+ yield "任务提交失败:未知的任务类型"
+ return
+
+ action_index = None
+ action_use_task_id = None
+ if action in ["VARIATION", "UPSCALE", "REROLL"]:
+ action_index = int(prompt[first_split_index + 2:first_split_index + 3])
+ action_use_task_id = prompt[first_split_index + 5:]
+
+ try:
+ res = None
+ if action == "IMAGINE":
+ data = {
+ "prompt": prompt
+ }
+ if self.image_bytes is not None:
+ data["base64"] = 'data:image/png;base64,' + self.image_bytes
+ res = self.request_mj("submit/imagine", "POST",
+ json.dumps(data))
+ elif action == "DESCRIBE":
+ res = self.request_mj("submit/describe", "POST", json.dumps(
+ {"base64": 'data:image/png;base64,' + self.image_bytes}))
+ elif action == "BLEND":
+ res = self.request_mj("submit/blend", "POST", json.dumps(
+ {"base64Array": [self.image_bytes, self.image_bytes]}))
+ elif action in ["UPSCALE", "VARIATION", "REROLL"]:
+ res = self.request_mj(
+ "submit/change", "POST",
+ json.dumps({"action": action, "index": action_index, "taskId": action_use_task_id}))
+ res_json = res.json()
+ if not (200 <= res.status_code < 300) or (res_json['code'] not in [1, 22]):
+ yield "任务提交失败:" + res_json.get('error', res_json.get('description', '未知错误'))
+ else:
+ task_id = res_json['result']
+ prefix_content = f"**画面描述:** {prompt}\n**任务ID:** {task_id}\n"
+                content = f"[{(datetime.now()).strftime('%Y-%m-%d %H:%M:%S')}] - 任务提交成功:" + \
+                    (res_json.get('description') or '请稍等片刻')
+ yield content
+
+ fetch_data = Midjourney_Client.FetchDataPack(
+ action=action,
+ prefix_content=prefix_content,
+ task_id=task_id,
+ )
+ while not fetch_data.finished:
+ yield self.fetch_status(fetch_data)
+ except Exception as e:
+            logging.error('submit failed: %s', e)
+            yield "任务提交错误:" + (str(e.args[0]) if e.args else '未知错误')
+
+ def get_help(self):
+ return """```
+【绘图帮助】
+所有命令都需要以 /mj 开头,如:/mj a dog
+IMAGINE - 绘图,可以省略该命令,后面跟上绘图内容
+ /mj a dog
+ /mj IMAGINE::a cat
+DESCRIBE - 描述图片,需要在右下角上传需要描述的图片内容
+ /mj DESCRIBE::
+UPSCALE - 确认后放大图片,第一个数值为需要放大的图片(1~4),第二参数为任务ID
+ /mj UPSCALE::1::123456789
+ 请使用SD进行UPSCALE
+VARIATION - 图片变体,第一个数值为需要放大的图片(1~4),第二参数为任务ID
+ /mj VARIATION::1::123456789
+
+【绘图参数】
+所有命令默认会带上参数--v 5.2
+其他参数参照 https://docs.midjourney.com/docs/parameter-list
+长宽比 --aspect/--ar
+ --ar 1:2
+ --ar 16:9
+负面tag --no
+ --no plants
+ --no hands
+随机种子 --seed
+ --seed 1
+生成动漫风格(NijiJourney) --niji
+ --niji
+```
+"""
diff --git a/modules/models/models.py b/modules/models/models.py
index 23338ab3f20b9f541fa30c9879b28f488ccf9d04..fe1edbc91fc611881f6dff34affd23dc93596699 100644
--- a/modules/models/models.py
+++ b/modules/models/models.py
@@ -96,6 +96,7 @@ class OpenAIClient(BaseLLMModel):
# rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100)
rounded_usage = round(usage_data["total_usage"] / 100, 5)
usage_percent = round(usage_data["total_usage"] / usage_limit, 2)
+ from ..webui import get_html
# return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
return get_html("billing_info.html").format(
label = i18n("本月使用金额"),
@@ -162,7 +163,7 @@ class OpenAIClient(BaseLLMModel):
# 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求
if shared.state.completion_url != COMPLETION_URL:
- logging.info(f"使用自定义API URL: {shared.state.completion_url}")
+ logging.debug(f"使用自定义API URL: {shared.state.completion_url}")
with retrieve_proxy():
try:
@@ -208,7 +209,7 @@ class OpenAIClient(BaseLLMModel):
chunk_length = len(chunk)
try:
chunk = json.loads(chunk[6:])
- except json.JSONDecodeError:
+                except Exception:
print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}")
error_msg += chunk
continue
@@ -620,6 +621,10 @@ def get_model(
elif model_type == ModelType.LangchainChat:
from .azure import Azure_OpenAI_Client
model = Azure_OpenAI_Client(model_name, user_name=user_name)
+ elif model_type == ModelType.Midjourney:
+ from .midjourney import Midjourney_Client
+ mj_proxy_api_secret = os.getenv("MIDJOURNEY_PROXY_API_SECRET")
+ model = Midjourney_Client(model_name, mj_proxy_api_secret, user_name=user_name)
elif model_type == ModelType.Unknown:
raise ValueError(f"未知模型: {model_name}")
logging.info(msg)
diff --git a/modules/presets.py b/modules/presets.py
index 1efdaff22ef0a1ee2330b3f06530356b920ef816..a56d50e1c7aefae37b3252b983d445ea327471a4 100644
--- a/modules/presets.py
+++ b/modules/presets.py
@@ -69,6 +69,7 @@ ONLINE_MODELS = [
"yuanai-1.0-rhythm_poems",
"minimax-abab4-chat",
"minimax-abab5-chat",
+ "midjourney"
]
LOCAL_MODELS = [
diff --git a/modules/repo.py b/modules/repo.py
index d59d9f86e0b1ae69159a6db5411cfdb896bbc43f..2788de5b06a744bc436df677a973d89c26489a8a 100644
--- a/modules/repo.py
+++ b/modules/repo.py
@@ -51,14 +51,14 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_
return (result.stdout or "")
-def run_pip(command, desc=None, live=default_command_live):
+def run_pip(command, desc=None, pref=None, live=default_command_live):
# if args.skip_install:
# return
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
return run(
f'"{python}" -m pip {command} --prefer-binary{index_url_line}',
- desc=f"Installing {desc}...",
+ desc=f"{pref} Installing {desc}...",
errdesc=f"Couldn't install {desc}",
live=live
)
@@ -158,6 +158,12 @@ def get_tag_commit_hash(tag):
commit_hash = ""
return commit_hash
+def repo_need_stash():
+ try:
+ return subprocess.check_output([git, "diff-index", "--quiet", "HEAD", "--"], shell=False, encoding='utf8').strip() != ""
+ except Exception:
+ return True
+
def background_update():
# {git} fetch --all && ({git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f || ({git} stash && {git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f && {git} stash pop)) && {pip} install -r requirements.txt")
try:
@@ -165,47 +171,65 @@ def background_update():
latest_release_tag = latest_release["tag"]
latest_release_hash = get_tag_commit_hash(latest_release_tag)
need_pip = latest_release["need_pip"]
+ need_stash = repo_need_stash()
+ timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
current_branch = get_current_branch()
- updater_branch = f'tmp_{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}'
+ updater_branch = f'tmp_{timestamp}'
+ backup_branch = f'backup_{timestamp}'
track_repo = "https://github.com/GaiZhenbiao/ChuanhuChatGPT.git"
try:
try:
- run(f"{git} fetch {track_repo}", desc="Fetching from github...", live=False)
+ run(f"{git} fetch {track_repo}", desc="[Updater] Fetching from github...", live=False)
except Exception:
- logging.error(f"Update failed in fetching")
+ logging.error(f"Update failed in fetching, check your network connection")
return "failed"
- run(f'{git} stash save -a "updater-tmp"')
-
+ run(f'{git} stash push --include-untracked -m "updater-{timestamp}"',
+ desc=f'[Updater] Restoring you local changes on stash updater-{timestamp}', live=False) if need_stash else None
+
+ run(f"{git} checkout -b {backup_branch}", live=False)
run(f"{git} checkout -b {updater_branch}", live=False)
run(f"{git} reset --hard FETCH_HEAD", live=False)
- run(f"{git} reset --hard {latest_release_hash}", desc=f'Checking out {latest_release_tag}...')
+ run(f"{git} reset --hard {latest_release_hash}", desc=f'[Updater] Checking out {latest_release_tag}...', live=False)
run(f"{git} checkout {current_branch}", live=False)
try:
- run(f"{git} merge {updater_branch} -q", desc="Trying to apply latest update...")
+ run(f"{git} merge --no-edit {updater_branch} -q", desc=f"[Updater] Trying to apply latest update on version {latest_release_tag}...")
except Exception:
logging.error(f"Update failed in merging")
try:
- run(f"{git} merge --abort", desc="Canceling update...")
- run(f"{git} reset --hard {current_branch}", live=False)
- run(f"{git} stash pop", live=False)
+ run(f"{git} merge --abort", desc="[Updater] Conflict detected, canceling update...")
+ run(f"{git} reset --hard {backup_branch}", live=False)
run(f"{git} branch -D -f {updater_branch}", live=False)
+ run(f"{git} branch -D -f {backup_branch}", live=False)
+ run(f"{git} stash pop", live=False) if need_stash else None
logging.error(f"Update failed, but your file was safely reset to the state before the update.")
return "failed"
except Exception as e:
- logging.error(f"!!!Update failed in resetting, try to reset your files manually.")
+ logging.error(f"!!!Update failed in resetting, try to reset your files manually. {e}")
return "failed"
-
- run(f"{git} stash pop", live=False)
+
+ if need_stash:
+ try:
+ run(f"{git} stash apply", desc="[Updater] Trying to restore your local modifications...", live=False)
+ except Exception:
+ run(f"{git} reset --hard {backup_branch}", desc="[Updater] Conflict detected, canceling update...", live=False)
+ run(f"{git} branch -D -f {updater_branch}", live=False)
+ run(f"{git} branch -D -f {backup_branch}", live=False)
+ run(f"{git} stash pop", live=False)
+ logging.error(f"Update failed in applying your local changes, but your file was safely reset to the state before the update.")
+ return "failed"
+ run(f"{git} stash drop", live=False)
+
run(f"{git} branch -D -f {updater_branch}", live=False)
+ run(f"{git} branch -D -f {backup_branch}", live=False)
except Exception as e:
logging.error(f"Update failed: {e}")
return "failed"
if need_pip:
try:
- run_pip(f"install -r requirements.txt", "requirements")
+ run_pip(f"install -r requirements.txt", pref="[Updater]", desc="requirements", live=False)
except Exception:
logging.error(f"Update failed in pip install")
return "failed"
diff --git a/modules/train_func.py b/modules/train_func.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5e2c6aea1f3f28d4bb3f9f4fd2f6d761ba00a2
--- /dev/null
+++ b/modules/train_func.py
@@ -0,0 +1,161 @@
+import os
+import logging
+import traceback
+
+import openai
+import gradio as gr
+import ujson as json
+import commentjson
+import openpyxl
+
+import modules.presets as presets
+from modules.utils import get_file_hash, count_token
+from modules.presets import i18n
+
+def excel_to_jsonl(filepath, preview=False):
+ # 打开Excel文件
+ workbook = openpyxl.load_workbook(filepath)
+
+ # 获取第一个工作表
+ sheet = workbook.active
+
+ # 获取所有行数据
+ data = []
+ for row in sheet.iter_rows(values_only=True):
+ data.append(row)
+
+ # 构建字典列表
+ headers = data[0]
+ jsonl = []
+ for row in data[1:]:
+ row_data = dict(zip(headers, row))
+ if any(row_data.values()):
+ jsonl.append(row_data)
+ formatted_jsonl = []
+ for i in jsonl:
+ if "提问" in i and "答案" in i:
+ if "系统" in i :
+ formatted_jsonl.append({
+ "messages":[
+ {"role": "system", "content": i["系统"]},
+ {"role": "user", "content": i["提问"]},
+ {"role": "assistant", "content": i["答案"]}
+ ]
+ })
+ else:
+ formatted_jsonl.append({
+ "messages":[
+ {"role": "user", "content": i["提问"]},
+ {"role": "assistant", "content": i["答案"]}
+ ]
+ })
+ else:
+ logging.warning(f"跳过一行数据,因为没有找到提问和答案: {i}")
+ return formatted_jsonl
+
+def jsonl_save_to_disk(jsonl, filepath):
+ file_hash = get_file_hash(file_paths = [filepath])
+ os.makedirs("files", exist_ok=True)
+ save_path = f"files/{file_hash}.jsonl"
+ with open(save_path, "w") as f:
+ f.write("\n".join([json.dumps(i, ensure_ascii=False) for i in jsonl]))
+ return save_path
+
+def estimate_cost(ds):
+ dialogues = []
+ for l in ds:
+ for m in l["messages"]:
+ dialogues.append(m["content"])
+ dialogues = "\n".join(dialogues)
+ tokens = count_token(dialogues)
+ return f"Token 数约为 {tokens},预估每轮(epoch)费用约为 {tokens / 1000 * 0.008} 美元。"
+
+
+def handle_dataset_selection(file_src):
+ logging.info(f"Loading dataset {file_src.name}...")
+ preview = ""
+ if file_src.name.endswith(".jsonl"):
+ with open(file_src.name, "r") as f:
+ ds = [json.loads(l) for l in f.readlines()]
+ else:
+ ds = excel_to_jsonl(file_src.name)
+ preview = ds[0]
+
+ return preview, gr.update(interactive=True), estimate_cost(ds)
+
+def upload_to_openai(file_src):
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ dspath = file_src.name
+ msg = ""
+ logging.info(f"Uploading dataset {dspath}...")
+ if dspath.endswith(".xlsx"):
+ jsonl = excel_to_jsonl(dspath)
+ dspath = jsonl_save_to_disk(jsonl, dspath)
+ try:
+ uploaded = openai.File.create(
+ file=open(dspath, "rb"),
+ purpose='fine-tune'
+ )
+ return uploaded.id, f"上传成功"
+ except Exception as e:
+ traceback.print_exc()
+ return "", f"上传失败,原因:{ e }"
+
+def build_event_description(id, status, trained_tokens, name=i18n("暂时未知")):
+ # convert to markdown
+ return f"""
+ #### 训练任务 {id}
+
+ 模型名称:{name}
+
+ 状态:{status}
+
+ 已经训练了 {trained_tokens} 个token
+ """
+
+def start_training(file_id, suffix, epochs):
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ try:
+ job = openai.FineTuningJob.create(training_file=file_id, model="gpt-3.5-turbo", suffix=suffix, hyperparameters={"n_epochs": epochs})
+ return build_event_description(job.id, job.status, job.trained_tokens)
+ except Exception as e:
+ traceback.print_exc()
+ if "is not ready" in str(e):
+ return "训练出错,因为文件还没准备好。OpenAI 需要一点时间准备文件,过几分钟再来试试。"
+ return f"训练失败,原因:{ e }"
+
+def get_training_status():
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ active_jobs = [build_event_description(job["id"], job["status"], job["trained_tokens"], job["fine_tuned_model"]) for job in openai.FineTuningJob.list(limit=10)["data"] if job["status"] != "cancelled"]
+ return "\n\n".join(active_jobs), gr.update(interactive=True) if len(active_jobs) > 0 else gr.update(interactive=False)
+
+def handle_dataset_clear():
+ return gr.update(value=None), gr.update(interactive=False)
+
+def add_to_models():
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ succeeded_jobs = [job for job in openai.FineTuningJob.list()["data"] if job["status"] == "succeeded"]
+ extra_models = [job["fine_tuned_model"] for job in succeeded_jobs]
+ for i in extra_models:
+ if i not in presets.MODELS:
+ presets.MODELS.append(i)
+
+ with open('config.json', 'r') as f:
+ data = commentjson.load(f)
+ if 'extra_models' in data:
+ for i in extra_models:
+ if i not in data['extra_models']:
+ data['extra_models'].append(i)
+ else:
+ data['extra_models'] = extra_models
+ with open('config.json', 'w') as f:
+ commentjson.dump(data, f, indent=4)
+
+ return gr.update(choices=presets.MODELS), f"成功添加了 {len(succeeded_jobs)} 个模型。"
+
+def cancel_all_jobs():
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ jobs = [job for job in openai.FineTuningJob.list()["data"] if job["status"] not in ["cancelled", "succeeded"]]
+ for job in jobs:
+ openai.FineTuningJob.cancel(job["id"])
+ return f"成功取消了 {len(jobs)} 个训练任务。"
diff --git a/modules/utils.py b/modules/utils.py
index b061f2de51e6bec83dd18799521042867fe7341e..fcc7d4b198a8e796d3ef5016c8eb0226ca4d6f9a 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -2,17 +2,14 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
import logging
-import json
+import commentjson as json
import os
import datetime
-from datetime import timezone
-import hashlib
import csv
import requests
import re
import html
-import sys
-import subprocess
+import hashlib
import gradio as gr
from pypinyin import lazy_pinyin
@@ -129,9 +126,10 @@ def dislike(current_model, *args):
return current_model.dislike(*args)
-def count_token(message):
+def count_token(input_str):
encoding = tiktoken.get_encoding("cl100k_base")
- input_str = f"role: {message['role']}, content: {message['content']}"
+ if type(input_str) == dict:
+ input_str = f"role: {input_str['role']}, content: {input_str['content']}"
length = len(encoding.encode(input_str))
return length
@@ -241,7 +239,7 @@ def convert_bot_before_marked(chat_message):
code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
code_blocks = code_block_pattern.findall(chat_message)
non_code_parts = code_block_pattern.split(chat_message)[::2]
- result = []
+ result = []
for non_code, code in zip(non_code_parts, code_blocks + [""]):
if non_code.strip():
result.append(non_code)
@@ -542,10 +540,10 @@ def transfer_input(inputs):
def update_chuanhu():
from .repo import background_update
- print("Trying to update...")
+ print("[Updater] Trying to update...")
update_status = background_update()
if update_status == "success":
- print("Successfully updated, restart needed")
+ logging.info("Successfully updated, restart needed")
status = 'success'
return gr.Markdown.update(value=i18n("更新成功,请重启本程序")+status)
else:
@@ -658,3 +656,28 @@ def beautify_err_msg(err_msg):
if "Resource not found" in err_msg:
return i18n("请查看 config_example.json,配置 Azure OpenAI")
return err_msg
+
+def auth_from_conf(username, password):
+ try:
+ with open("config.json", encoding="utf-8") as f:
+ conf = json.load(f)
+ usernames, passwords = [i[0] for i in conf["users"]], [i[1] for i in conf["users"]]
+ if username in usernames:
+ if passwords[usernames.index(username)] == password:
+ return True
+ return False
+ except:
+ return False
+
+def get_file_hash(file_src=None, file_paths=None):
+ if file_src:
+ file_paths = [x.name for x in file_src]
+ file_paths.sort(key=lambda x: os.path.basename(x))
+
+ md5_hash = hashlib.md5()
+ for file_path in file_paths:
+ with open(file_path, "rb") as f:
+ while chunk := f.read(8192):
+ md5_hash.update(chunk)
+
+ return md5_hash.hexdigest()
diff --git a/readme/README_en.md b/readme/README_en.md
index a906ecb3ebc411f5cdeb33d661266a489a20c3b0..80af4fbbfba5d15e1cb6d1f4b67808ca76fa37d7 100644
--- a/readme/README_en.md
+++ b/readme/README_en.md
@@ -6,7 +6,7 @@
川虎 Chat 🐯 Chuanhu Chat
-
+
@@ -44,6 +44,23 @@
+## Supported LLM Models
+
+**LLM models via API**:
+
+- [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
+- [Google PaLM](https://developers.generativeai.google/products/palm)
+- [Inspur Yuan 1.0](https://air.inspur.com/home)
+- [MiniMax](https://api.minimax.chat/)
+- [XMChat](https://github.com/MILVLG/xmchat)
+
+**LLM models via local deployment**:
+
+- [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
+- [LLaMA](https://github.com/facebookresearch/llama)
+- [StableLM](https://github.com/Stability-AI/StableLM)
+- [MOSS](https://github.com/OpenLMLab/MOSS)
+
## Usage Tips
- To better control the ChatGPT, use System Prompt.
@@ -51,11 +68,11 @@
- To try again if the response is unsatisfactory, use `🔄 Regenerate` button.
- To start a new line in the input box, press Shift + Enter keys.
- To quickly switch between input history, press ↑ and ↓ key in the input box.
-- To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=)`.
-- To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Please be noted that the program must be running in order to be accessed via a public link.
+- To deploy the program onto a server, set `"server_name": "0.0.0.0", "server_port": <your port number>,` in `config.json`.
+- To get a public shared link, set `"share": true,` in `config.json`. Please note that the program must be running in order to be accessible via a public link.
- To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
-## Installation
+## Quickstart
```shell
git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
@@ -87,10 +104,6 @@ When you encounter problems, you should try manually pulling the latest changes
```
pip install -r requirements.txt
```
-3. Update Gradio
- ```
- pip install gradio --upgrade --force-reinstall
- ```
Generally, you can solve most problems by following these steps.
diff --git a/readme/README_ja.md b/readme/README_ja.md
index fc56eec0b81c22ff0a49e3960aa52ffd7d6dc5cb..1e0771070e0c9852f02a1024c65176f5a1ac46ba 100644
--- a/readme/README_ja.md
+++ b/readme/README_ja.md
@@ -6,7 +6,7 @@
川虎 Chat 🐯 Chuanhu Chat
-
+
@@ -44,17 +44,34 @@
+## サポートされている大規模言語モデル
+
+**APIを通じてアクセス可能な大規模言語モデル**:
+
+- [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
+- [Google PaLM](https://developers.generativeai.google/products/palm)
+- [Inspur Yuan 1.0](https://air.inspur.com/home)
+- [MiniMax](https://api.minimax.chat/)
+- [XMChat](https://github.com/MILVLG/xmchat)
+
+**ローカルに展開された大規模言語モデル**:
+
+- [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
+- [LLaMA](https://github.com/facebookresearch/llama)
+- [StableLM](https://github.com/Stability-AI/StableLM)
+- [MOSS](https://github.com/OpenLMLab/MOSS)
+
## 使う上でのTips
- ChatGPTをより適切に制御するために、システムプロンプトを使用できます。
- プロンプトテンプレートを使用するには、プロンプトテンプレートコレクションを選択し、ドロップダウンメニューから特定のプロンプトを選択。回答が不十分な場合は、`🔄再生成`ボタンを使って再試行します。
- 入力ボックスで改行するには、Shift + Enterキーを押してください。
- 入力履歴を素早く切り替えるには、入力ボックスで ↑と↓キーを押す。
-- プログラムをサーバにデプロイするには、プログラムの最終行を `demo.launch(server_name="0.0.0.0", server_port=)`に変更します。
-- 共有リンクを取得するには、プログラムの最後の行を `demo.launch(share=True)` に変更してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。
+- プログラムをサーバーに展開するには、`config.json` 内の `"server_name": "0.0.0.0", "server_port": <ポート番号>`を設定してください。
+- 共有リンクを取得するには、 `config.json` 内の `"share": true` を設定してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。
- Hugging Face Spacesで使用する場合: より速く、より安全に利用するために、**Duplicate Space**を使用し、自分のスペースでプログラムを実行することをお勧めします。
-## インストール
+## クイックスタート
```shell
git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
@@ -86,10 +103,6 @@ python ChuanhuChatbot.py
```
pip install -r requirements.txt
```
-3. Gradioを更新
- ```
- pip install gradio --upgrade --force-reinstall
- ```
一般的に、以下の手順でほとんどの問題を解決することができます。
diff --git a/requirements.txt b/requirements.txt
index 0877919ba78ca6a8099f7986185760870560c143..5dcb8cab519a78fe35591770fa3df4f5384f0dcd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,7 @@ tqdm
colorama
googlesearch-python
Pygments
-langchain==0.0.173
+langchain==0.0.276
markdown
PyPDF2
pdfplumber
@@ -21,7 +21,8 @@ duckduckgo-search
arxiv
wikipedia
google.generativeai
-openai
+openai>=0.27.9
unstructured
google-api-python-client
tabulate
+ujson
diff --git a/run_Windows.bat b/run_Windows.bat
index 4c18f9ccaeea0af972301ffdf48778641221f76d..5dd4dd065807bc83425e3876c1be14b5a234e253 100644
--- a/run_Windows.bat
+++ b/run_Windows.bat
@@ -1,5 +1,24 @@
@echo off
echo Opening ChuanhuChatGPT...
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
+if not exist "%~dp0\ChuanhuChat\Scripts" (
+ echo Creating venv...
+ python -m venv ChuanhuChat
+
+ cd /d "%~dp0\ChuanhuChat\Scripts"
+ call activate.bat
+
+ cd /d "%~dp0"
+ pip install -r requirements.txt
+)
+
+goto :activate_venv
+
+:launch
+%PYTHON% ChuanhuChatbot.py %*
+pause
+
+:activate_venv
+set PYTHON="%~dp0\ChuanhuChat\Scripts\Python.exe"
+echo venv %PYTHON%
+goto :launch
diff --git a/web_assets/chatbot.png b/web_assets/chatbot.png
new file mode 100644
index 0000000000000000000000000000000000000000..03a767753b7add4f4d80d5889c0314181ac6c750
Binary files /dev/null and b/web_assets/chatbot.png differ
diff --git a/web_assets/html/config_info.html b/web_assets/html/config_info.html
new file mode 100644
index 0000000000000000000000000000000000000000..899c6b0cbbd490e28fc36988bbbe9901d1df5729
--- /dev/null
+++ b/web_assets/html/config_info.html
@@ -0,0 +1,2 @@
+{bot_avatar}
+{user_avatar}
\ No newline at end of file
diff --git a/web_assets/javascript/ChuanhuChat.js b/web_assets/javascript/ChuanhuChat.js
index 7affc674d914a69eb5e2eaf8a4e61418f1b3fe78..1128b7782111381f4540282db574ba951e65f2f1 100644
--- a/web_assets/javascript/ChuanhuChat.js
+++ b/web_assets/javascript/ChuanhuChat.js
@@ -12,6 +12,7 @@ var user_input_tb = null;
var userInfoDiv = null;
var appTitleDiv = null;
var chatbot = null;
+var chatbotIndicator = null;
var chatbotWrap = null;
var apSwitch = null;
var messageBotDivs = null;
@@ -24,7 +25,6 @@ var sliders = null;
var updateChuanhuBtn = null;
var statusDisplay = null;
-
var isInIframe = (window.self !== window.top);
var currentTime = new Date().getTime();
var initialized = false;
@@ -43,7 +43,7 @@ function gradioLoaded(mutations) {
}
function initialize() {
- var needInit = {gradioContainer, apSwitch, user_input_tb, userInfoDiv, appTitleDiv, chatbot, chatbotWrap, statusDisplay, sliders, updateChuanhuBtn};
+ var needInit = {gradioContainer, apSwitch, user_input_tb, userInfoDiv, appTitleDiv, chatbot, chatbotIndicator, chatbotWrap, statusDisplay, sliders, updateChuanhuBtn};
initialized = true;
loginUserForm = gradioApp().querySelector(".gradio-container > .main > .wrap > .panel > .form")
@@ -52,6 +52,7 @@ function initialize() {
userInfoDiv = gradioApp().getElementById("user-info");
appTitleDiv = gradioApp().getElementById("app-title");
chatbot = gradioApp().querySelector('#chuanhu-chatbot');
+ chatbotIndicator = gradioApp().querySelector('#chuanhu-chatbot>div.wrap');
chatbotWrap = gradioApp().querySelector('#chuanhu-chatbot > .wrapper > .wrap');
apSwitch = gradioApp().querySelector('.apSwitch input[type="checkbox"]');
updateToast = gradioApp().querySelector("#toast-update");
@@ -80,10 +81,10 @@ function initialize() {
setChatbotHeight();
setChatbotScroll();
setSlider();
+ setAvatar();
if (!historyLoaded) loadHistoryHtml();
if (!usernameGotten) getUserInfo();
- mObserver.observe(chatbotWrap, { attributes: true, childList: true, subtree: true, characterData: true });
- submitObserver.observe(cancelBtn, { attributes: true, characterData: true });
+ chatbotObserver.observe(chatbotIndicator, { attributes: true });
const lastCheckTime = localStorage.getItem('lastCheckTime') || 0;
const longTimeNoCheck = currentTime - lastCheckTime > 3 * 24 * 60 * 60 * 1000;
@@ -220,44 +221,54 @@ function setChatbotScroll() {
chatbotWrap.scrollTo(0,scrollHeight)
}
+var botAvatarUrl = "";
+var userAvatarUrl = "";
+function setAvatar() {
+ var botAvatar = gradioApp().getElementById("config-bot-avatar-url").innerText;
+ var userAvatar = gradioApp().getElementById("config-user-avatar-url").innerText;
-let timeoutId;
-let isThrottled = false;
-// 监听chatWrap元素的变化,为 bot 消息添加复制按钮。
-var mObserver = new MutationObserver(function (mutationsList) {
- for (const mmutation of mutationsList) {
- if (mmutation.type === 'childList') {
- for (var node of mmutation.addedNodes) {
- if (node.nodeType === 1 && node.classList.contains('message')) {
- saveHistoryHtml();
- disableSendBtn();
- document.querySelectorAll('#chuanhu-chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
- }
- }
- for (var node of mmutation.removedNodes) {
- if (node.nodeType === 1 && node.classList.contains('message')) {
- saveHistoryHtml();
- disableSendBtn();
- document.querySelectorAll('#chuanhu-chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
- }
- }
- } else if (mmutation.type === 'attributes') {
- if (isThrottled) break; // 为了防止重复不断疯狂渲染,加上等待_(:з」∠)_
- isThrottled = true;
- clearTimeout(timeoutId);
- timeoutId = setTimeout(() => {
- isThrottled = false;
- document.querySelectorAll('#chuanhu-chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
- saveHistoryHtml();
- disableSendBtn();
- }, 1500);
- }
+ if (botAvatar == "none") {
+ botAvatarUrl = "";
+ } else if (isImgUrl(botAvatar)) {
+ botAvatarUrl = botAvatar;
+ } else {
+ // botAvatarUrl = "https://github.com/GaiZhenbiao/ChuanhuChatGPT/assets/70903329/aca3a7ec-4f1d-4667-890c-a6f47bf08f63";
+ botAvatarUrl = "/file=web_assets/chatbot.png"
}
-});
-var submitObserver = new MutationObserver(function (mutationsList) {
- document.querySelectorAll('#chuanhu-chatbot .message-wrap .message.bot').forEach(addChuanhuButton);
- saveHistoryHtml();
+ if (userAvatar == "none") {
+ userAvatarUrl = "";
+ } else if (isImgUrl(userAvatar)) {
+ userAvatarUrl = userAvatar;
+ } else {
+ userAvatarUrl = "data:image/svg+xml,%3Csvg width='32px' height='32px' viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E%3Cg stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E%3Crect fill-opacity='0.5' fill='%23bbbbbb' x='0' y='0' width='32' height='32'%3E%3C/rect%3E%3Cg transform='translate(5, 4)' fill='%23999999' fill-opacity='0.8' fill-rule='nonzero'%3E%3Cpath d='M2.29372246,24 L19.7187739,24 C20.4277609,24 20.985212,23.8373915 21.3911272,23.5121746 C21.7970424,23.1869576 22,22.7418004 22,22.1767029 C22,21.3161536 21.7458721,20.4130827 21.2376163,19.4674902 C20.7293605,18.5218977 19.9956681,17.6371184 19.036539,16.8131524 C18.07741,15.9891863 16.9210688,15.3177115 15.5675154,14.798728 C14.2139621,14.2797445 12.6914569,14.0202527 11,14.0202527 C9.30854307,14.0202527 7.78603793,14.2797445 6.43248458,14.798728 C5.07893122,15.3177115 3.92259002,15.9891863 2.96346097,16.8131524 C2.00433193,17.6371184 1.27063951,18.5218977 0.762383704,19.4674902 C0.254127901,20.4130827 0,21.3161536 0,22.1767029 C0,22.7418004 0.202957595,23.1869576 0.608872784,23.5121746 C1.01478797,23.8373915 1.57640453,24 2.29372246,24 Z M11.0124963,11.6521659 C11.9498645,11.6521659 12.8155943,11.3906214 13.6096856,10.8675324 C14.403777,10.3444433 15.042131,9.63605539 15.5247478,8.74236856 C16.0073646,7.84868174 16.248673,6.84722464 16.248673,5.73799727 C16.248673,4.65135034 16.0071492,3.67452644 15.5241015,2.80752559 C15.0410538,1.94052474 14.4024842,1.25585359 13.6083929,0.753512156 C12.8143016,0.251170719 11.9490027,0 11.0124963,0 C10.0759899,0 9.20860836,0.255422879 8.41035158,0.766268638 C7.6120948,1.2771144 6.97352528,1.96622098 6.49464303,2.8335884 C6.01576078,3.70095582 5.77631966,4.67803631 5.77631966,5.76482987 C5.77631966,6.86452653 6.01554533,7.85912886 6.49399667,8.74863683 C6.97244801,9.63814481 7.60871935,10.3444433 8.40281069,10.8675324 C9.19690203,11.3906214 10.0667972,11.6521659 11.0124963,11.6521659 
Z'%3E%3C/path%3E%3C/g%3E%3C/g%3E%3C/svg%3E";
+ }
+}
+
+function clearChatbot() {
+ clearHistoryHtml();
+ clearMessageRows();
+}
+
+function chatbotContentChanged(attempt = 1) {
+ for (var i = 0; i < attempt; i++) {
+ setTimeout(() => {
+ // clearMessageRows();
+ saveHistoryHtml();
+ disableSendBtn();
+ gradioApp().querySelectorAll('#chuanhu-chatbot .message-wrap .message.user').forEach((userElement) => {addAvatars(userElement, 'user')});
+ gradioApp().querySelectorAll('#chuanhu-chatbot .message-wrap .message.bot').forEach((botElement) => {addAvatars(botElement, 'bot'); addChuanhuButton(botElement)});
+ }, i === 0 ? 0 : 500);
+ }
+ // 理论上是不需要多次尝试执行的,可惜gradio的bug导致message可能没有渲染完毕,所以尝试500ms后再次执行
+}
+
+var chatbotObserver = new MutationObserver(() => {
+ clearMessageRows();
+ chatbotContentChanged(1);
+ if (chatbotIndicator.classList.contains('hide')) {
+ chatbotContentChanged(2);
+ }
});
// 监视页面内部 DOM 变动
diff --git a/web_assets/javascript/avatar.js b/web_assets/javascript/avatar.js
new file mode 100644
index 0000000000000000000000000000000000000000..14da1d3ba174320f8b52b6ceb18799909dff0c6e
--- /dev/null
+++ b/web_assets/javascript/avatar.js
@@ -0,0 +1,53 @@
+
+function addAvatars(messageElement, role='user'||'bot') {
+ if(messageElement.innerHTML === '') {
+ return;
+ }
+ if (messageElement.classList.contains('avatar-added') || messageElement.classList.contains('hide')) {
+ return;
+ }
+ if (role === 'bot' && botAvatarUrl === "" || role === 'user' && userAvatarUrl === "") {
+ messageElement.classList.add('avatar-added');
+ return;
+ }
+
+
+ const messageRow = document.createElement('div');
+ messageRow.classList.add('message-row');
+ messageElement.classList.add('avatar-added');
+
+ if (role === 'bot') {
+ messageRow.classList.add('bot-message-row');
+ } else if (role === 'user') {
+ messageRow.classList.add('user-message-row');
+ }
+
+ const avatarDiv = document.createElement('div');
+ avatarDiv.classList.add('chatbot-avatar');
+ if (role === 'bot') {
+ avatarDiv.classList.add('bot-avatar');
+ avatarDiv.innerHTML = ``;
+ } else if (role === 'user') {
+ avatarDiv.classList.add('user-avatar');
+ avatarDiv.innerHTML = ``;
+ }
+
+ messageElement.parentNode.replaceChild(messageRow, messageElement);
+
+ if (role === 'bot') {
+ messageRow.appendChild(avatarDiv);
+ messageRow.appendChild(messageElement);
+ } else if (role === 'user') {
+ messageRow.appendChild(messageElement);
+ messageRow.appendChild(avatarDiv);
+ }
+}
+
+function clearMessageRows() {
+ const messageRows = chatbotWrap.querySelectorAll('.message-row');
+ messageRows.forEach((messageRow) => {
+ if (messageRow.innerText === '') {
+ messageRow.parentNode.removeChild(messageRow);
+ }
+ });
+}
\ No newline at end of file
diff --git a/web_assets/javascript/chat-history.js b/web_assets/javascript/chat-history.js
index 12b8c799d217d608844ec7659f5d4b9f41dd7adc..1bc05355025a1a9abf83f51ff94b6815c604a6d6 100644
--- a/web_assets/javascript/chat-history.js
+++ b/web_assets/javascript/chat-history.js
@@ -25,14 +25,6 @@ function loadHistoryHtml() {
return; // logged in, do nothing
}
if (!historyLoaded) {
- var buttons = tempDiv.querySelectorAll('button.chuanhu-btn');
- var gradioCopyButtons = tempDiv.querySelectorAll('button.copy_code_button');
- for (var i = 0; i < buttons.length; i++) {
- buttons[i].parentNode.removeChild(buttons[i]);
- }
- for (var i = 0; i < gradioCopyButtons.length; i++) {
- gradioCopyButtons[i].parentNode.removeChild(gradioCopyButtons[i]);
- }
var fakeHistory = document.createElement('div');
fakeHistory.classList.add('history-message');
fakeHistory.innerHTML = tempDiv.innerHTML;
diff --git a/web_assets/javascript/message-button.js b/web_assets/javascript/message-button.js
index 826e484535734692012fe788659cb39deea1e41e..e16b065c8c0ea84b927ebbb46b7ff336d085b8d9 100644
--- a/web_assets/javascript/message-button.js
+++ b/web_assets/javascript/message-button.js
@@ -4,20 +4,15 @@
function addChuanhuButton(botElement) {
var rawMessage = botElement.querySelector('.raw-message');
var mdMessage = botElement.querySelector('.md-message');
- // var gradioCopyMsgBtn = botElement.querySelector('div.icon-button>button[title="copy"]'); // 获取 gradio 的 copy button,它可以读取真正的原始 message
- if (!rawMessage) {
+
+ if (!rawMessage) { // 如果没有 raw message,说明是早期历史记录,去除按钮
var buttons = botElement.querySelectorAll('button.chuanhu-btn');
for (var i = 0; i < buttons.length; i++) {
buttons[i].parentNode.removeChild(buttons[i]);
}
return;
}
- var oldCopyButton = null;
- var oldToggleButton = null;
- oldCopyButton = botElement.querySelector('button.copy-bot-btn');
- oldToggleButton = botElement.querySelector('button.toggle-md-btn');
- if (oldCopyButton) oldCopyButton.remove();
- if (oldToggleButton) oldToggleButton.remove();
+ botElement.querySelectorAll('button.copy-bot-btn, button.toggle-md-btn').forEach(btn => btn.remove()); // 就算原先有了,也必须重新添加,而不是跳过
// Copy bot button
var copyButton = document.createElement('button');
@@ -66,13 +61,14 @@ function addChuanhuButton(botElement) {
toggleButton.innerHTML = renderMarkdown ? mdIcon : rawIcon;
toggleButton.addEventListener('click', () => {
renderMarkdown = mdMessage.classList.contains('hideM');
- if (renderMarkdown){
+ if (renderMarkdown) {
renderMarkdownText(botElement);
toggleButton.innerHTML=rawIcon;
} else {
removeMarkdownText(botElement);
toggleButton.innerHTML=mdIcon;
}
+ chatbotContentChanged(1); // to set md or raw in read-only history html
});
botElement.insertBefore(toggleButton, copyButton);
@@ -85,8 +81,7 @@ function addChuanhuButton(botElement) {
function removeMarkdownText(message) {
var rawDiv = message.querySelector('.raw-message');
if (rawDiv) {
- rawPre = rawDiv.querySelector('pre');
- if (rawPre) rawDiv.innerHTML = rawPre.innerHTML;
+ rawDiv.innerHTML = rawDiv.querySelector('pre')?.innerHTML || rawDiv.innerHTML;
rawDiv.classList.remove('hideM');
}
var mdDiv = message.querySelector('.md-message');
diff --git a/web_assets/javascript/utils.js b/web_assets/javascript/utils.js
index 1cd783fe9191273f3eb3cb97578272e0abc77a7e..cda208a085be790cca1cf1a18bba27550caeca30 100644
--- a/web_assets/javascript/utils.js
+++ b/web_assets/javascript/utils.js
@@ -21,6 +21,22 @@ function transEventListeners(target, source, events) {
}
+function isImgUrl(url) {
+ const imageExtensions = /\.(jpg|jpeg|png|gif|bmp|webp)$/i;
+ if (url.startsWith('data:image/')) {
+ return true;
+ }
+ if (url.match(imageExtensions)) {
+ return true;
+ }
+ if (url.startsWith('http://') || url.startsWith('https://')) {
+ return true;
+ }
+
+ return false;
+}
+
+
/* NOTE: These reload functions are not used in the current version of the code.
* From stable-diffusion-webui
*/
diff --git a/web_assets/stylesheet/ChuanhuChat.css b/web_assets/stylesheet/ChuanhuChat.css
index c812c1933984c38fcd04ddc1f4b4b1765e203f9b..62d41dbd061d200ba5a6841b318aea22950d1791 100644
--- a/web_assets/stylesheet/ChuanhuChat.css
+++ b/web_assets/stylesheet/ChuanhuChat.css
@@ -103,3 +103,10 @@
content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 
15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
+
+#chatbot-buttons button {
+ display: inline-block;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
\ No newline at end of file
diff --git a/web_assets/stylesheet/chatbot.css b/web_assets/stylesheet/chatbot.css
index fdd8dd52f2014f2ec6ff626e38a8bb2fa2e77ff9..d99584282c052861e5e401add62c3b94eb48ec65 100644
--- a/web_assets/stylesheet/chatbot.css
+++ b/web_assets/stylesheet/chatbot.css
@@ -84,13 +84,13 @@ hr.append-display {
min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
}
[data-testid = "bot"] {
- max-width: 85%;
- border-bottom-left-radius: 0 !important;
+ max-width: calc(85% - 38px);
+ border-top-left-radius: 0 !important;
}
[data-testid = "user"] {
- max-width: 85%;
+ max-width: calc(85% - 38px);
width: auto !important;
- border-bottom-right-radius: 0 !important;
+ border-top-right-radius: 0 !important;
}
/* 屏幕宽度大于等于500px的设备 */
@@ -112,7 +112,10 @@ hr.append-display {
max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
}
[data-testid = "bot"] {
- max-width: 95% !important;
+ max-width: calc(98% - 20px) !important;
+ }
+ .chatbot-avatar {
+ display: none;
}
#app-title h1{
letter-spacing: -1px; font-size: 22px;
@@ -201,7 +204,7 @@ hr.append-display {
/* history message */
.wrapper>.wrap>.history-message {
- padding: 10px !important;
+ padding-bottom: 10px !important;
}
.history-message {
/* padding: 0 !important; */
@@ -239,4 +242,37 @@ hr.append-display {
/* #chuanhu-chatbot {
transition: height 0.3s ease;
note: find it better without transition animation...;
-} */
\ No newline at end of file
+} */
+
+
+.message-row {
+ flex-direction: row;
+ display: flex;
+ gap: 8px;
+ width: 100%;
+}
+.bot-message-row {
+ justify-content: flex-start;
+}
+.user-message-row {
+ justify-content: flex-end;
+}
+.chatbot-avatar {
+ width: 32px;
+ height: 32px;
+ background-color: transparent;
+ background-size: cover;
+ border-radius: 5px !important;
+}
+.chatbot-avatar.bot-avatar {
+ margin-left: 5px;
+}
+.chatbot-avatar.user-avatar {
+ margin-right: 10px;
+}
+.chatbot-avatar img {
+ border-radius: 5px !important;
+ object-fit: cover;
+ width: 100%;
+ height: 100%;
+}
\ No newline at end of file
diff --git a/web_assets/stylesheet/markdown.css b/web_assets/stylesheet/markdown.css
index d9e4608063eed4abcbff4971e3fa210d5af60798..6b2215ad0d9284192a8cad21aa79e904aa5e8b16 100644
--- a/web_assets/stylesheet/markdown.css
+++ b/web_assets/stylesheet/markdown.css
@@ -1,5 +1,5 @@
-.message-wrap>div img{
+.md-message img{
border-radius: 10px !important;
}