JohnSmith9982 committed on
Commit
97ab965
1 Parent(s): 3669269

Upload 114 files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. CITATION.cff +5 -5
  2. ChuanhuChatbot.py +55 -12
  3. Dockerfile +11 -8
  4. config_example.json +7 -0
  5. locale/en_US.json +23 -9
  6. locale/ja_JP.json +23 -9
  7. locale/ko_KR.json +89 -0
  8. locale/sv-SE.json +87 -0
  9. modules/.DS_Store +0 -0
  10. modules/__pycache__/__init__.cpython-311.pyc +0 -0
  11. modules/__pycache__/__init__.cpython-39.pyc +0 -0
  12. modules/__pycache__/base_model.cpython-311.pyc +0 -0
  13. modules/__pycache__/base_model.cpython-39.pyc +0 -0
  14. modules/__pycache__/config.cpython-311.pyc +0 -0
  15. modules/__pycache__/config.cpython-39.pyc +0 -0
  16. modules/__pycache__/index_func.cpython-311.pyc +0 -0
  17. modules/__pycache__/index_func.cpython-39.pyc +0 -0
  18. modules/__pycache__/llama_func.cpython-311.pyc +0 -0
  19. modules/__pycache__/llama_func.cpython-39.pyc +0 -0
  20. modules/__pycache__/models.cpython-311.pyc +0 -0
  21. modules/__pycache__/models.cpython-39.pyc +0 -0
  22. modules/__pycache__/overwrites.cpython-311.pyc +0 -0
  23. modules/__pycache__/overwrites.cpython-39.pyc +0 -0
  24. modules/__pycache__/pdf_func.cpython-311.pyc +0 -0
  25. modules/__pycache__/pdf_func.cpython-39.pyc +0 -0
  26. modules/__pycache__/presets.cpython-311.pyc +0 -0
  27. modules/__pycache__/presets.cpython-39.pyc +0 -0
  28. modules/__pycache__/repo.cpython-311.pyc +0 -0
  29. modules/__pycache__/shared.cpython-311.pyc +0 -0
  30. modules/__pycache__/shared.cpython-39.pyc +0 -0
  31. modules/__pycache__/train_func.cpython-311.pyc +0 -0
  32. modules/__pycache__/utils.cpython-311.pyc +0 -0
  33. modules/__pycache__/utils.cpython-39.pyc +0 -0
  34. modules/__pycache__/webui.cpython-311.pyc +0 -0
  35. modules/__pycache__/webui_locale.cpython-311.pyc +0 -0
  36. modules/__pycache__/webui_locale.cpython-39.pyc +0 -0
  37. modules/config.py +20 -9
  38. modules/index_func.py +2 -15
  39. modules/models/__pycache__/base_model.cpython-311.pyc +0 -0
  40. modules/models/__pycache__/models.cpython-311.pyc +0 -0
  41. modules/models/base_model.py +4 -1
  42. modules/models/midjourney.py +385 -0
  43. modules/models/models.py +7 -2
  44. modules/presets.py +1 -0
  45. modules/repo.py +40 -16
  46. modules/train_func.py +161 -0
  47. modules/utils.py +33 -10
  48. readme/README_en.md +21 -8
  49. readme/README_ja.md +21 -8
  50. requirements.txt +3 -2
CITATION.cff CHANGED
@@ -1,5 +1,5 @@
  cff-version: 1.2.0
- title: ChuanhuChatGPT
+ title: Chuanhu Chat
  message: >-
    If you use this software, please cite it using these
    metadata.
@@ -13,8 +13,8 @@ authors:
    orcid: https://orcid.org/0009-0005-0357-272X
  repository-code: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
  url: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
- abstract: Provided a light and easy to use interface for ChatGPT API
+ abstract: This software provides a light and easy-to-use interface for ChatGPT API and many LLMs.
  license: GPL-3.0
- commit: bd0034c37e5af6a90bd9c2f7dd073f6cd27c61af
+ commit: c6c08bc62ef80e37c8be52f65f9b6051a7eea1fa
- version: '20230405'
+ version: '20230709'
- date-released: '2023-04-05'
+ date-released: '2023-07-09'
ChuanhuChatbot.py CHANGED
@@ -1,8 +1,11 @@
  # -*- coding:utf-8 -*-
- import os
  import logging
- import sys
+ logging.basicConfig(
+     level=logging.INFO,
+     format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
+ )
 
+ import colorama
  import gradio as gr
 
  from modules import config
@@ -12,6 +15,7 @@ from modules.presets import *
  from modules.overwrites import *
  from modules.webui import *
  from modules.repo import *
+ from modules.train_func import *
  from modules.models.models import get_model
 
  logging.getLogger("httpx").setLevel(logging.WARNING)
@@ -40,6 +44,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
      status_display = gr.Markdown(get_geoip(), elem_id="status-display")
      with gr.Row(elem_id="float-display"):
          user_info = gr.Markdown(value="getting user info...", elem_id="user-info")
+         config_info = gr.HTML(get_html("config_info.html").format(bot_avatar=config.bot_avatar, user_avatar=config.user_avatar), visible=False, elem_id="config-info")
      update_info = gr.HTML(get_html("update.html").format(
          current_version=repo_tag_html(),
          version_time=version_time(),
@@ -63,13 +68,17 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
      with gr.Column(min_width=42, scale=1):
          submitBtn = gr.Button(value="", variant="primary", elem_id="submit-btn")
          cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel-btn")
- with gr.Row():
-     emptyBtn = gr.Button(
-         i18n("🧹 新的对话"), elem_id="empty-btn"
-     )
-     retryBtn = gr.Button(i18n("🔄 重新生成"))
-     delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
-     delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
+ with gr.Row(elem_id="chatbot-buttons"):
+     with gr.Column(min_width=120, scale=1):
+         emptyBtn = gr.Button(
+             i18n("🧹 新的对话"), elem_id="empty-btn"
+         )
+     with gr.Column(min_width=120, scale=1):
+         retryBtn = gr.Button(i18n("🔄 重新生成"))
+     with gr.Column(min_width=120, scale=1):
+         delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
+     with gr.Column(min_width=120, scale=1):
+         delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
  with gr.Row(visible=False) as like_dislike_area:
      with gr.Column(min_width=20, scale=1):
          likeBtn = gr.Button(i18n("👍"))
@@ -178,6 +187,25 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
      with gr.Column():
          downloadFile = gr.File(interactive=True)
 
+ with gr.Tab(label=i18n("微调")):
+     openai_train_status = gr.Markdown(label=i18n("训练状态"), value=i18n("在这里[查看使用介绍](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/%E4%BD%BF%E7%94%A8%E6%95%99%E7%A8%8B#%E5%BE%AE%E8%B0%83-gpt-35)"))
+
+     with gr.Tab(label=i18n("准备数据集")):
+         dataset_preview_json = gr.JSON(label=i18n("数据集预览"), readonly=True)
+         dataset_selection = gr.Files(label = i18n("选择数据集"), file_types=[".xlsx", ".jsonl"], file_count="single")
+         upload_to_openai_btn = gr.Button(i18n("上传到OpenAI"), variant="primary", interactive=False)
+
+     with gr.Tab(label=i18n("训练")):
+         openai_ft_file_id = gr.Textbox(label=i18n("文件ID"), value="", lines=1, placeholder=i18n("上传到 OpenAI 后自动填充"))
+         openai_ft_suffix = gr.Textbox(label=i18n("模型名称后缀"), value="", lines=1, placeholder=i18n("可选,用于区分不同的模型"))
+         openai_train_epoch_slider = gr.Slider(label=i18n("训练轮数(Epochs)"), minimum=1, maximum=100, value=3, step=1, interactive=True)
+         openai_start_train_btn = gr.Button(i18n("开始训练"), variant="primary", interactive=False)
+
+     with gr.Tab(label=i18n("状态")):
+         openai_status_refresh_btn = gr.Button(i18n("刷新状态"))
+         openai_cancel_all_jobs_btn = gr.Button(i18n("取消所有任务"))
+         add_to_models_btn = gr.Button(i18n("添加训练好的模型到模型列表"), interactive=False)
+
  with gr.Tab(label=i18n("高级")):
      gr.HTML(get_html("appearance_switcher.html").format(label=i18n("切换亮暗色主题")), elem_classes="insert-block")
      use_streaming_checkbox = gr.Checkbox(
@@ -292,9 +320,9 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
          elem_classes="view-only-textbox no-container",
      )
      # changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址"))
-
      updateChuanhuBtn = gr.Button(visible=False, elem_classes="invisible-btn", elem_id="update-chuanhu-btn")
 
+
  gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
  gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer")
 
@@ -376,7 +404,7 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
          inputs=[current_model],
          outputs=[chatbot, status_display],
          show_progress=True,
-         _js='clearHistoryHtml',
+         _js='clearChatbot',
      )
 
      retryBtn.click(**start_outputing_args).then(
@@ -466,6 +494,18 @@ with gr.Blocks(theme=small_and_beautiful_theme) as demo:
      historyFileSelectDropdown.change(**load_history_from_file_args)
      downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name], [saveFileName, systemPromptTxt, chatbot])
 
+     # Train
+     dataset_selection.upload(handle_dataset_selection, dataset_selection, [dataset_preview_json, upload_to_openai_btn, openai_train_status])
+     dataset_selection.clear(handle_dataset_clear, [], [dataset_preview_json, upload_to_openai_btn])
+     upload_to_openai_btn.click(upload_to_openai, [dataset_selection], [openai_ft_file_id, openai_train_status], show_progress=True)
+
+     openai_ft_file_id.change(lambda x: gr.update(interactive=True) if len(x) > 0 else gr.update(interactive=False), [openai_ft_file_id], [openai_start_train_btn])
+     openai_start_train_btn.click(start_training, [openai_ft_file_id, openai_ft_suffix, openai_train_epoch_slider], [openai_train_status])
+
+     openai_status_refresh_btn.click(get_training_status, [], [openai_train_status, add_to_models_btn])
+     add_to_models_btn.click(add_to_models, [], [model_select_dropdown, openai_train_status], show_progress=True)
+     openai_cancel_all_jobs_btn.click(cancel_all_jobs, [], [openai_train_status], show_progress=True)
+
      # Advanced
      max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None)
      temperature_slider.change(set_temperature, [current_model, temperature_slider], None)
@@ -513,4 +553,7 @@ demo.title = i18n("川虎Chat 🚀")
 
  if __name__ == "__main__":
      reload_javascript()
-     demo.queue(concurrency_count=CONCURRENT_COUNT).launch()
+     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
+         blocked_paths=["config.json"],
+         favicon_path="./web_assets/favicon.ico",
+     )
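Note: the new fine-tuning tab enables its buttons only after their inputs are filled, using Gradio's `.change` callback with `gr.update`. A minimal standalone sketch of that pattern (component names here are illustrative, not taken from the commit):

```python
import gradio as gr

with gr.Blocks() as demo:
    # Illustrative stand-ins for openai_ft_file_id / openai_start_train_btn.
    file_id_box = gr.Textbox(label="File ID")
    start_btn = gr.Button("Start training", interactive=False)

    # Re-enable the button only when the textbox actually has content.
    file_id_box.change(
        lambda x: gr.update(interactive=len(x) > 0),
        inputs=[file_id_box],
        outputs=[start_btn],
    )

if __name__ == "__main__":
    demo.launch()
```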
Dockerfile CHANGED
@@ -1,15 +1,18 @@
- FROM python:3.9 as builder
- RUN apt-get update && apt-get install -y build-essential
+ FROM python:3.9-slim-buster as builder
+ RUN apt-get update \
+     && apt-get install -y build-essential \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/*
  COPY requirements.txt .
  COPY requirements_advanced.txt .
- RUN pip install --user -r requirements.txt
- # RUN pip install --user -r requirements_advanced.txt
+ RUN pip install --user --no-cache-dir -r requirements.txt
+ # RUN pip install --user --no-cache-dir -r requirements_advanced.txt
 
- FROM python:3.9
- MAINTAINER iskoldt
+ FROM python:3.9-slim-buster
+ LABEL maintainer="iskoldt"
  COPY --from=builder /root/.local /root/.local
  ENV PATH=/root/.local/bin:$PATH
  COPY . /app
  WORKDIR /app
- ENV dockerrun yes
- CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
+ ENV dockerrun=yes
+ CMD ["python3", "-u", "ChuanhuChatbot.py","2>&1", "|", "tee", "/var/log/application.log"]
config_example.json CHANGED
@@ -7,6 +7,11 @@
  "xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
  "minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
  "minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
+ "midjourney_proxy_api_base": "https://xxx/mj", // 你的 https://github.com/novicezk/midjourney-proxy 代理地址
+ "midjourney_proxy_api_secret": "", // 你的 MidJourney Proxy API Secret,用于鉴权访问 api,可选
+ "midjourney_discord_proxy_url": "", // 你的 MidJourney Discord Proxy URL,用于对生成对图进行反代,可选
+ "midjourney_temp_folder": "./tmp", // 你的 MidJourney 临时文件夹,用于存放生成的图片,填空则关闭自动下载切图(直接显示MJ的四宫格图)
+
 
  //== Azure ==
  "openai_api_type": "openai", // 可选项:azure, openai
@@ -24,6 +29,8 @@
  "hide_history_when_not_logged_in": false, //未登录情况下是否不展示对话历史
  "check_update": true, //是否启用检查更新
  "default_model": "gpt-3.5-turbo", // 默认模型
+ "bot_avatar": "default", // 机器人头像,可填写图片链接、Data URL (base64),或者"none"(不显示头像)
+ "user_avatar": "default", // 用户头像,可填写图片链接、Data URL (base64),或者"none"(不显示头像)
 
  //== API 用量 ==
  "show_api_billing": false, //是否显示OpenAI API用量(启用需要填写sensitive_id)
locale/en_US.json CHANGED
@@ -32,24 +32,33 @@
  "📝 导出为Markdown": "📝 Export as Markdown",
  "默认保存于history文件夹": "Default save in history folder",
  "高级": "Advanced",
- "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ Caution: Changes require care. ⚠️\n\nIf unable to use, restore default settings.",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ Caution: Changes require care. ⚠️",
  "参数": "Parameters",
- "在这里输入停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
+ "停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
  "用于定位滥用行为": "Used to locate abuse",
  "用户名": "Username",
- "网络设置": "Network Settings",
  "在这里输入API-Host...": "Type in API-Host here...",
  "🔄 切换API地址": "🔄 Switch API Address",
- "在这里输入代理地址...": "Type in proxy address here...",
- "代理地址(示例:http://127.0.0.1:10809)": "Proxy address (example: http://127.0.0.1:10809)",
+ "未设置代理...": "No proxy...",
+ "代理地址": "Proxy address",
  "🔄 设置代理地址": "🔄 Set Proxy Address",
- "🔙 恢复默认设置": "🔙 Restore Default Settings",
+ "🔙 恢复默认网络设置": "🔙 Reset Network Settings",
+ "🔄 检查更新...": "🔄 Check for Update...",
+ "取消": "Cancel",
+ "更新": "Update",
+ "详情": "Details",
+ "好": "OK",
+ "更新成功,请重启本程序": "Updated successfully, please restart this program",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "Update failed, please try [manually updating](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)",
  "川虎Chat 🚀": "Chuanhu Chat 🚀",
  "开始实时传输回答……": "Start streaming output...",
  "Token 计数: ": "Token Count: ",
- ",本次对话累计消耗了 ": "Total cost for this dialogue is ",
+ ",本次对话累计消耗了 ": ", Total cost for this dialogue is ",
  "**获取API使用情况失败**": "**Failed to get API usage**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**Failed to get API usage**, correct sensitive_id needed in `config.json`",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**Failed to get API usage**, wrong or expired sensitive_id",
  "**本月使用金额** ": "**Monthly usage** ",
+ "本月使用金额": "Monthly usage",
  "获取API使用情况失败:": "Failed to get API usage:",
  "API密钥更改为了": "The API key is changed to",
  "JSON解析错误,收到的内容: ": "JSON parsing error, received content: ",
@@ -64,10 +73,15 @@
  "API key为空,请检查是否输入正确。": "API key is empty, check whether it is entered correctly.",
  "请输入对话内容。": "Enter the content of the conversation.",
  "账单信息不适用": "Billing information is not applicable",
- "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)[明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "developor: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) and [明昭MZhao](https://space.bilibili.com/24807452)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Developed by Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) and [Keldos](https://github.com/Keldos-Li)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
  "切换亮暗色主题": "Switch light/dark theme",
  "您的IP区域:未知。": "Your IP region: Unknown.",
  "获取IP地理位置失败。原因:": "Failed to get IP location. Reason: ",
  "。你仍然可以使用聊天功能。": ". You can still use the chat function.",
- "您的IP区域:": "Your IP region: "
+ "您的IP区域:": "Your IP region: ",
+ "总结": "Summarize",
+ "生成内容总结中……": "Generating content summary...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Due to the following reasons, Google refuses to provide an answer to PaLM: \n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ To ensure the security of API-Key, please modify the network settings in the configuration file `config.json`.",
+ "网络参数": "Network parameter"
  }
locale/ja_JP.json CHANGED
@@ -32,24 +32,33 @@
  "📝 导出为Markdown": "📝 Markdownでエクスポート",
  "默认保存于history文件夹": "デフォルトでhistoryフォルダに保存されます",
  "高级": "Advanced",
- "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ 変更には慎重に ⚠️\n\nもし動作しない場合は、デフォルト設定に戻してください。",
+ "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ 変更には慎重に ⚠️",
  "参数": "パラメータ",
- "在这里输入停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
+ "停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
  "用于定位滥用行为": "不正行為を特定するために使用されます",
  "用户名": "ユーザー名",
- "网络设置": "ネットワーク設定",
  "在这里输入API-Host...": "API-Hostを入力してください...",
  "🔄 切换API地址": "🔄 APIアドレスを切り替え",
- "在这里输入代理地址...": "プロキシアドレスを入力してください...",
- "代理地址(示例:http://127.0.0.1:10809)": "プロキシアドレス(例:http://127.0.0.1:10809)",
+ "未设置代理...": "代理が設定されていません...",
+ "代理地址": "プロキシアドレス",
  "🔄 设置代理地址": "🔄 プロキシアドレスを設定",
- "🔙 恢复默认设置": "🔙 デフォルト設定に戻す",
+ "🔙 恢复默认网络设置": "🔙 ネットワーク設定のリセット",
+ "🔄 检查更新...": "🔄 アップデートをチェック...",
+ "取消": "キャンセル",
+ "更新": "アップデート",
+ "详情": "詳細",
+ "好": "はい",
+ "更新成功,请重启本程序": "更新が成功しました、このプログラムを再起動してください",
+ "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "更新に失敗しました、[手動での更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)をお試しください。",
  "川虎Chat 🚀": "川虎Chat 🚀",
  "开始实时传输回答……": "ストリーム出力開始……",
  "Token 计数: ": "Token数: ",
  ",本次对话累计消耗了 ": ", 今の会話で消費合計 ",
  "**获取API使用情况失败**": "**API使用状況の取得に失敗しました**",
+ "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**API使用状況の取得に失敗しました**、`config.json`に正しい`sensitive_id`を入力する必要があります",
+ "**获取API使用情况失败**,sensitive_id错误或已过期": "**API使用状況の取得に失敗しました**、sensitive_idが間違っているか、期限切れです",
  "**本月使用金额** ": "**今月の使用料金** ",
+ "本月使用金额": "今月の使用料金",
  "获取API使用情况失败:": "API使用状況の取得に失敗しました:",
  "API密钥更改为了": "APIキーが変更されました",
  "JSON解析错误,收到的内容: ": "JSON解析エラー、受信内容: ",
@@ -64,10 +73,15 @@
  "API key为空,请检查是否输入正确。": "APIキーが入力されていません。正しく入力されているか確認してください。",
  "请输入对话内容。": "会話内容を入力してください。",
  "账单信息不适用": "課金情報は対象外です",
- "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)[明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452) と [Keldos](https://github.com/Keldos-Li)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
  "切换亮暗色主题": "テーマの明暗切替",
  "您的IP区域:未知。": "あなたのIPアドレス地域:不明",
  "获取IP地理位置失败。原因:": "IPアドレス地域の取得に失敗しました。理由:",
  "。你仍然可以使用聊天功能。": "。あなたはまだチャット機能を使用できます。",
- "您的IP区域:": "あなたのIPアドレス地域:"
- }
+ "您的IP区域:": "あなたのIPアドレス地域:",
+ "总结": "要約する",
+ "生成内容总结中……": "コンテンツ概要を生成しています...",
+ "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Googleは以下の理由から、PaLMの回答を返すことを拒否しています:\n\n",
+ "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ APIキーの安全性を確保するために、`config.json`ファイルでネットワーク設定を変更してください。",
+ "网络参数": "ネットワークパラメータ"
+ }
locale/ko_KR.json ADDED
@@ -0,0 +1,89 @@
{
  "未命名对话历史记录": "이름없는 대화 기록",
  "在这里输入": "여기에 입력하세요",
  "🧹 新的对话": "🧹 새로운 대화",
  "🔄 重新生成": "🔄 재생성",
  "🗑️ 删除最旧对话": "🗑️ 가장 오래된 대화 삭제",
  "🗑️ 删除最新对话": "🗑️ 최신 대화 삭제",
  "🗑️ 删除": "🗑️ 삭제",
  "模型": "LLM 모델",
  "多账号模式已开启,无需输入key,可直接开始对话": "다중 계정 모드가 활성화되어 있으므로 키를 입력할 필요가 없이 바로 대화를 시작할 수 있습니다",
  "**发送消息** 或 **提交key** 以显示额度": "**메세지를 전송** 하거나 **Key를 입력**하여 크레딧 표시",
  "选择模型": "모델 선택",
  "选择LoRA模型": "LoRA 모델 선택",
  "实时传输回答": "실시간 전송",
  "单轮对话": "단일 대화",
  "使用在线搜索": "온라인 검색 사용",
  "选择回复语言(针对搜索&索引功能)": "답장 언어 선택 (검색 & 인덱스용)",
  "上传索引文件": "업로드",
  "双栏pdf": "2-column pdf",
  "识别公式": "formula OCR",
  "在这里输入System Prompt...": "여기에 시스템 프롬프트를 입력하세요...",
  "加载Prompt模板": "프롬프트 템플릿 불러오기",
  "选择Prompt模板集合文件": "프롬프트 콜렉션 파일 선택",
  "🔄 刷新": "🔄 새로고침",
  "从Prompt模板中加载": "프롬프트 템플릿에서 불러오기",
  "保存/加载": "저장/불러오기",
  "保存/加载对话历史记录": "대화 기록 저장/불러오기",
  "从列表中加载对话": "리스트에서 대화 불러오기",
  "设置文件名: 默认为.json,可选为.md": "파일 이름 설정: 기본값: .json, 선택: .md",
  "设置保存文件名": "저장 파일명 설정",
  "对话历史记录": "대화 기록",
  "💾 保存对话": "💾 대화 저장",
  "📝 导出为Markdown": "📝 마크다운으로 내보내기",
  "默认保存于history文件夹": "히스토리 폴더에 기본 저장",
  "高级": "고급",
  "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ 주의: 변경시 주의하세요. ⚠️",
  "参数": "파라미터들",
  "停止符,用英文逗号隔开...": "여기에 정지 토큰 입력, ','로 구분됨...",
  "用于定位滥用行为": "악용 사례 파악에 활용됨",
  "用户名": "사용자 이름",
  "在这里输入API-Host...": "여기에 API host를 입력하세요...",
  "🔄 切换API地址": "🔄 API 주소 변경",
  "未设置代理...": "대리인이 설정되지 않았습니다...",
  "代理地址": "프록시 주소",
  "🔄 设置代理地址": "🔄 프록시 주소 설정",
  "🔙 恢复默认网络设置": "🔙 네트워크 설정 초기화",
  "🔄 检查更新...": "🔄 업데이트 확인...",
  "取消": "취소",
  "更新": "업데이트",
  "详情": "상세",
  "好": "예",
  "更新成功,请重启本程序": "업데이트 성공, 이 프로그램을 재시작 해주세요",
  "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "업데이트 실패, [수동 업데이트](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)를 시도하십시오",
  "川虎Chat 🚀": "Chuanhu Chat 🚀",
  "开始实时传输回答……": "실시간 응답 출력 시작...",
  "Token 计数: ": "토큰 수: ",
  ",本次对话累计消耗了 ": ",이 대화의 전체 비용은 ",
  "**获取API使用情况失败**": "**API 사용량 가져오기 실패**",
  "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**API 사용량 가져오기 실패**. `config.json`에 올바른 `sensitive_id`를 입력해야 합니다",
  "**获取API使用情况失败**,sensitive_id错误或已过期": "**API 사용량 가져오기 실패**. sensitive_id가 잘못되었거나 만료되었습니다",
  "**本月使用金额** ": "**이번 달 사용금액** ",
  "本月使用金额": "이번 달 사용금액",
  "获取API使用情况失败:": "API 사용량 가져오기 실패:",
  "API密钥更改为了": "API 키가 변경되었습니다.",
  "JSON解析错误,收到的内容: ": "JSON 파싱 에러, 응답: ",
  "模型设置为了:": "설정된 모델: ",
  "☹️发生了错误:": "☹️에러: ",
  "获取对话时发生错误,请查看后台日志": "대화를 가져오는 중 에러가 발생했습니다. 백그라운드 로그를 확인하세요",
  "请检查网络连接,或者API-Key是否有效。": "네트워크 연결 또는 API키가 유효한지 확인하세요",
  "连接超时,无法获取对话。": "연결 시간 초과, 대화를 가져올 수 없습니다.",
  "读取超时,无法获取对话。": "읽기 시간 초과, 대화를 가져올 수 없습니다.",
  "代理错误,无法获取对话。": "프록시 에러, 대화를 가져올 수 없습니다.",
  "SSL错误,无法获取对话。": "SSL 에러, 대화를 가져올 수 없습니다.",
  "API key为空,请检查是否输入正确。": "API 키가 비어 있습니다. 올바르게 입력되었는지 확인하십세요.",
  "请输入对话内容。": "대화 내용을 입력하세요.",
  "账单信息不适用": "청구 정보를 가져올 수 없습니다",
  "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "제작: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452), [Keldos](https://github.com/Keldos-Li)\n\n최신 코드 다운로드: [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
  "切换亮暗色主题": "라이트/다크 테마 전환",
  "您的IP区域:未知。": "IP 지역: 알 수 없음.",
  "获取IP地理位置失败。原因:": "다음과 같은 이유로 IP 위치를 가져올 수 없습니다. 이유: ",
  "。你仍然可以使用聊天功能。": ". 채팅 기능을 계속 사용할 수 있습니다.",
  "您的IP区域:": "당신의 IP 지역: ",
  "总结": "요약",
  "生成内容总结中……": "콘텐츠 요약 생성중...",
  "上传": "업로드",
  "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "구글은 다음과 같은 이유로 인해 PaLM의 응답을 거부합니다: \n\n",
  "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ API-Key의 안전을 보장하기 위해 네트워크 설정을 `config.json` 구성 파일에서 수정해주세요.",
  "网络参数": "네트워크 매개변수"
}
locale/sv-SE.json ADDED
@@ -0,0 +1,87 @@
{
  "未命名对话历史记录": "Onämnd Dialoghistorik",
  "在这里输入": "Skriv in här",
  "🧹 新的对话": "🧹 Ny Dialog",
  "🔄 重新生成": "🔄 Regenerera",
  "🗑️ 删除最旧对话": "🗑️ Ta bort äldsta dialogen",
  "🗑️ 删除最新对话": "🗑️ Ta bort senaste dialogen",
  "模型": "Modell",
  "多账号模式已开启,无需输入key,可直接开始对话": "Flerkontoläge är aktiverat, ingen nyckel behövs, du kan starta dialogen direkt",
  "**发送消息** 或 **提交key** 以显示额度": "**Skicka meddelande** eller **Skicka in nyckel** för att visa kredit",
  "选择模型": "Välj Modell",
  "选择LoRA模型": "Välj LoRA Modell",
  "实时传输回答": "Strömmande utdata",
  "单轮对话": "Enkel dialog",
  "使用在线搜索": "Använd online-sökning",
  "选择回复语言(针对搜索&索引功能)": "Välj svarspråk (för sök- och indexfunktion)",
  "上传索引文件": "Ladda upp",
  "双栏pdf": "Två-kolumns pdf",
  "识别公式": "Formel OCR",
  "在这里输入System Prompt...": "Skriv in System Prompt här...",
  "加载Prompt模板": "Ladda Prompt-mall",
  "选择Prompt模板集合文件": "Välj Prompt-mall Samlingsfil",
  "🔄 刷新": "🔄 Uppdatera",
  "从Prompt模板中加载": "Ladda från Prompt-mall",
  "保存/加载": "Spara/Ladda",
  "保存/加载对话历史记录": "Spara/Ladda Dialoghistorik",
  "从列表中加载对话": "Ladda dialog från lista",
  "设置文件名: 默认为.json,可选为.md": "Ställ in filnamn: standard är .json, valfritt är .md",
  "设置保存文件名": "Ställ in sparfilnamn",
  "对话历史记录": "Dialoghistorik",
  "💾 保存对话": "💾 Spara Dialog",
  "📝 导出为Markdown": "📝 Exportera som Markdown",
  "默认保存于history文件夹": "Sparas som standard i mappen history",
  "高级": "Avancerat",
  "# ⚠️ 务必谨慎更改 ⚠️": "# ⚠️ Var försiktig med ändringar. ⚠️",
  "参数": "Parametrar",
  "停止符,用英文逗号隔开...": "Skriv in stopptecken här, separerade med kommatecken...",
  "用于定位滥用行为": "Används för att lokalisera missbruk",
  "用户名": "Användarnamn",
  "在这里输入API-Host...": "Skriv in API-Host här...",
  "🔄 切换API地址": "🔄 Byt API-adress",
  "未设置代理...": "Inte inställd proxy...",
  "代理地址": "Proxyadress",
  "🔄 设置代理地址": "🔄 Ställ in Proxyadress",
  "🔙 恢复网络默认设置": "🔙 Återställ Nätverksinställningar",
  "🔄 检查更新...": "🔄 Sök efter uppdateringar...",
  "取消": "Avbryt",
  "更新": "Uppdatera",
  "详情": "Detaljer",
  "好": "OK",
  "更新成功,请重启本程序": "Uppdaterat framgångsrikt, starta om programmet",
  "更新失败,请尝试[手动更新](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)": "Uppdateringen misslyckades, prova att [uppdatera manuellt](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程#手动更新)",
  "川虎Chat 🚀": "Chuanhu Chat 🚀",
  "开始实时传输回答……": "Börjar strömma utdata...",
  "Token 计数: ": "Tokenräkning: ",
  ",本次对话累计消耗了 ": ", Total kostnad för denna dialog är ",
  "**获取API使用情况失败**": "**Misslyckades med att hämta API-användning**",
  "**获取API使用情况失败**,需在填写`config.json`中正确填写sensitive_id": "**Misslyckades med att hämta API-användning**, korrekt sensitive_id behövs i `config.json`",
  "**获取API使用情况失败**,sensitive_id错误或已过期": "**Misslyckades med att hämta API-användning**, felaktig eller utgången sensitive_id",
  "**本月使用金额** ": "**Månadens användning** ",
  "本月使用金额": "Månadens användning",
  "获取API使用情况失败:": "Misslyckades med att hämta API-användning:",
  "API密钥更改为了": "API-nyckeln har ändrats till",
  "JSON解析错误,收到的内容: ": "JSON-tolkningsfel, mottaget innehåll: ",
  "模型设置为了:": "Modellen är inställd på: ",
  "☹️发生了错误:": "☹️Fel: ",
  "获取对话时发生错误,请查看后台日志": "Ett fel uppstod när dialogen hämtades, kontrollera bakgrundsloggen",
  "请检查网络连接,或者API-Key是否有效。": "Kontrollera nätverksanslutningen eller om API-nyckeln är giltig.",
  "连接超时,无法获取对话。": "Anslutningen tog för lång tid, kunde inte hämta dialogen.",
  "读取超时,无法获取对话。": "Läsningen tog för lång tid, kunde inte hämta dialogen.",
  "代理错误,无法获取对话。": "Proxyfel, kunde inte hämta dialogen.",
  "SSL错误,无法获取对话。": "SSL-fel, kunde inte hämta dialogen.",
  "API key为空,请检查是否输入正确。": "API-nyckeln är tom, kontrollera om den är korrekt inmatad.",
  "请输入对话内容。": "Ange dialoginnehåll.",
  "账单信息不适用": "Faktureringsinformation är inte tillämplig",
  "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "Utvecklad av Bilibili [土川虎虎虎](https://space.bilibili.com/29125536), [明昭MZhao](https://space.bilibili.com/24807452) och [Keldos](https://github.com/Keldos-Li)\n\nLadda ner senaste koden från [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
  "切换亮暗色主题": "Byt ljus/mörk tema",
  "您的IP区域:未知。": "Din IP-region: Okänd.",
  "获取IP地理位置失败。原因:": "Misslyckades med att hämta IP-plats. Orsak: ",
  "。你仍然可以使用聊天功能。": ". Du kan fortfarande använda chattfunktionen.",
  "您的IP区域:": "Din IP-region: ",
  "总结": "Sammanfatta",
  "生成内容总结中……": "Genererar innehållssammanfattning...",
  "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "På grund av följande skäl vägrar Google att ge ett svar till PaLM: \n\n",
  "---\n⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置": "---\n⚠️ För att säkerställa säkerheten för API-nyckeln, vänligen ändra nätverksinställningarna i konfigurationsfilen `config.json`.",
  "网络参数": "nätverksparametrar"
}
modules/.DS_Store ADDED
Binary file (6.15 kB). View file
 
modules/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (172 Bytes). View file
 
modules/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (154 Bytes). View file
 
modules/__pycache__/base_model.cpython-311.pyc ADDED
Binary file (28.7 kB). View file
 
modules/__pycache__/base_model.cpython-39.pyc ADDED
Binary file (16.3 kB). View file
 
modules/__pycache__/config.cpython-311.pyc ADDED
Binary file (13.3 kB). View file
 
modules/__pycache__/config.cpython-39.pyc ADDED
Binary file (5.3 kB). View file
 
modules/__pycache__/index_func.cpython-311.pyc ADDED
Binary file (8.13 kB). View file
 
modules/__pycache__/index_func.cpython-39.pyc ADDED
Binary file (4.52 kB). View file
 
modules/__pycache__/llama_func.cpython-311.pyc ADDED
Binary file (9.44 kB). View file
 
modules/__pycache__/llama_func.cpython-39.pyc ADDED
Binary file (4.85 kB). View file
 
modules/__pycache__/models.cpython-311.pyc ADDED
Binary file (31.2 kB). View file
 
modules/__pycache__/models.cpython-39.pyc ADDED
Binary file (17.5 kB). View file
 
modules/__pycache__/overwrites.cpython-311.pyc ADDED
Binary file (5.01 kB). View file
 
modules/__pycache__/overwrites.cpython-39.pyc ADDED
Binary file (3.41 kB). View file
 
modules/__pycache__/pdf_func.cpython-311.pyc ADDED
Binary file (10.4 kB). View file
 
modules/__pycache__/pdf_func.cpython-39.pyc ADDED
Binary file (6.13 kB). View file
 
modules/__pycache__/presets.cpython-311.pyc ADDED
Binary file (7.87 kB). View file
 
modules/__pycache__/presets.cpython-39.pyc ADDED
Binary file (6.27 kB). View file
 
modules/__pycache__/repo.cpython-311.pyc ADDED
Binary file (14.1 kB). View file
 
modules/__pycache__/shared.cpython-311.pyc ADDED
Binary file (3.89 kB). View file
 
modules/__pycache__/shared.cpython-39.pyc ADDED
Binary file (2.42 kB). View file
 
modules/__pycache__/train_func.cpython-311.pyc ADDED
Binary file (11.7 kB). View file
 
modules/__pycache__/utils.cpython-311.pyc ADDED
Binary file (40.9 kB). View file
 
modules/__pycache__/utils.cpython-39.pyc ADDED
Binary file (23.7 kB). View file
 
modules/__pycache__/webui.cpython-311.pyc ADDED
Binary file (5.45 kB). View file
 
modules/__pycache__/webui_locale.cpython-311.pyc ADDED
Binary file (2.23 kB). View file
 
modules/__pycache__/webui_locale.cpython-39.pyc ADDED
Binary file (1.14 kB). View file
 
modules/config.py CHANGED
@@ -16,7 +16,6 @@ __all__ = [
  "auth_list",
  "dockerflag",
  "retrieve_proxy",
- "log_level",
  "advance_docs",
  "update_doc_config",
  "usage_limit",
@@ -92,10 +91,15 @@ os.environ["OPENAI_API_KEY"] = my_api_key
  os.environ["OPENAI_EMBEDDING_API_KEY"] = my_api_key
 
  if config.get("legacy_api_usage", False):
+     sensitive_id = my_api_key
+ else:
      sensitive_id = config.get("sensitive_id", "")
      sensitive_id = os.environ.get("SENSITIVE_ID", sensitive_id)
- else:
-     sensitive_id = my_api_key
+
+ # 模型配置
+ if "extra_models" in config:
+     presets.MODELS.extend(config["extra_models"])
+     logging.info(f"已添加额外的模型:{config['extra_models']}")
 
  google_palm_api_key = config.get("google_palm_api_key", "")
  google_palm_api_key = os.environ.get(
@@ -110,6 +114,15 @@ os.environ["MINIMAX_API_KEY"] = minimax_api_key
  minimax_group_id = config.get("minimax_group_id", "")
  os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
 
+ midjourney_proxy_api_base = config.get("midjourney_proxy_api_base", "")
+ os.environ["MIDJOURNEY_PROXY_API_BASE"] = midjourney_proxy_api_base
+ midjourney_proxy_api_secret = config.get("midjourney_proxy_api_secret", "")
+ os.environ["MIDJOURNEY_PROXY_API_SECRET"] = midjourney_proxy_api_secret
+ midjourney_discord_proxy_url = config.get("midjourney_discord_proxy_url", "")
+ os.environ["MIDJOURNEY_DISCORD_PROXY_URL"] = midjourney_discord_proxy_url
+ midjourney_temp_folder = config.get("midjourney_temp_folder", "")
+ os.environ["MIDJOURNEY_TEMP_FOLDER"] = midjourney_temp_folder
+
  load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
  "azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
 
@@ -155,12 +168,6 @@ def retrieve_openai_api(api_key=None):
      os.environ["OPENAI_API_KEY"] = old_api_key
 
 
- # 处理log
- log_level = config.get("log_level", "INFO")
- logging.basicConfig(
-     level=log_level,
-     format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
- )
 
  # 处理代理:
  http_proxy = os.environ.get("HTTP_PROXY", "")
@@ -267,3 +274,7 @@
  pass
 
  share = config.get("share", False)
+
+ # avatar
+ bot_avatar = config.get("bot_avatar", "default")
+ user_avatar = config.get("user_avatar", "default")
modules/index_func.py CHANGED
@@ -1,7 +1,7 @@
  import os
  import logging
 
- import colorama
+ import hashlib
  import PyPDF2
  from tqdm import tqdm
 
@@ -10,19 +10,6 @@ from modules.utils import *
  from modules.config import local_embedding
 
 
- def get_index_name(file_src):
-     file_paths = [x.name for x in file_src]
-     file_paths.sort(key=lambda x: os.path.basename(x))
-
-     md5_hash = hashlib.md5()
-     for file_path in file_paths:
-         with open(file_path, "rb") as f:
-             while chunk := f.read(8192):
-                 md5_hash.update(chunk)
-
-     return md5_hash.hexdigest()
-
-
  def get_documents(file_src):
      from langchain.schema import Document
      from langchain.text_splitter import TokenTextSplitter
@@ -113,7 +100,7 @@ def construct_index(
  embedding_limit = None if embedding_limit == 0 else embedding_limit
  separator = " " if separator == "" else separator
 
- index_name = get_index_name(file_src)
+ index_name = get_file_hash(file_src)
  index_path = f"./index/{index_name}"
  if local_embedding:
      from langchain.embeddings.huggingface import HuggingFaceEmbeddings
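`construct_index` now calls `get_file_hash`, which presumably lives in `modules/utils.py` (an assumption — that file is not shown in this 50-file view). A sketch of such a helper, reusing the MD5-over-file-contents logic of the removed `get_index_name`:

```python
import hashlib
import os

def get_file_hash(file_src):
    """Assumed shape of the shared helper: hash the uploaded files' contents
    so the index directory name is stable across identical uploads."""
    file_paths = sorted((x.name for x in file_src), key=os.path.basename)
    md5_hash = hashlib.md5()
    for file_path in file_paths:
        with open(file_path, "rb") as f:
            while chunk := f.read(8192):
                md5_hash.update(chunk)
    return md5_hash.hexdigest()
```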
modules/models/__pycache__/base_model.cpython-311.pyc CHANGED
Binary files a/modules/models/__pycache__/base_model.cpython-311.pyc and b/modules/models/__pycache__/base_model.cpython-311.pyc differ
 
modules/models/__pycache__/models.cpython-311.pyc CHANGED
Binary files a/modules/models/__pycache__/models.cpython-311.pyc and b/modules/models/__pycache__/models.cpython-311.pyc differ
 
modules/models/base_model.py CHANGED
@@ -141,6 +141,7 @@ class ModelType(Enum):
      ChuanhuAgent = 8
      GooglePaLM = 9
      LangchainChat = 10
+     Midjourney = 11
 
      @classmethod
      def get_type(cls, model_name: str):
@@ -166,7 +167,9 @@
          model_type = ModelType.ChuanhuAgent
      elif "palm" in model_name_lower:
          model_type = ModelType.GooglePaLM
-     elif "azure" or "api" in model_name_lower:
+     elif "midjourney" in model_name_lower:
+         model_type = ModelType.Midjourney
+     elif "azure" in model_name_lower or "api" in model_name_lower:
          model_type = ModelType.LangchainChat
      else:
          model_type = ModelType.Unknown
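Besides adding the Midjourney branch, this hunk fixes an operator-precedence bug: `"azure" or "api" in model_name_lower` parses as `("azure") or ("api" in model_name_lower)`, and a non-empty string literal is always truthy, so the old branch matched every model name and `ModelType.Unknown` was unreachable. A quick illustration:

```python
name = "gpt-3.5-turbo"

# Old condition: always True, because bool("azure") is True.
print(bool("azure" or "api" in name))            # True

# Fixed condition: tests both substrings against the name.
print("azure" in name or "api" in name)          # False
```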
modules/models/midjourney.py ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ import json
4
+ import logging
5
+ import pathlib
6
+ import time
7
+ import tempfile
8
+ import os
9
+
10
+ from datetime import datetime
11
+
12
+ import requests
13
+ import tiktoken
14
+ from PIL import Image
15
+
16
+ from modules.config import retrieve_proxy
17
+ from modules.models.models import XMChat
18
+
19
+ mj_proxy_api_base = os.getenv("MIDJOURNEY_PROXY_API_BASE")
20
+ mj_discord_proxy_url = os.getenv("MIDJOURNEY_DISCORD_PROXY_URL")
21
+ mj_temp_folder = os.getenv("MIDJOURNEY_TEMP_FOLDER")
22
+
23
+
24
+ class Midjourney_Client(XMChat):
25
+
26
+ class FetchDataPack:
27
+ """
28
+ A class to store data for current fetching data from Midjourney API
29
+ """
30
+
31
+ action: str # current action, e.g. "IMAGINE", "UPSCALE", "VARIATION"
32
+ prefix_content: str # prefix content, task description and process hint
33
+ task_id: str # task id
34
+ start_time: float # task start timestamp
35
+ timeout: int # task timeout in seconds
36
+ finished: bool # whether the task is finished
37
+ prompt: str # prompt for the task
38
+
39
+ def __init__(self, action, prefix_content, task_id, timeout=900):
40
+ self.action = action
41
+ self.prefix_content = prefix_content
42
+ self.task_id = task_id
43
+ self.start_time = time.time()
44
+ self.timeout = timeout
45
+ self.finished = False
46
+
47
+ def __init__(self, model_name, api_key, user_name=""):
48
+ super().__init__(api_key, user_name)
49
+ self.model_name = model_name
50
+ self.history = []
51
+ self.api_key = api_key
52
+ self.headers = {
53
+ "Content-Type": "application/json",
54
+ "mj-api-secret": f"{api_key}"
55
+ }
56
+ self.proxy_url = mj_proxy_api_base
57
+ self.command_splitter = "::"
58
+
59
+ if mj_temp_folder:
60
+ temp = "./tmp"
61
+ if user_name:
62
+ temp = os.path.join(temp, user_name)
63
+ if not os.path.exists(temp):
64
+ os.makedirs(temp)
65
+ self.temp_path = tempfile.mkdtemp(dir=temp)
66
+ logging.info("mj temp folder: " + self.temp_path)
67
+ else:
68
+ self.temp_path = None
69
+
70
+ def use_mj_self_proxy_url(self, img_url):
71
+ """
72
+ replace discord cdn url with mj self proxy url
73
+ """
74
+ return img_url.replace(
75
+ "https://cdn.discordapp.com/",
76
+ mj_discord_proxy_url and mj_discord_proxy_url or "https://cdn.discordapp.com/"
77
+ )
78
+
79
+ def split_image(self, image_url):
80
+ """
81
+ when enabling temp dir, split image into 4 parts
82
+ """
83
+ with retrieve_proxy():
84
+ image_bytes = requests.get(image_url).content
85
+ img = Image.open(io.BytesIO(image_bytes))
86
+ width, height = img.size
87
+ # calculate half width and height
88
+ half_width = width // 2
89
+ half_height = height // 2
90
+ # create coordinates (top-left x, top-left y, bottom-right x, bottom-right y)
91
+ coordinates = [(0, 0, half_width, half_height),
92
+ (half_width, 0, width, half_height),
93
+ (0, half_height, half_width, height),
94
+ (half_width, half_height, width, height)]
95
+
96
+ images = [img.crop(c) for c in coordinates]
97
+ return images
98
+
99
+ def auth_mj(self):
100
+ """
101
+ auth midjourney api
102
+ """
103
+ # TODO: check if secret is valid
104
+ return {'status': 'ok'}
105
+
106
+ def request_mj(self, path: str, action: str, data: str, retries=3):
107
+ """
108
+ request midjourney api
109
+ """
110
+ mj_proxy_url = self.proxy_url
111
+ if mj_proxy_url is None or not (mj_proxy_url.startswith("http://") or mj_proxy_url.startswith("https://")):
112
+ raise Exception('please set MIDJOURNEY_PROXY_API_BASE in ENV or in config.json')
113
+
114
+ auth_ = self.auth_mj()
115
+ if auth_.get('error'):
116
+ raise Exception('auth not set')
117
+
118
+ fetch_url = f"{mj_proxy_url}/{path}"
119
+ # logging.info(f"[MJ Proxy] {action} {fetch_url} params: {data}")
120
+
121
+ for _ in range(retries):
122
+ try:
123
+ with retrieve_proxy():
124
+ res = requests.request(method=action, url=fetch_url, headers=self.headers, data=data)
125
+ break
126
+ except Exception as e:
127
+ print(e)
128
+
129
+ if res.status_code != 200:
130
+ raise Exception(f'{res.status_code} - {res.content}')
131
+
132
+ return res
133
+
134
+ def fetch_status(self, fetch_data: FetchDataPack):
135
+ """
136
+ fetch status of current task
137
+ """
138
+ if fetch_data.start_time + fetch_data.timeout < time.time():
139
+ fetch_data.finished = True
140
+ return "任务超时,请检查 dc 输出。描述:" + fetch_data.prompt
141
+
142
+ time.sleep(3)
143
+ status_res = self.request_mj(f"task/{fetch_data.task_id}/fetch", "GET", '')
144
+ status_res_json = status_res.json()
145
+ if not (200 <= status_res.status_code < 300):
146
+ raise Exception("任务状态获取失败:" + status_res_json.get(
147
+ 'error') or status_res_json.get('description') or '未知错误')
148
+ else:
149
+ fetch_data.finished = False
150
+ if status_res_json['status'] == "SUCCESS":
151
+ content = status_res_json['imageUrl']
152
+ fetch_data.finished = True
153
+ elif status_res_json['status'] == "FAILED":
154
+ content = status_res_json['failReason'] or '未知原因'
155
+ fetch_data.finished = True
156
+ elif status_res_json['status'] == "NOT_START":
157
+ content = f'任务未开始,已等待 {time.time() - fetch_data.start_time:.2f} 秒'
158
+ elif status_res_json['status'] == "IN_PROGRESS":
159
+ content = '任务正在运行'
160
+ if status_res_json.get('progress'):
161
+ content += f",进度:{status_res_json['progress']}"
162
+ elif status_res_json['status'] == "SUBMITTED":
163
+ content = '任务已提交处理'
164
+ elif status_res_json['status'] == "FAILURE":
165
+ fetch_data.finished = True
166
+ return "任务处理失败,原因:" + status_res_json['failReason'] or '未知原因'
167
+ else:
168
+ content = status_res_json['status']
169
+ if fetch_data.finished:
170
+ img_url = self.use_mj_self_proxy_url(status_res_json['imageUrl'])
171
+ if fetch_data.action == "DESCRIBE":
172
+ return f"\n{status_res_json['prompt']}"
173
+ time_cost_str = f"\n\n{fetch_data.action} 花费时间:{time.time() - fetch_data.start_time:.2f} 秒"
174
+ upscale_str = ""
175
+ variation_str = ""
176
+ if fetch_data.action in ["IMAGINE", "UPSCALE", "VARIATION"]:
177
+ upscale = [f'/mj UPSCALE{self.command_splitter}{i+1}{self.command_splitter}{fetch_data.task_id}'
178
+ for i in range(4)]
179
+ upscale_str = '\n放大图片:\n\n' + '\n\n'.join(upscale)
180
+ variation = [f'/mj VARIATION{self.command_splitter}{i+1}{self.command_splitter}{fetch_data.task_id}'
181
+ for i in range(4)]
182
+ variation_str = '\n图片变体:\n\n' + '\n\n'.join(variation)
183
+ if self.temp_path and fetch_data.action in ["IMAGINE", "VARIATION"]:
184
+ try:
185
+ images = self.split_image(img_url)
186
+ # save images to temp path
187
+ for i in range(4):
188
+ images[i].save(pathlib.Path(self.temp_path) / f"{fetch_data.task_id}_{i}.png")
189
+ img_str = '\n'.join(
190
+ [f"![{fetch_data.task_id}](/file={self.temp_path}/{fetch_data.task_id}_{i}.png)"
191
+ for i in range(4)])
192
+ return fetch_data.prefix_content + f"{time_cost_str}\n\n{img_str}{upscale_str}{variation_str}"
193
+ except Exception as e:
194
+ logging.error(e)
195
+ return fetch_data.prefix_content + \
196
+ f"{time_cost_str}[![{fetch_data.task_id}]({img_url})]({img_url}){upscale_str}{variation_str}"
197
+ else:
198
+ content = f"**任务状态:** [{(datetime.now()).strftime('%Y-%m-%d %H:%M:%S')}] - {content}"
199
+ content += f"\n\n花费时间:{time.time() - fetch_data.start_time:.2f} 秒"
200
+ if status_res_json['status'] == 'IN_PROGRESS' and status_res_json.get('imageUrl'):
201
+ img_url = status_res_json.get('imageUrl')
202
+ return f"{content}\n[![{fetch_data.task_id}]({img_url})]({img_url})"
203
+ return content
204
+ return None
205
+
206
+ def handle_file_upload(self, files, chatbot, language):
207
+ """
208
+ handle file upload
209
+ """
210
+ if files:
211
+ for file in files:
212
+ if file.name:
213
+ logging.info(f"尝试读取图像: {file.name}")
214
+ self.try_read_image(file.name)
215
+ if self.image_path is not None:
216
+ chatbot = chatbot + [((self.image_path,), None)]
217
+ if self.image_bytes is not None:
218
+ logging.info("使用图片作为输入")
219
+ return None, chatbot, None
220
+
221
+ def reset(self):
222
+ self.image_bytes = None
223
+ self.image_path = None
224
+ return [], "已重置"
225
+
226
+ def get_answer_at_once(self):
227
+ content = self.history[-1]['content']
228
+ answer = self.get_help()
229
+
230
+ if not content.lower().startswith("/mj"):
231
+ return answer, len(content)
232
+
233
+ prompt = content[3:].strip()
234
+ action = "IMAGINE"
235
+ first_split_index = prompt.find(self.command_splitter)
236
+ if first_split_index > 0:
237
+ action = prompt[:first_split_index]
238
+ if action not in ["IMAGINE", "DESCRIBE", "UPSCALE",
239
+ # "VARIATION", "BLEND", "REROLL"
240
+ ]:
241
+ raise Exception("任务提交失败:未知的任务类型")
242
+ else:
243
+ action_index = None
244
+ action_use_task_id = None
245
+ if action in ["VARIATION", "UPSCALE", "REROLL"]:
246
+ action_index = int(prompt[first_split_index + 2:first_split_index + 3])
247
+ action_use_task_id = prompt[first_split_index + 5:]
248
+
249
+ try:
250
+ res = None
251
+ if action == "IMAGINE":
252
+ data = {
253
+ "prompt": prompt
254
+ }
255
+ if self.image_bytes is not None:
256
+ data["base64"] = 'data:image/png;base64,' + self.image_bytes
257
+ res = self.request_mj("submit/imagine", "POST",
258
+ json.dumps(data))
259
+ elif action == "DESCRIBE":
260
+ res = self.request_mj("submit/describe", "POST",
261
+ json.dumps({"base64": 'data:image/png;base64,' + self.image_bytes}))
262
+ elif action == "BLEND":
263
+ res = self.request_mj("submit/blend", "POST", json.dumps(
264
+ {"base64Array": [self.image_bytes, self.image_bytes]}))
265
+ elif action in ["UPSCALE", "VARIATION", "REROLL"]:
266
+ res = self.request_mj(
267
+ "submit/change", "POST",
268
+ json.dumps({"action": action, "index": action_index, "taskId": action_use_task_id}))
269
+ res_json = res.json()
270
+ if not (200 <= res.status_code < 300) or (res_json['code'] not in [1, 22]):
271
+ answer = "任务提交失败:" + res_json.get('error', res_json.get('description', '未知错误'))
272
+ else:
273
+ task_id = res_json['result']
274
+ prefix_content = f"**画面描述:** {prompt}\n**任务ID:** {task_id}\n"
275
+
276
+ fetch_data = Midjourney_Client.FetchDataPack(
277
+ action=action,
278
+ prefix_content=prefix_content,
279
+ task_id=task_id,
280
+ )
281
+ fetch_data.prompt = prompt
282
+ while not fetch_data.finished:
283
+ answer = self.fetch_status(fetch_data)
284
+ except Exception as e:
285
+ logging.error("submit failed", e)
286
+ answer = "任务提交错误:" + str(e.args[0]) if e.args else '未知错误'
287
+
288
+ return answer, tiktoken.get_encoding("cl100k_base").encode(content)
289
+
290
+ def get_answer_stream_iter(self):
291
+ content = self.history[-1]['content']
292
+ answer = self.get_help()
293
+
294
+ if not content.lower().startswith("/mj"):
295
+ yield answer
296
+ return
297
+
298
+ prompt = content[3:].strip()
299
+ action = "IMAGINE"
300
+ first_split_index = prompt.find(self.command_splitter)
301
+ if first_split_index > 0:
302
+ action = prompt[:first_split_index]
303
+ if action not in ["IMAGINE", "DESCRIBE", "UPSCALE",
304
+ "VARIATION", "BLEND", "REROLL"
305
+ ]:
306
+ yield "任务提交失败:未知的任务类型"
307
+ return
308
+
309
+ action_index = None
310
+ action_use_task_id = None
311
+ if action in ["VARIATION", "UPSCALE", "REROLL"]:
312
+ action_index = int(prompt[first_split_index + 2:first_split_index + 3])
313
+ action_use_task_id = prompt[first_split_index + 5:]
314
+
315
+ try:
316
+ res = None
317
+ if action == "IMAGINE":
318
+ data = {
319
+ "prompt": prompt
320
+ }
321
+ if self.image_bytes is not None:
322
+ data["base64"] = 'data:image/png;base64,' + self.image_bytes
323
+ res = self.request_mj("submit/imagine", "POST",
324
+ json.dumps(data))
325
+ elif action == "DESCRIBE":
326
+ res = self.request_mj("submit/describe", "POST", json.dumps(
327
+ {"base64": 'data:image/png;base64,' + self.image_bytes}))
328
+ elif action == "BLEND":
329
+ res = self.request_mj("submit/blend", "POST", json.dumps(
330
+ {"base64Array": [self.image_bytes, self.image_bytes]}))
331
+ elif action in ["UPSCALE", "VARIATION", "REROLL"]:
332
+ res = self.request_mj(
333
+ "submit/change", "POST",
334
+ json.dumps({"action": action, "index": action_index, "taskId": action_use_task_id}))
335
+ res_json = res.json()
336
+ if not (200 <= res.status_code < 300) or (res_json['code'] not in [1, 22]):
337
+ yield "任务提交失败:" + res_json.get('error', res_json.get('description', '未知错误'))
338
+ else:
339
+ task_id = res_json['result']
340
+ prefix_content = f"**画面描述:** {prompt}\n**任务ID:** {task_id}\n"
341
+ content = f"[{(datetime.now()).strftime('%Y-%m-%d %H:%M:%S')}] - 任务提交成功:" + \
342
+ res_json.get('description') or '请稍等片刻'
343
+ yield content
344
+
345
+ fetch_data = Midjourney_Client.FetchDataPack(
346
+ action=action,
347
+ prefix_content=prefix_content,
348
+ task_id=task_id,
349
+ )
350
+ while not fetch_data.finished:
351
+ yield self.fetch_status(fetch_data)
352
+ except Exception as e:
353
+ logging.error('submit failed: %s', e)
354
+ yield "任务提交错误:" + str(e.args[0]) if e.args else '未知错误'
355
+
356
+ def get_help(self):
357
+ return """```
358
+ 【绘图帮助】
359
+ 所有命令都需要以 /mj 开头,如:/mj a dog
360
+ IMAGINE - 绘图,可以省略该命令,后面跟上绘图内容
361
+ /mj a dog
362
+ /mj IMAGINE::a cat
363
+ DESCRIBE - 描述图片,需要在右下角上传需要描述的图片内容
364
+ /mj DESCRIBE::
365
+ UPSCALE - 确认后放大图片,第一个数值为需要放大的图片(1~4),第二参数为任务ID
366
+ /mj UPSCALE::1::123456789
367
+ 请使用SD进行UPSCALE
368
+ VARIATION - 图片变体,第一个数值为需要变体的图片(1~4),第二参数为任务ID
369
+ /mj VARIATION::1::123456789
370
+
371
+ 【绘图参数】
372
+ 所有命令默认会带上参数--v 5.2
373
+ 其他参数参照 https://docs.midjourney.com/docs/parameter-list
374
+ 长宽比 --aspect/--ar
375
+ --ar 1:2
376
+ --ar 16:9
377
+ 负面tag --no
378
+ --no plants
379
+ --no hands
380
+ 随机种子 --seed
381
+ --seed 1
382
+ 生成动漫风格(NijiJourney) --niji
383
+ --niji
384
+ ```
385
+ """
modules/models/models.py CHANGED
@@ -96,6 +96,7 @@ class OpenAIClient(BaseLLMModel):
96
  # rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100)
97
  rounded_usage = round(usage_data["total_usage"] / 100, 5)
98
  usage_percent = round(usage_data["total_usage"] / usage_limit, 2)
 
99
  # return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
100
  return get_html("billing_info.html").format(
101
  label = i18n("本月使用金额"),
@@ -162,7 +163,7 @@ class OpenAIClient(BaseLLMModel):
162
 
163
  # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求
164
  if shared.state.completion_url != COMPLETION_URL:
165
- logging.info(f"使用自定义API URL: {shared.state.completion_url}")
166
 
167
  with retrieve_proxy():
168
  try:
@@ -208,7 +209,7 @@ class OpenAIClient(BaseLLMModel):
208
  chunk_length = len(chunk)
209
  try:
210
  chunk = json.loads(chunk[6:])
211
- except json.JSONDecodeError:
212
  print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}")
213
  error_msg += chunk
214
  continue
@@ -620,6 +621,10 @@ def get_model(
620
  elif model_type == ModelType.LangchainChat:
621
  from .azure import Azure_OpenAI_Client
622
  model = Azure_OpenAI_Client(model_name, user_name=user_name)
 
 
 
 
623
  elif model_type == ModelType.Unknown:
624
  raise ValueError(f"未知模型: {model_name}")
625
  logging.info(msg)
 
96
  # rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100)
97
  rounded_usage = round(usage_data["total_usage"] / 100, 5)
98
  usage_percent = round(usage_data["total_usage"] / usage_limit, 2)
99
+ from ..webui import get_html
100
  # return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
101
  return get_html("billing_info.html").format(
102
  label = i18n("本月使用金额"),
 
163
 
164
  # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求
165
  if shared.state.completion_url != COMPLETION_URL:
166
+ logging.debug(f"使用自定义API URL: {shared.state.completion_url}")
167
 
168
  with retrieve_proxy():
169
  try:
 
209
  chunk_length = len(chunk)
210
  try:
211
  chunk = json.loads(chunk[6:])
212
+ except Exception:
213
  print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}")
214
  error_msg += chunk
215
  continue
 
621
  elif model_type == ModelType.LangchainChat:
622
  from .azure import Azure_OpenAI_Client
623
  model = Azure_OpenAI_Client(model_name, user_name=user_name)
624
+ elif model_type == ModelType.Midjourney:
625
+ from .midjourney import Midjourney_Client
626
+ mj_proxy_api_secret = os.getenv("MIDJOURNEY_PROXY_API_SECRET")
627
+ model = Midjourney_Client(model_name, mj_proxy_api_secret, user_name=user_name)
628
  elif model_type == ModelType.Unknown:
629
  raise ValueError(f"未知模型: {model_name}")
630
  logging.info(msg)
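The new `ModelType.Midjourney` branch above builds `Midjourney_Client` with a secret read from the `MIDJOURNEY_PROXY_API_SECRET` environment variable. A minimal sketch of supplying that variable before launch follows; the value is a placeholder, and any other proxy settings (such as its base URL) are not shown in this diff.

```python
# Not repo code: provides the variable the new ModelType.Midjourney branch reads
# via os.getenv("MIDJOURNEY_PROXY_API_SECRET"). The value is a placeholder for
# whatever secret your Midjourney proxy service expects.
import os

os.environ.setdefault("MIDJOURNEY_PROXY_API_SECRET", "<your-mj-proxy-secret>")
print("secret configured:", os.getenv("MIDJOURNEY_PROXY_API_SECRET") is not None)
```

Selecting the new `midjourney` entry added to `ONLINE_MODELS` (see the presets.py change below) then routes model construction through this branch.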
modules/presets.py CHANGED
@@ -69,6 +69,7 @@ ONLINE_MODELS = [
69
  "yuanai-1.0-rhythm_poems",
70
  "minimax-abab4-chat",
71
  "minimax-abab5-chat",
 
72
  ]
73
 
74
  LOCAL_MODELS = [
 
69
  "yuanai-1.0-rhythm_poems",
70
  "minimax-abab4-chat",
71
  "minimax-abab5-chat",
72
+ "midjourney"
73
  ]
74
 
75
  LOCAL_MODELS = [
modules/repo.py CHANGED
@@ -51,14 +51,14 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_
51
  return (result.stdout or "")
52
 
53
 
54
- def run_pip(command, desc=None, live=default_command_live):
55
  # if args.skip_install:
56
  # return
57
 
58
  index_url_line = f' --index-url {index_url}' if index_url != '' else ''
59
  return run(
60
  f'"{python}" -m pip {command} --prefer-binary{index_url_line}',
61
- desc=f"Installing {desc}...",
62
  errdesc=f"Couldn't install {desc}",
63
  live=live
64
  )
@@ -158,6 +158,12 @@ def get_tag_commit_hash(tag):
158
  commit_hash = "<none>"
159
  return commit_hash
160
 
 
 
 
 
 
 
161
  def background_update():
162
  # {git} fetch --all && ({git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f || ({git} stash && {git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f && {git} stash pop)) && {pip} install -r requirements.txt")
163
  try:
@@ -165,47 +171,65 @@ def background_update():
165
  latest_release_tag = latest_release["tag"]
166
  latest_release_hash = get_tag_commit_hash(latest_release_tag)
167
  need_pip = latest_release["need_pip"]
 
168
 
 
169
  current_branch = get_current_branch()
170
- updater_branch = f'tmp_{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}'
 
171
  track_repo = "https://github.com/GaiZhenbiao/ChuanhuChatGPT.git"
172
  try:
173
  try:
174
- run(f"{git} fetch {track_repo}", desc="Fetching from github...", live=False)
175
  except Exception:
176
- logging.error(f"Update failed in fetching")
177
  return "failed"
178
 
179
- run(f'{git} stash save -a "updater-tmp"')
180
-
 
 
181
  run(f"{git} checkout -b {updater_branch}", live=False)
182
  run(f"{git} reset --hard FETCH_HEAD", live=False)
183
- run(f"{git} reset --hard {latest_release_hash}", desc=f'Checking out {latest_release_tag}...')
184
  run(f"{git} checkout {current_branch}", live=False)
185
 
186
  try:
187
- run(f"{git} merge {updater_branch} -q", desc="Trying to apply latest update...")
188
  except Exception:
189
  logging.error(f"Update failed in merging")
190
  try:
191
- run(f"{git} merge --abort", desc="Canceling update...")
192
- run(f"{git} reset --hard {current_branch}", live=False)
193
- run(f"{git} stash pop", live=False)
194
  run(f"{git} branch -D -f {updater_branch}", live=False)
 
 
195
  logging.error(f"Update failed, but your file was safely reset to the state before the update.")
196
  return "failed"
197
  except Exception as e:
198
- logging.error(f"!!!Update failed in resetting, try to reset your files manually.")
199
  return "failed"
200
-
201
- run(f"{git} stash pop", live=False)
 
 
 
 
 
 
 
 
 
 
 
202
  run(f"{git} branch -D -f {updater_branch}", live=False)
 
203
  except Exception as e:
204
  logging.error(f"Update failed: {e}")
205
  return "failed"
206
  if need_pip:
207
  try:
208
- run_pip(f"install -r requirements.txt", "requirements")
209
  except Exception:
210
  logging.error(f"Update failed in pip install")
211
  return "failed"
 
51
  return (result.stdout or "")
52
 
53
 
54
+ def run_pip(command, desc=None, pref=None, live=default_command_live):
55
  # if args.skip_install:
56
  # return
57
 
58
  index_url_line = f' --index-url {index_url}' if index_url != '' else ''
59
  return run(
60
  f'"{python}" -m pip {command} --prefer-binary{index_url_line}',
61
+ desc=f"{pref} Installing {desc}...",
62
  errdesc=f"Couldn't install {desc}",
63
  live=live
64
  )
 
158
  commit_hash = "<none>"
159
  return commit_hash
160
 
161
+ def repo_need_stash():
162
+ try:
163
+ return subprocess.check_output([git, "diff-index", "--quiet", "HEAD", "--"], shell=False, encoding='utf8').strip() != ""
164
+ except Exception:
165
+ return True
166
+
167
  def background_update():
168
  # {git} fetch --all && ({git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f || ({git} stash && {git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f && {git} stash pop)) && {pip} install -r requirements.txt")
169
  try:
 
171
  latest_release_tag = latest_release["tag"]
172
  latest_release_hash = get_tag_commit_hash(latest_release_tag)
173
  need_pip = latest_release["need_pip"]
174
+ need_stash = repo_need_stash()
175
 
176
+ timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
177
  current_branch = get_current_branch()
178
+ updater_branch = f'tmp_{timestamp}'
179
+ backup_branch = f'backup_{timestamp}'
180
  track_repo = "https://github.com/GaiZhenbiao/ChuanhuChatGPT.git"
181
  try:
182
  try:
183
+ run(f"{git} fetch {track_repo}", desc="[Updater] Fetching from github...", live=False)
184
  except Exception:
185
+ logging.error(f"Update failed in fetching, check your network connection")
186
  return "failed"
187
 
188
+ run(f'{git} stash push --include-untracked -m "updater-{timestamp}"',
189
+ desc=f'[Updater] Saving your local changes to stash updater-{timestamp}', live=False) if need_stash else None
190
+
191
+ run(f"{git} checkout -b {backup_branch}", live=False)
192
  run(f"{git} checkout -b {updater_branch}", live=False)
193
  run(f"{git} reset --hard FETCH_HEAD", live=False)
194
+ run(f"{git} reset --hard {latest_release_hash}", desc=f'[Updater] Checking out {latest_release_tag}...', live=False)
195
  run(f"{git} checkout {current_branch}", live=False)
196
 
197
  try:
198
+ run(f"{git} merge --no-edit {updater_branch} -q", desc=f"[Updater] Trying to apply latest update on version {latest_release_tag}...")
199
  except Exception:
200
  logging.error(f"Update failed in merging")
201
  try:
202
+ run(f"{git} merge --abort", desc="[Updater] Conflict detected, canceling update...")
203
+ run(f"{git} reset --hard {backup_branch}", live=False)
 
204
  run(f"{git} branch -D -f {updater_branch}", live=False)
205
+ run(f"{git} branch -D -f {backup_branch}", live=False)
206
+ run(f"{git} stash pop", live=False) if need_stash else None
207
  logging.error(f"Update failed, but your file was safely reset to the state before the update.")
208
  return "failed"
209
  except Exception as e:
210
+ logging.error(f"!!!Update failed in resetting, try to reset your files manually. {e}")
211
  return "failed"
212
+
213
+ if need_stash:
214
+ try:
215
+ run(f"{git} stash apply", desc="[Updater] Trying to restore your local modifications...", live=False)
216
+ except Exception:
217
+ run(f"{git} reset --hard {backup_branch}", desc="[Updater] Conflict detected, canceling update...", live=False)
218
+ run(f"{git} branch -D -f {updater_branch}", live=False)
219
+ run(f"{git} branch -D -f {backup_branch}", live=False)
220
+ run(f"{git} stash pop", live=False)
221
+ logging.error(f"Update failed in applying your local changes, but your file was safely reset to the state before the update.")
222
+ return "failed"
223
+ run(f"{git} stash drop", live=False)
224
+
225
  run(f"{git} branch -D -f {updater_branch}", live=False)
226
+ run(f"{git} branch -D -f {backup_branch}", live=False)
227
  except Exception as e:
228
  logging.error(f"Update failed: {e}")
229
  return "failed"
230
  if need_pip:
231
  try:
232
+ run_pip(f"install -r requirements.txt", pref="[Updater]", desc="requirements", live=False)
233
  except Exception:
234
  logging.error(f"Update failed in pip install")
235
  return "failed"
modules/train_func.py ADDED
@@ -0,0 +1,161 @@
1
+ import os
2
+ import logging
3
+ import traceback
4
+
5
+ import openai
6
+ import gradio as gr
7
+ import ujson as json
8
+ import commentjson
9
+ import openpyxl
10
+
11
+ import modules.presets as presets
12
+ from modules.utils import get_file_hash, count_token
13
+ from modules.presets import i18n
14
+
15
+ def excel_to_jsonl(filepath, preview=False):
16
+ # 打开Excel文件
17
+ workbook = openpyxl.load_workbook(filepath)
18
+
19
+ # 获取第一个工作表
20
+ sheet = workbook.active
21
+
22
+ # 获取所有行数据
23
+ data = []
24
+ for row in sheet.iter_rows(values_only=True):
25
+ data.append(row)
26
+
27
+ # 构建字典列表
28
+ headers = data[0]
29
+ jsonl = []
30
+ for row in data[1:]:
31
+ row_data = dict(zip(headers, row))
32
+ if any(row_data.values()):
33
+ jsonl.append(row_data)
34
+ formatted_jsonl = []
35
+ for i in jsonl:
36
+ if "提问" in i and "答案" in i:
37
+ if "系统" in i :
38
+ formatted_jsonl.append({
39
+ "messages":[
40
+ {"role": "system", "content": i["系统"]},
41
+ {"role": "user", "content": i["提问"]},
42
+ {"role": "assistant", "content": i["答案"]}
43
+ ]
44
+ })
45
+ else:
46
+ formatted_jsonl.append({
47
+ "messages":[
48
+ {"role": "user", "content": i["提问"]},
49
+ {"role": "assistant", "content": i["答案"]}
50
+ ]
51
+ })
52
+ else:
53
+ logging.warning(f"跳过一行数据,因为没有找到提问和答案: {i}")
54
+ return formatted_jsonl
55
+
56
+ def jsonl_save_to_disk(jsonl, filepath):
57
+ file_hash = get_file_hash(file_paths = [filepath])
58
+ os.makedirs("files", exist_ok=True)
59
+ save_path = f"files/{file_hash}.jsonl"
60
+ with open(save_path, "w") as f:
61
+ f.write("\n".join([json.dumps(i, ensure_ascii=False) for i in jsonl]))
62
+ return save_path
63
+
64
+ def estimate_cost(ds):
65
+ dialogues = []
66
+ for l in ds:
67
+ for m in l["messages"]:
68
+ dialogues.append(m["content"])
69
+ dialogues = "\n".join(dialogues)
70
+ tokens = count_token(dialogues)
71
+ return f"Token 数约为 {tokens},预估每轮(epoch)费用约为 {tokens / 1000 * 0.008} 美元。"
72
+
73
+
74
+ def handle_dataset_selection(file_src):
75
+ logging.info(f"Loading dataset {file_src.name}...")
76
+ preview = ""
77
+ if file_src.name.endswith(".jsonl"):
78
+ with open(file_src.name, "r") as f:
79
+ ds = [json.loads(l) for l in f.readlines()]
80
+ else:
81
+ ds = excel_to_jsonl(file_src.name)
82
+ preview = ds[0]
83
+
84
+ return preview, gr.update(interactive=True), estimate_cost(ds)
85
+
86
+ def upload_to_openai(file_src):
87
+ openai.api_key = os.getenv("OPENAI_API_KEY")
88
+ dspath = file_src.name
89
+ msg = ""
90
+ logging.info(f"Uploading dataset {dspath}...")
91
+ if dspath.endswith(".xlsx"):
92
+ jsonl = excel_to_jsonl(dspath)
93
+ dspath = jsonl_save_to_disk(jsonl, dspath)
94
+ try:
95
+ uploaded = openai.File.create(
96
+ file=open(dspath, "rb"),
97
+ purpose='fine-tune'
98
+ )
99
+ return uploaded.id, f"上传成功"
100
+ except Exception as e:
101
+ traceback.print_exc()
102
+ return "", f"上传失败,原因:{ e }"
103
+
104
+ def build_event_description(id, status, trained_tokens, name=i18n("暂时未知")):
105
+ # convert to markdown
106
+ return f"""
107
+ #### 训练任务 {id}
108
+
109
+ 模型名称:{name}
110
+
111
+ 状态:{status}
112
+
113
+ 已经训练了 {trained_tokens} 个token
114
+ """
115
+
116
+ def start_training(file_id, suffix, epochs):
117
+ openai.api_key = os.getenv("OPENAI_API_KEY")
118
+ try:
119
+ job = openai.FineTuningJob.create(training_file=file_id, model="gpt-3.5-turbo", suffix=suffix, hyperparameters={"n_epochs": epochs})
120
+ return build_event_description(job.id, job.status, job.trained_tokens)
121
+ except Exception as e:
122
+ traceback.print_exc()
123
+ if "is not ready" in str(e):
124
+ return "训练出错,因为文件还没准备好。OpenAI 需要一点时间准备文件,过几分钟再来试试。"
125
+ return f"训练失败,原因:{ e }"
126
+
127
+ def get_training_status():
128
+ openai.api_key = os.getenv("OPENAI_API_KEY")
129
+ active_jobs = [build_event_description(job["id"], job["status"], job["trained_tokens"], job["fine_tuned_model"]) for job in openai.FineTuningJob.list(limit=10)["data"] if job["status"] != "cancelled"]
130
+ return "\n\n".join(active_jobs), gr.update(interactive=True) if len(active_jobs) > 0 else gr.update(interactive=False)
131
+
132
+ def handle_dataset_clear():
133
+ return gr.update(value=None), gr.update(interactive=False)
134
+
135
+ def add_to_models():
136
+ openai.api_key = os.getenv("OPENAI_API_KEY")
137
+ succeeded_jobs = [job for job in openai.FineTuningJob.list()["data"] if job["status"] == "succeeded"]
138
+ extra_models = [job["fine_tuned_model"] for job in succeeded_jobs]
139
+ for i in extra_models:
140
+ if i not in presets.MODELS:
141
+ presets.MODELS.append(i)
142
+
143
+ with open('config.json', 'r') as f:
144
+ data = commentjson.load(f)
145
+ if 'extra_models' in data:
146
+ for i in extra_models:
147
+ if i not in data['extra_models']:
148
+ data['extra_models'].append(i)
149
+ else:
150
+ data['extra_models'] = extra_models
151
+ with open('config.json', 'w') as f:
152
+ commentjson.dump(data, f, indent=4)
153
+
154
+ return gr.update(choices=presets.MODELS), f"成功添加了 {len(succeeded_jobs)} 个模型。"
155
+
156
+ def cancel_all_jobs():
157
+ openai.api_key = os.getenv("OPENAI_API_KEY")
158
+ jobs = [job for job in openai.FineTuningJob.list()["data"] if job["status"] not in ["cancelled", "succeeded"]]
159
+ for job in jobs:
160
+ openai.FineTuningJob.cancel(job["id"])
161
+ return f"成功取消了 {len(jobs)} 个训练任务。"
modules/utils.py CHANGED
@@ -2,17 +2,14 @@
2
  from __future__ import annotations
3
  from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
4
  import logging
5
- import json
6
  import os
7
  import datetime
8
- from datetime import timezone
9
- import hashlib
10
  import csv
11
  import requests
12
  import re
13
  import html
14
- import sys
15
- import subprocess
16
 
17
  import gradio as gr
18
  from pypinyin import lazy_pinyin
@@ -129,9 +126,10 @@ def dislike(current_model, *args):
129
  return current_model.dislike(*args)
130
 
131
 
132
- def count_token(message):
133
  encoding = tiktoken.get_encoding("cl100k_base")
134
- input_str = f"role: {message['role']}, content: {message['content']}"
 
135
  length = len(encoding.encode(input_str))
136
  return length
137
 
@@ -241,7 +239,7 @@ def convert_bot_before_marked(chat_message):
241
  code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
242
  code_blocks = code_block_pattern.findall(chat_message)
243
  non_code_parts = code_block_pattern.split(chat_message)[::2]
244
- result = []
245
  for non_code, code in zip(non_code_parts, code_blocks + [""]):
246
  if non_code.strip():
247
  result.append(non_code)
@@ -542,10 +540,10 @@ def transfer_input(inputs):
542
  def update_chuanhu():
543
  from .repo import background_update
544
 
545
- print("Trying to update...")
546
  update_status = background_update()
547
  if update_status == "success":
548
- print("Successfully updated, restart needed")
549
  status = '<span id="update-status" class="hideK">success</span>'
550
  return gr.Markdown.update(value=i18n("更新成功,请重启本程序")+status)
551
  else:
@@ -658,3 +656,28 @@ def beautify_err_msg(err_msg):
658
  if "Resource not found" in err_msg:
659
  return i18n("请查看 config_example.json,配置 Azure OpenAI")
660
  return err_msg
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  from __future__ import annotations
3
  from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
4
  import logging
5
+ import commentjson as json
6
  import os
7
  import datetime
 
 
8
  import csv
9
  import requests
10
  import re
11
  import html
12
+ import hashlib
 
13
 
14
  import gradio as gr
15
  from pypinyin import lazy_pinyin
 
126
  return current_model.dislike(*args)
127
 
128
 
129
+ def count_token(input_str):
130
  encoding = tiktoken.get_encoding("cl100k_base")
131
+ if type(input_str) == dict:
132
+ input_str = f"role: {input_str['role']}, content: {input_str['content']}"
133
  length = len(encoding.encode(input_str))
134
  return length
135
 
 
239
  code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
240
  code_blocks = code_block_pattern.findall(chat_message)
241
  non_code_parts = code_block_pattern.split(chat_message)[::2]
242
+ result = []
243
  for non_code, code in zip(non_code_parts, code_blocks + [""]):
244
  if non_code.strip():
245
  result.append(non_code)
 
540
  def update_chuanhu():
541
  from .repo import background_update
542
 
543
+ print("[Updater] Trying to update...")
544
  update_status = background_update()
545
  if update_status == "success":
546
+ logging.info("Successfully updated, restart needed")
547
  status = '<span id="update-status" class="hideK">success</span>'
548
  return gr.Markdown.update(value=i18n("更新成功,请重启本程序")+status)
549
  else:
 
656
  if "Resource not found" in err_msg:
657
  return i18n("请查看 config_example.json,配置 Azure OpenAI")
658
  return err_msg
659
+
660
+ def auth_from_conf(username, password):
661
+ try:
662
+ with open("config.json", encoding="utf-8") as f:
663
+ conf = json.load(f)
664
+ usernames, passwords = [i[0] for i in conf["users"]], [i[1] for i in conf["users"]]
665
+ if username in usernames:
666
+ if passwords[usernames.index(username)] == password:
667
+ return True
668
+ return False
669
+ except Exception:
670
+ return False
671
+
672
+ def get_file_hash(file_src=None, file_paths=None):
673
+ if file_src:
674
+ file_paths = [x.name for x in file_src]
675
+ file_paths.sort(key=lambda x: os.path.basename(x))
676
+
677
+ md5_hash = hashlib.md5()
678
+ for file_path in file_paths:
679
+ with open(file_path, "rb") as f:
680
+ while chunk := f.read(8192):
681
+ md5_hash.update(chunk)
682
+
683
+ return md5_hash.hexdigest()
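Two helpers are added at the end of utils.py: `auth_from_conf()` checks credentials against a `users` list of `[username, password]` pairs in `config.json`, and `get_file_hash()` builds a single MD5 digest over the given files. A standalone usage sketch follows; the temporary path and credentials are invented for the example.

```python
# Not repo code: the "users" layout auth_from_conf() expects, and the same
# MD5-over-files scheme used by get_file_hash(file_paths=...).
import hashlib
import json
import os
import tempfile

def file_hash(paths):
    md5 = hashlib.md5()
    for p in sorted(paths, key=os.path.basename):
        with open(p, "rb") as f:
            while chunk := f.read(8192):
                md5.update(chunk)
    return md5.hexdigest()

tmp = tempfile.mkdtemp()
config_path = os.path.join(tmp, "config.json")
with open(config_path, "w", encoding="utf-8") as f:
    json.dump({"users": [["alice", "s3cret"], ["bob", "hunter2"]]}, f)

with open(config_path, encoding="utf-8") as f:
    conf = json.load(f)
users = {u: p for u, p in conf["users"]}
print(users.get("alice") == "s3cret")  # True -> login allowed, mirroring auth_from_conf
print(file_hash([config_path]))        # same digest scheme as get_file_hash
```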
readme/README_en.md CHANGED
@@ -6,7 +6,7 @@
6
  <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
7
  <div align="center">
8
  <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
9
- <img src="https://user-images.githubusercontent.com/70903329/227087087-93b37d64-7dc3-4738-a518-c1cf05591c8a.png" alt="Logo" height="156">
10
  </a>
11
 
12
  <p align="center">
@@ -44,6 +44,23 @@
44
  </p>
45
  </div>
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  ## Usage Tips
48
 
49
  - To better control the ChatGPT, use System Prompt.
@@ -51,11 +68,11 @@
51
  - To try again if the response is unsatisfactory, use `🔄 Regenerate` button.
52
  - To start a new line in the input box, press <kbd>Shift</kbd> + <kbd>Enter</kbd> keys.
53
  - To quickly switch between input history, press <kbd>↑</kbd> and <kbd>↓</kbd> key in the input box.
54
- - To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=<your port number>)`.
55
- - To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Please be noted that the program must be running in order to be accessed via a public link.
56
  - To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
57
 
58
- ## Installation
59
 
60
  ```shell
61
  git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
@@ -87,10 +104,6 @@ When you encounter problems, you should try manually pulling the latest changes
87
  ```
88
  pip install -r requirements.txt
89
  ```
90
- 3. Update Gradio
91
- ```
92
- pip install gradio --upgrade --force-reinstall
93
- ```
94
 
95
  Generally, you can solve most problems by following these steps.
96
 
 
6
  <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
7
  <div align="center">
8
  <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
9
+ <img src="https://github.com/GaiZhenbiao/ChuanhuChatGPT/assets/70903329/aca3a7ec-4f1d-4667-890c-a6f47bf08f63" alt="Logo" height="156">
10
  </a>
11
 
12
  <p align="center">
 
44
  </p>
45
  </div>
46
 
47
+ ## Supported LLM Models
48
+
49
+ **LLM models via API**:
50
+
51
+ - [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
52
+ - [Google PaLM](https://developers.generativeai.google/products/palm)
53
+ - [Inspur Yuan 1.0](https://air.inspur.com/home)
54
+ - [MiniMax](https://api.minimax.chat/)
55
+ - [XMChat](https://github.com/MILVLG/xmchat)
56
+
57
+ **LLM models via local deployment**:
58
+
59
+ - [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
60
+ - [LLaMA](https://github.com/facebookresearch/llama)
61
+ - [StableLM](https://github.com/Stability-AI/StableLM)
62
+ - [MOSS](https://github.com/OpenLMLab/MOSS)
63
+
64
  ## Usage Tips
65
 
66
  - To better control the ChatGPT, use System Prompt.
 
68
  - To try again if the response is unsatisfactory, use `🔄 Regenerate` button.
69
  - To start a new line in the input box, press <kbd>Shift</kbd> + <kbd>Enter</kbd> keys.
70
  - To quickly switch between input history, press <kbd>↑</kbd> and <kbd>↓</kbd> key in the input box.
71
+ - To deploy the program onto a server, set `"server_name": "0.0.0.0", "server_port": <your port number>,` in `config.json`.
72
+ - To get a public shared link, set `"share": true,` in `config.json`. Please note that the program must be running in order to be accessed via a public link.
73
  - To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
74
 
75
+ ## Quickstart
76
 
77
  ```shell
78
  git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
 
104
  ```
105
  pip install -r requirements.txt
106
  ```
 
 
 
 
107
 
108
  Generally, you can solve most problems by following these steps.
109
 
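The updated tips move server deployment and link sharing from code edits to `config.json` keys. A hedged sketch of such a file, written out with Python so the key names match the tips above; the concrete values are examples only, not defaults guaranteed by this change.

```python
# Not from the repo: dumps the deployment keys the tips mention into an example
# file. Merge these keys into your real config.json rather than overwriting it.
import json

deploy_keys = {
    "server_name": "0.0.0.0",   # listen on all interfaces when deploying to a server
    "server_port": 7860,        # example port number
    "share": False,             # set to true to request a public share link
}

with open("config.deploy-example.json", "w", encoding="utf-8") as f:
    json.dump(deploy_keys, f, indent=4)
```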
readme/README_ja.md CHANGED
@@ -6,7 +6,7 @@
6
  <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
7
  <div align="center">
8
  <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
9
- <img src="https://user-images.githubusercontent.com/70903329/227087087-93b37d64-7dc3-4738-a518-c1cf05591c8a.png" alt="Logo" height="156">
10
  </a>
11
 
12
  <p align="center">
@@ -44,17 +44,34 @@
44
  </p>
45
  </div>
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  ## 使う上でのTips
48
 
49
  - ChatGPTをより適切に制御するために、システムプロンプトを使用できます。
50
  - プロンプトテンプレートを使用するには、プロンプトテンプレートコレクションを選択し、ドロップダウンメニューから特定のプロンプトを選択。回答が不十分な場合は、`🔄再生成`ボタンを使って再試行します。
51
  - 入力ボックスで改行するには、<kbd>Shift</kbd> + <kbd>Enter</kbd>キーを押してください。
52
  - 入力履歴を素早く切り替えるには、入力ボックスで <kbd>↑</kbd>と<kbd>↓</kbd>キーを押す。
53
- - プログラムをサーバにデプロイするには、プログラムの最終行を `demo.launch(server_name="0.0.0.0", server_port=<your port number>)`に変更します。
54
- - 共有リンクを取得するには、プログラムの最後の行を `demo.launch(share=True)` に変更してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。
55
  - Hugging Face Spacesで使用する場合: より速く、より安全に利用するために、**Duplicate Space**を使用し、自分のスペースでプログラムを実行することをお勧めします。
56
 
57
- ## インストール
58
 
59
  ```shell
60
  git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
@@ -86,10 +103,6 @@ python ChuanhuChatbot.py
86
  ```
87
  pip install -r requirements.txt
88
  ```
89
- 3. Gradioを更新
90
- ```
91
- pip install gradio --upgrade --force-reinstall
92
- ```
93
 
94
  一般的に、以下の手順でほとんどの問題を解決することができます。
95
 
 
6
  <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
7
  <div align="center">
8
  <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
9
+ <img src="https://github.com/GaiZhenbiao/ChuanhuChatGPT/assets/70903329/aca3a7ec-4f1d-4667-890c-a6f47bf08f63" alt="Logo" height="156">
10
  </a>
11
 
12
  <p align="center">
 
44
  </p>
45
  </div>
46
 
47
+ ## サポートされている大規模言語モデル
48
+
49
+ **APIを通じてアクセス可能な大規模言語モデル**:
50
+
51
+ - [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
52
+ - [Google PaLM](https://developers.generativeai.google/products/palm)
53
+ - [Inspur Yuan 1.0](https://air.inspur.com/home)
54
+ - [MiniMax](https://api.minimax.chat/)
55
+ - [XMChat](https://github.com/MILVLG/xmchat)
56
+
57
+ **ローカルに展開された大規模言語モデル**:
58
+
59
+ - [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
60
+ - [LLaMA](https://github.com/facebookresearch/llama)
61
+ - [StableLM](https://github.com/Stability-AI/StableLM)
62
+ - [MOSS](https://github.com/OpenLMLab/MOSS)
63
+
64
  ## 使う上でのTips
65
 
66
  - ChatGPTをより適切に制御するために、システムプロンプトを使用できます。
67
  - プロンプトテンプレートを使用するには、プロンプトテンプレートコレクションを選択し、ドロップダウンメニューから特定のプロンプトを選択。回答が不十分な場合は、`🔄再生成`ボタンを使って再試行します。
68
  - 入力ボックスで改行するには、<kbd>Shift</kbd> + <kbd>Enter</kbd>キーを押してください。
69
  - 入力履歴を素早く切り替えるには、入力ボックスで <kbd>↑</kbd>と<kbd>↓</kbd>キーを押す。
70
+ - プログラムをサーバーに展開するには、`config.json` 内の `"server_name": "0.0.0.0", "server_port": <ポート番号>`を設定してください。
71
+ - 共有リンクを取得するには、 `config.json` 内の `"share": true` を設定してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。
72
  - Hugging Face Spacesで使用する場合: より速く、より安全に利用するために、**Duplicate Space**を使用し、自分のスペースでプログラムを実行することをお勧めします。
73
 
74
+ ## クイックスタート
75
 
76
  ```shell
77
  git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
 
103
  ```
104
  pip install -r requirements.txt
105
  ```
 
 
 
 
106
 
107
  一般的に、以下の手順でほとんどの問題を解決することができます。
108
 
requirements.txt CHANGED
@@ -7,7 +7,7 @@ tqdm
7
  colorama
8
  googlesearch-python
9
  Pygments
10
- langchain==0.0.173
11
  markdown
12
  PyPDF2
13
  pdfplumber
@@ -21,7 +21,8 @@ duckduckgo-search
21
  arxiv
22
  wikipedia
23
  google.generativeai
24
- openai
25
  unstructured
26
  google-api-python-client
27
  tabulate
 
 
7
  colorama
8
  googlesearch-python
9
  Pygments
10
+ langchain==0.0.276
11
  markdown
12
  PyPDF2
13
  pdfplumber
 
21
  arxiv
22
  wikipedia
23
  google.generativeai
24
+ openai>=0.27.9
25
  unstructured
26
  google-api-python-client
27
  tabulate
28
+ ujson