Merge remote-tracking branch 'origin/master' into multi_language
Files changed:
- .gitignore +2 -1
- README.md +6 -0
- config.py +7 -2
- core_functional.py +7 -0
- crazy_functional.py +10 -0
- crazy_functions/图片生成.py +1 -0
- crazy_functions/总结音视频.py +184 -0
- crazy_functions/解析JupyterNotebook.py +1 -0
- crazy_functions/询问多个大语言模型.py +1 -0
- main.py +2 -0
- request_llm/README.md +25 -0
- request_llm/bridge_all.py +15 -2
- request_llm/bridge_newbing.py +1 -1
- request_llm/bridge_stackclaude.py +296 -0
- request_llm/requirements_slackclaude.txt +1 -0
- version +2 -2
.gitignore (CHANGED)

```diff
@@ -147,4 +147,5 @@ private*
 crazy_functions/test_project/pdf_and_word
 crazy_functions/test_samples
 request_llm/jittorllms
-multi-language
+multi-language
+request_llm/moss
```
README.md (CHANGED)

```diff
@@ -267,6 +267,12 @@ Tip: click `载入对话历史存档` without specifying a file to view the history
 <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
 </div>
 
+9. OpenAI audio parsing and summarization
+<div align="center">
+<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
+</div>
+
+
 
 ## Version:
 - version 3.5 (Todo): use natural language to call every function plugin in this project (high priority)
```
config.py (CHANGED)

```diff
@@ -44,9 +44,10 @@ WEB_PORT = -1
 # If OpenAI does not respond (network lag, proxy failure, invalid KEY), the retry limit
 MAX_RETRY = 2
 
-#
+# Model selection
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+# P.S. other available models also include ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
 # Execution mode for local LLMs such as ChatGLM: CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # or "cuda"
@@ -75,3 +76,7 @@ NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
 NEWBING_COOKIES = """
 your bing cookies here
 """
+
+# To use Claude via Slack, see the tutorial in request_llm/README.md
+SLACK_CLAUDE_BOT_ID = ''
+SLACK_CLAUDE_USER_TOKEN = ''
```
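The two new keys are consumed through the project's `get_conf` helper, which returns a tuple per requested key (hence the trailing comma when unpacking, as seen in bridge_stackclaude.py below). A minimal sketch, assuming only what this diff shows:

```python
# Sketch of reading the new Slack Claude settings; mirrors the single-key
# get_conf unpacking used in bridge_stackclaude.py (get_conf returns a tuple).
from toolbox import get_conf

SLACK_CLAUDE_BOT_ID, = get_conf('SLACK_CLAUDE_BOT_ID')
SLACK_CLAUDE_USER_TOKEN, = get_conf('SLACK_CLAUDE_USER_TOKEN')
if not SLACK_CLAUDE_USER_TOKEN:
    # both values default to '' above, so an empty string means "not configured"
    raise RuntimeError("Fill in SLACK_CLAUDE_BOT_ID and SLACK_CLAUDE_USER_TOKEN in config.py first")
```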
core_functional.py (CHANGED)

```diff
@@ -68,4 +68,11 @@ def get_core_functions():
         "Prefix": r"请解释以下代码:" + "\n```\n",
         "Suffix": "\n```\n",
     },
+    "参考文献转Bib": {
+        "Prefix": r"Here are some bibliography items, please transform them into bibtex style. " +
+                  r"Note that reference styles may be of more than one kind; you should transform each item correctly. " +
+                  r"Items need to be transformed:",
+        "Suffix": r"",
+        "Visible": False,
+    }
 }
```
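The new entry is hidden in the UI (`"Visible": False`, honored by the main.py change below) but its Prefix and Suffix are still wrapped around the raw user input when the function is applied, following the pattern visible in bridge_stackclaude.py's predict() later in this commit. A minimal sketch under those assumptions (the sample input is hypothetical):

```python
# Sketch of how a core-function entry transforms the input; the wrapping
# pattern is copied from bridge_stackclaude.py's predict() in this commit.
import core_functional

entry = core_functional.get_core_functions()["参考文献转Bib"]
raw = "Smith J. Example paper. Journal X, 2020."    # hypothetical input
wrapped = entry["Prefix"] + raw + entry["Suffix"]   # what the model receives
assert entry.get("Visible", True) is False          # main.py skips its button
```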
crazy_functional.py (CHANGED)

```diff
@@ -246,5 +246,15 @@ def get_crazy_functions():
             "Function": HotReload(图片生成)
         },
     })
+    from crazy_functions.总结音视频 import 总结音视频
+    function_plugins.update({
+        "批量总结音视频(输入路径或上传压缩包)": {
+            "Color": "stop",
+            "AsButton": False,
+            "AdvancedArgs": True,
+            "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
+            "Function": HotReload(总结音视频)
+        }
+    })
     ###################### plugin group n ###########################
     return function_plugins
```
crazy_functions/图片生成.py (CHANGED)

```diff
@@ -55,6 +55,7 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     history = []    # clear the history to avoid input overflow
     chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-xxxx或者api2d-xxxx。如果中文效果不理想, 尝试Prompt。正在处理中 ....."))
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI; requesting gpt takes a while, so do a prompt UI update first
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
     resolution = plugin_kwargs.get("advanced_arg", '256x256')
     image_url, image_path = gen_image(llm_kwargs, prompt, resolution)
     chatbot.append([prompt,
```
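The same one-line guard recurs in 总结音视频.py, 解析JupyterNotebook.py, and 询问多个大语言模型.py below. The reason it is needed: when the user leaves the advanced-args box blank, the UI passes `advanced_arg=""`, and `dict.get` only falls back to its default for a *missing* key, not for an empty value. A self-contained illustration:

```python
# Why popping the empty advanced_arg matters before calling dict.get():
plugin_kwargs = {"advanced_arg": ""}       # what arrives when the box is blank

print(plugin_kwargs.get("advanced_arg", '256x256'))   # '' -- the default is ignored

if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""):
    plugin_kwargs.pop("advanced_arg")
print(plugin_kwargs.get("advanced_arg", '256x256'))   # '256x256' -- default applies
```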
crazy_functions/总结音视频.py (ADDED, +184 lines)

```python
from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file, get_conf
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def split_audio_file(filename, split_duration=1000):
    """
    Split an audio file into multiple segments of the given duration.

    Args:
        filename (str): the audio file to split.
        split_duration (int, optional): duration of each segment in seconds. Defaults to 1000.

    Returns:
        filelist (list): paths of all the split audio segments.
    """
    from moviepy.editor import AudioFileClip
    import os
    os.makedirs('gpt_log/mp3/cut/', exist_ok=True)  # folder for the split audio

    # read the audio file
    audio = AudioFileClip(filename)

    # compute the total duration and the split points
    total_duration = audio.duration
    split_points = list(range(0, int(total_duration), split_duration))
    split_points.append(int(total_duration))
    filelist = []

    # split the audio file
    for i in range(len(split_points) - 1):
        start_time = split_points[i]
        end_time = split_points[i + 1]
        split_audio = audio.subclip(start_time, end_time)
        split_audio.write_audiofile(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3")
        filelist.append(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3")

    audio.close()
    return filelist

def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
    import os, requests
    from moviepy.editor import AudioFileClip
    from request_llm.bridge_all import model_info

    # set the OpenAI key and model
    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
    chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']

    whisper_endpoint = chat_endpoint.replace('chat/completions', 'audio/transcriptions')
    url = whisper_endpoint
    headers = {
        'Authorization': f"Bearer {api_key}"
    }

    os.makedirs('gpt_log/mp3/', exist_ok=True)
    for index, fp in enumerate(file_manifest):
        audio_history = []
        # extract the file extension
        ext = os.path.splitext(fp)[1]
        # extract the audio track from video files
        if ext not in [".mp3", ".wav", ".m4a", ".mpga"]:
            audio_clip = AudioFileClip(fp)
            audio_clip.write_audiofile(f'gpt_log/mp3/output{index}.mp3')
            fp = f'gpt_log/mp3/output{index}.mp3'
        # speech-to-text via the whisper model
        voice = split_audio_file(fp)
        for j, i in enumerate(voice):
            with open(i, 'rb') as f:
                file_content = f.read()  # read the file content into memory
            files = {
                'file': (os.path.basename(i), file_content),
            }
            data = {
                "model": "whisper-1",
                "prompt": parse_prompt,
                'response_format': "text"
            }

            chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."])
            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
            proxies, = get_conf('proxies')
            response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text

            chatbot.append(["音频解析结果", response])
            history.extend(["音频解析结果", response])
            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

            i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```'
            i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。'
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt=f"总结音频。音频文件名{fp}"
            )

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.extend([i_say_show_user, gpt_say])
            audio_history.extend([i_say_show_user, gpt_say])

        # all segments of this file have been summarized; if the file was split,
        # produce one final summary over the per-segment summaries
        result = "".join(audio_history)
        if len(audio_history) > 1:
            i_say = f"根据以上的对话,使用中文总结音频“{result}”的主要内容。"
            i_say_show_user = f'第{index + 1}段音频的主要内容:'
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=audio_history,
                sys_prompt="总结文章。"
            )

            history.extend([i_say, gpt_say])
            audio_history.extend([i_say, gpt_say])

        res = write_results_to_file(history)
        chatbot.append((f"第{index + 1}段音频完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # remove the intermediate folder
    import shutil
    shutil.rmtree('gpt_log/mp3')
    res = write_results_to_file(history)
    chatbot.append(("所有音频都总结完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)


@CatchException
def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT):
    import glob, os

    # basic info: feature description, contributors
    chatbot.append([
        "函数插件功能?",
        "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    try:
        from moviepy.editor import AudioFileClip
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # clear the history to avoid input overflow
    history = []

    # check the input; if none was given, exit directly
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # collect the list of files to process
    extensions = ['.mp4', '.m4a', '.wav', '.mpga', '.mpeg', '.mp3', '.avi', '.mkv', '.flac', '.aac']

    if txt.endswith(tuple(extensions)):
        file_manifest = [txt]
    else:
        file_manifest = []
        for extension in extensions:
            file_manifest.extend(glob.glob(f'{project_folder}/**/*{extension}', recursive=True))

    # if no files were found
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # start the actual task
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    parse_prompt = plugin_kwargs.get("advanced_arg", '将音频解析为简体中文')
    yield from AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history)

    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```
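The transcription call above derives the Whisper URL from the configured chat endpoint by string replacement, so api2d-style mirror endpoints keep working. A standalone sketch of just that HTTP request (the key and file name are placeholders):

```python
# Standalone sketch of the whisper-1 transcription request made by AnalyAudio.
import requests

api_key = "sk-..."                                          # placeholder
chat_endpoint = "https://api.openai.com/v1/chat/completions"
whisper_endpoint = chat_endpoint.replace('chat/completions', 'audio/transcriptions')

with open("clip_0.mp3", 'rb') as f:                         # placeholder clip
    files = {'file': ("clip_0.mp3", f.read())}
data = {"model": "whisper-1", "prompt": "解析为简体中文", "response_format": "text"}
text = requests.post(whisper_endpoint, headers={'Authorization': f"Bearer {api_key}"},
                     files=files, data=data).text
print(text)    # plain transcript, because response_format is "text"
```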
crazy_functions/解析JupyterNotebook.py (CHANGED)

```diff
@@ -67,6 +67,7 @@ def parseNotebook(filename, enable_markdown=1):
 def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
 
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
     enable_markdown = plugin_kwargs.get("advanced_arg", "1")
     try:
         enable_markdown = int(enable_markdown)
```
crazy_functions/询问多个大语言模型.py (CHANGED)

```diff
@@ -45,6 +45,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
     chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI; requesting gpt takes a while, so do a prompt UI update first
 
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
     # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # any number of llm backends, joined with &
     llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo') # 'chatglm&gpt-3.5-turbo' # any number of llm backends, joined with &
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
```
main.py (CHANGED)

```diff
@@ -74,6 +74,7 @@ def main():
     with gr.Accordion("基础功能区", open=True) as area_basic_fn:
         with gr.Row():
             for k in functional:
+                if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
                 variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                 functional[k]["Button"] = gr.Button(k, variant=variant)
     with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
@@ -144,6 +145,7 @@ def main():
     clearBtn2.click(lambda: ("",""), None, [txt, txt2])
     # register the callbacks for the basic function area
     for k in functional:
+        if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
         click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
         cancel_handles.append(click_handle)
     # file upload area: interaction with the chatbot after receiving files
```
request_llm/README.md (CHANGED)

````diff
@@ -13,6 +13,31 @@ LLM_MODEL = "chatglm"
 `python main.py`
 ```
 
+## Claude-Stack
+
+- Follow this tutorial to obtain the two values: https://zhuanlan.zhihu.com/p/627485689
+  - 1. SLACK_CLAUDE_BOT_ID
+  - 2. SLACK_CLAUDE_USER_TOKEN
+
+- Put the tokens into config.py
+
+## Newbing
+
+- Use a cookie editor to get the cookies (json)
+- Put the cookies (json) into config.py (NEWBING_COOKIES)
+
+## Moss
+- Use docker-compose
+
+## RWKV
+- Use docker-compose
+
+## LLAMA
+- Use docker-compose
+
+## 盘古
+- Use docker-compose
+
 
 ---
 ## Text-Generation-UI (TGUI, under debugging, not yet usable)
````
request_llm/bridge_all.py (CHANGED)

```diff
@@ -130,6 +130,7 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+
 }
 
 
@@ -186,8 +187,20 @@ if "moss" in AVAIL_LLM_MODELS:
         "token_cnt": get_token_num_gpt35,
     },
 })
-
-
+if "stack-claude" in AVAIL_LLM_MODELS:
+    from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui
+    from .bridge_stackclaude import predict as claude_ui
+    # claude
+    model_info.update({
+        "stack-claude": {
+            "fn_with_ui": claude_ui,
+            "fn_without_ui": claude_noui,
+            "endpoint": None,
+            "max_token": 8192,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        }
+    })
 
 
 def LLM_CATCH_EXCEPTION(f):
```
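Each model_info entry pairs a streaming UI function with a non-UI one, and the rest of the project resolves backends through this registry. A sketch of the lookup, assuming only the keys registered above (the actual call sites live elsewhere in bridge_all.py):

```python
# Sketch of backend resolution via the model_info registry.
from request_llm.bridge_all import model_info

def resolve(model_name, with_ui):
    entry = model_info[model_name]          # KeyError if not registered
    return entry["fn_with_ui"] if with_ui else entry["fn_without_ui"]

predict = resolve("stack-claude", with_ui=True)   # bridge_stackclaude.predict
# "endpoint" is None for stack-claude because Claude is reached through Slack,
# not HTTP; token counting reuses the gpt-3.5 tokenizer as an approximation.
```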
request_llm/bridge_newbing.py (CHANGED)

```diff
@@ -153,7 +153,7 @@ class NewBingHandle(Process):
             # enter the task-waiting state
             asyncio.run(self.async_run())
         except Exception:
-            tb_str = '```\n' + trimmed_format_exc() + '```\n'
+            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
             self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
             self.child.send('[Fail]')
             self.child.send('[Finish]')
```
request_llm/bridge_stackclaude.py (ADDED, +296 lines)

```python
from .bridge_newbing import preprocess_newbing_out, preprocess_newbing_out_simple
from multiprocessing import Process, Pipe
from toolbox import update_ui, get_conf, trimmed_format_exc
import threading
import importlib
import logging
import time
from toolbox import get_conf
import asyncio
load_message = "正在加载Claude组件,请稍候..."

try:
    """
    ========================================================================
    Part 1: the Slack API client
    https://github.com/yokonsan/claude-in-slack-api
    ========================================================================
    """

    from slack_sdk.errors import SlackApiError
    from slack_sdk.web.async_client import AsyncWebClient

    class SlackClient(AsyncWebClient):
        """SlackClient wraps the Slack API for sending and receiving messages.

        Attributes:
        - CHANNEL_ID: str, the channel ID.

        Methods:
        - open_channel(): async. Opens a channel via conversations_open and stores the returned channel ID in CHANNEL_ID.
        - chat(text: str): async. Sends a text message to the opened channel.
        - get_slack_messages(): async. Fetches the latest message from the opened channel; history queries are not supported yet.
        - get_reply(): async. Polls the opened channel; a message ending in "Typing…_" means Claude is still writing, otherwise the loop ends.

        """
        CHANNEL_ID = None

        async def open_channel(self):
            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0])
            self.CHANNEL_ID = response["channel"]["id"]

        async def chat(self, text):
            if not self.CHANNEL_ID:
                raise Exception("Channel not found.")

            resp = await self.chat_postMessage(channel=self.CHANNEL_ID, text=text)
            self.LAST_TS = resp["ts"]

        async def get_slack_messages(self):
            try:
                # TODO: history messages are not supported yet, because when several
                # people share a channel their histories leak into each other
                resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
                msg = [msg for msg in resp["messages"]
                       if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]]
                return msg
            except (SlackApiError, KeyError) as e:
                raise RuntimeError(f"获取Slack消息失败。")

        async def get_reply(self):
            while True:
                slack_msgs = await self.get_slack_messages()
                if len(slack_msgs) == 0:
                    await asyncio.sleep(0.5)
                    continue

                msg = slack_msgs[-1]
                if msg["text"].endswith("Typing…_"):
                    yield False, msg["text"]
                else:
                    yield True, msg["text"]
                    break
except:
    pass

"""
========================================================================
Part 2: the subprocess worker (the calling body)
========================================================================
"""


class ClaudeHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.claude_model = None
        self.info = ""
        self.success = True
        self.local_history = []
        self.check_dependency()
        if self.success:
            self.start()
        self.threadLock = threading.Lock()

    def check_dependency(self):
        try:
            self.success = False
            import slack_sdk
            self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。"
            self.success = True
        except:
            self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_slackclaude.txt`安装Claude的依赖,然后重启程序。"
            self.success = False

    def ready(self):
        return self.claude_model is not None

    async def async_run(self):
        await self.claude_model.open_channel()
        while True:
            # wait for a request
            kwargs = self.child.recv()
            question = kwargs['query']
            history = kwargs['history']
            # system_prompt = kwargs['system_prompt']

            # reset?
            if len(self.local_history) > 0 and len(history) == 0:
                # await self.claude_model.reset()
                self.local_history = []

            # start asking
            prompt = ""
            # with the Slack API it is best not to add a system prompt
            # if system_prompt not in self.local_history:
            #     self.local_history.append(system_prompt)
            #     prompt += system_prompt + '\n'

            # append the history
            for ab in history:
                a, b = ab
                if a not in self.local_history:
                    self.local_history.append(a)
                    prompt += a + '\n'
                # if b not in self.local_history:
                #     self.local_history.append(b)
                #     prompt += b + '\n'

            # the question itself
            prompt += question
            self.local_history.append(question)
            print('question:', prompt)
            # submit
            await self.claude_model.chat(prompt)
            # fetch the reply
            # async for final, response in self.claude_model.get_reply():
            #     await self.handle_claude_response(final, response)
            async for final, response in self.claude_model.get_reply():
                if not final:
                    print(response)
                    self.child.send(str(response))
                else:
                    # avoid losing the last message
                    slack_msgs = await self.claude_model.get_slack_messages()
                    last_msg = slack_msgs[-1]["text"] if slack_msgs and len(slack_msgs) > 0 else ""
                    if last_msg:
                        self.child.send(last_msg)
                    print('-------- receive final ---------')
                    self.child.send('[Finish]')

    def run(self):
        """
        This function runs in the subprocess.
        """
        # first run: load the parameters
        self.success = False
        self.local_history = []
        if (self.claude_model is None) or (not self.success):
            # proxy settings
            proxies, = get_conf('proxies')
            if proxies is None:
                self.proxies_https = None
            else:
                self.proxies_https = proxies['https']

            try:
                SLACK_CLAUDE_USER_TOKEN, = get_conf('SLACK_CLAUDE_USER_TOKEN')
                self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
                print('Claude组件初始化成功。')
            except:
                self.success = False
                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
                self.child.send(f'[Local Message] 不能加载Claude组件。{tb_str}')
                self.child.send('[Fail]')
                self.child.send('[Finish]')
                raise RuntimeError(f"不能加载Claude组件。")

        self.success = True
        try:
            # enter the task-waiting state
            asyncio.run(self.async_run())
        except Exception:
            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
            self.child.send(f'[Local Message] Claude失败 {tb_str}.')
            self.child.send('[Fail]')
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        """
        This function runs in the main process.
        """
        self.threadLock.acquire()
        self.parent.send(kwargs)    # send the request to the subprocess
        while True:
            res = self.parent.recv()  # wait for a fragment of Claude's reply
            if res == '[Finish]':
                break  # done
            elif res == '[Fail]':
                self.success = False
                break
            else:
                yield res  # a fragment of Claude's reply
        self.threadLock.release()


"""
========================================================================
Part 3: the unified calling interface for the main process
========================================================================
"""
global claude_handle
claude_handle = None


def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    Multi-threaded method.
    See request_llm/bridge_all.py for the function's documentation.
    """
    global claude_handle
    if (claude_handle is None) or (not claude_handle.success):
        claude_handle = ClaudeHandle()
        observe_window[0] = load_message + "\n\n" + claude_handle.info
        if not claude_handle.success:
            error = claude_handle.info
            claude_handle = None
            raise RuntimeError(error)

    # there is no sys_prompt interface, so the prompt is put into the history
    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
    response = ""
    observe_window[0] = "[Local Message]: 等待Claude响应中 ..."
    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        observe_window[0] = preprocess_newbing_out_simple(response)
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("程序终止。")
    return preprocess_newbing_out_simple(response)


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
    """
    Single-threaded method.
    See request_llm/bridge_all.py for the function's documentation.
    """
    chatbot.append((inputs, "[Local Message]: 等待Claude响应中 ..."))

    global claude_handle
    if (claude_handle is None) or (not claude_handle.success):
        claude_handle = ClaudeHandle()
        chatbot[-1] = (inputs, load_message + "\n\n" + claude_handle.info)
        yield from update_ui(chatbot=chatbot, history=[])
        if not claude_handle.success:
            claude_handle = None
            return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompts
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]:
            inputs = core_functional[additional_fn]["PreProcess"](
                inputs)  # fetch the preprocessing function, if there is one
        inputs = core_functional[additional_fn]["Prefix"] + \
            inputs + core_functional[additional_fn]["Suffix"]

    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    chatbot[-1] = (inputs, "[Local Message]: 等待Claude响应中 ...")
    response = "[Local Message]: 等待Claude响应中 ..."
    yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt):
        chatbot[-1] = (inputs, preprocess_newbing_out(response))
        yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
    if response == "[Local Message]: 等待Claude响应中 ...":
        response = "[Local Message]: Claude响应异常,请刷新界面重试 ..."
    history.extend([inputs, response])
    logging.info(f'[raw_input] {inputs}')
    logging.info(f'[response] {response}')
    yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
```
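The handle streams fragments from its subprocess over a Pipe and terminates on the '[Finish]' / '[Fail]' string sentinels; a thread lock serializes callers, which is why concurrent Claude use is warned against in check_dependency. A stripped-down reproduction of just that protocol (a toy worker stands in for the Slack logic):

```python
# Minimal reproduction of ClaudeHandle's parent/child streaming protocol.
from multiprocessing import Process, Pipe

def worker(child):
    for fragment in ["Hel", "Hello", "Hello, world"]:   # cumulative fragments
        child.send(fragment)
    child.send('[Finish]')                              # '[Fail]' on error

if __name__ == '__main__':
    parent, child = Pipe()
    Process(target=worker, args=(child,), daemon=True).start()
    while True:
        res = parent.recv()
        if res in ('[Finish]', '[Fail]'):
            break
        print(res)    # the UI re-renders the newest cumulative text
```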
request_llm/requirements_slackclaude.txt (ADDED)

```
slack-sdk==3.21.3
```
version (CHANGED)

```diff
@@ -1,5 +1,5 @@
 {
-"version": 3.
+"version": 3.35,
 "show_feature": true,
-"new_feature": "
+"new_feature": "添加了OpenAI图片生成插件 <-> 添加了OpenAI音频转文本总结插件 <-> 通过Slack添加对Claude的支持 <-> 提供复旦MOSS模型适配(启用需额外依赖) <-> 提供docker-compose方案兼容LLAMA盘古RWKV等模型的后端 <-> 新增Live2D装饰 <-> 完善对话历史的保存/载入/删除 <-> 保存对话功能"
 }
```