Commit 4475ab1 by Tuchuanhuhuhu
Parent(s): c9610d9

Unify variable names and revise some wording
Files changed:
- ChuanhuChatbot.py (+4 -4)
- modules/base_model.py (+5 -5)
- modules/models.py (+5 -5)
- modules/presets.py (+17 -18)
ChuanhuChatbot.py
CHANGED
@@ -29,7 +29,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 
     with gr.Row():
         with gr.Column():
-            gr.HTML(
+            gr.HTML(CHUANHU_TITLE)
             user_info = gr.Markdown(value="", elem_id="user_info")
             status_display = gr.Markdown(get_geoip(), elem_id="status_display")
 
@@ -82,7 +82,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
                         label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0], interactive=True
                     )
                     use_streaming_checkbox = gr.Checkbox(
-                        label="实时传输回答", value=True, visible=
+                        label="实时传输回答", value=True, visible=ENABLE_STREAMING_OPTION
                     )
                     use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)
                     language_select_dropdown = gr.Dropdown(
@@ -196,8 +196,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
                     )
                     changeProxyBtn = gr.Button("🔄 设置代理地址")
 
-    gr.Markdown(
-    gr.HTML(
+    gr.Markdown(CHUANHU_DESCRIPTION)
+    gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer")
    chatgpt_predict_args = dict(
        fn=current_model.value.predict,
        inputs=[
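The UI strings that were previously inlined in ChuanhuChatbot.py now come from the renamed constants in modules/presets.py. Below is a minimal standalone sketch of how those constants slot into a Gradio page; it is a toy layout under assumed stand-in values, not the project's full demo, and versions_html is stubbed.

import gradio as gr

# Stand-ins for modules.presets / modules.utils; the real values are
# defined in the modules/presets.py diff further down this commit.
CHUANHU_TITLE = """<h1 align="left">川虎ChatGPT 🚀</h1>"""
CHUANHU_DESCRIPTION = "由Bilibili 土川虎虎虎 和 明昭MZhao 开发"
FOOTER = """<div class="versions">{versions}</div>"""
ENABLE_STREAMING_OPTION = True

def versions_html():
    # Placeholder for the project's versions_html() helper.
    return "gradio: " + gr.__version__

with gr.Blocks() as demo:
    gr.HTML(CHUANHU_TITLE)  # page title, was an inline gr.HTML(...) call
    use_streaming_checkbox = gr.Checkbox(
        label="实时传输回答", value=True, visible=ENABLE_STREAMING_OPTION
    )
    gr.Markdown(CHUANHU_DESCRIPTION)  # project description block
    gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer")

if __name__ == "__main__":
    demo.launch()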
modules/base_model.py
CHANGED
@@ -72,7 +72,7 @@ class BaseLLMModel:
 
     def billing_info(self):
         """get billing infomation, inplement if needed"""
-        return
+        return BILLING_NOT_APPLICABLE_MSG
 
 
     def count_token(self, user_input):
@@ -221,7 +221,7 @@ class BaseLLMModel:
         display_reference = ""
 
         if len(self.api_key) == 0 and not shared.state.multi_api_key:
-            status_text =
+            status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
             logging.info(status_text)
             chatbot.append((inputs, ""))
             if len(self.history) == 0:
@@ -233,7 +233,7 @@ class BaseLLMModel:
             yield chatbot + [(inputs, "")], status_text
             return
         elif len(inputs.strip()) == 0:
-            status_text =
+            status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
             logging.info(status_text)
             yield chatbot + [(inputs, "")], status_text
             return
@@ -299,7 +299,7 @@ class BaseLLMModel:
        ):
            logging.info("重试中……")
            if len(self.history) == 0:
-                yield chatbot, f"{
+                yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
                return
 
            del self.history[-2:]
@@ -362,7 +362,7 @@ class BaseLLMModel:
         return self.token_message()
 
     def delete_last_conversation(self, chatbot):
-        if len(chatbot) > 0 and
+        if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
             msg = "由于包含报错信息,只删除chatbot记录"
             chatbot.pop()
             return chatbot, self.history
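The modules/base_model.py change follows one convention: every status message is built from the shared STANDARD_ERROR_MSG prefix plus a specific message, and delete_last_conversation later recognizes an errored turn by looking for that prefix. A minimal sketch of the convention, with the constants copied from the modules/presets.py hunk below; the helper names here are illustrative, not the project's methods.

# Constants as introduced in modules/presets.py by this commit.
STANDARD_ERROR_MSG = "☹️发生了错误:"   # standard prefix for every error status
NO_APIKEY_MSG = "API key长度不是51位,请检查是否输入正确。"
NO_INPUT_MSG = "请输入对话内容。"

def status_for(api_key: str, user_input: str) -> str:
    """Compose a status string the way the predict() checks do:
    shared prefix + specific message; empty string means no error."""
    if len(api_key) == 0:
        return STANDARD_ERROR_MSG + NO_APIKEY_MSG
    if len(user_input.strip()) == 0:
        return STANDARD_ERROR_MSG + NO_INPUT_MSG
    return ""

def last_reply_errored(chatbot) -> bool:
    """Same prefix check delete_last_conversation() now uses to decide
    whether only the chatbot entry (and not the history) is removed."""
    return len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]

# Example: an empty-input turn produces an errored reply, which the
# prefix check then recognizes.
chatbot = [("", status_for("sk-xxx", ""))]
assert last_reply_errored(chatbot)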
modules/models.py
CHANGED
@@ -47,7 +47,7 @@ class OpenAIClient(BaseLLMModel):
                partial_text += i
                yield partial_text
        else:
-            yield
+            yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG
 
    def get_answer_at_once(self):
        response = self._get_response()
@@ -77,14 +77,14 @@ class OpenAIClient(BaseLLMModel):
            rounded_usage = "{:.5f}".format(usage_data['total_usage']/100)
            return f"**本月使用金额** \u3000 ${rounded_usage}"
        except requests.exceptions.ConnectTimeout:
-            status_text =
+            status_text = STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
            return status_text
        except requests.exceptions.ReadTimeout:
-            status_text =
+            status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
            return status_text
        except Exception as e:
            logging.error(f"获取API使用情况失败:"+str(e))
-            return
+            return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG
 
    @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用
    def _get_response(self, stream=False):
@@ -114,7 +114,7 @@ class OpenAIClient(BaseLLMModel):
            "frequency_penalty": 0,
        }
        if stream:
-            timeout =
+            timeout = TIMEOUT_STREAMING
        else:
            timeout = TIMEOUT_ALL
 
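In modules/models.py the request timeout now depends on whether the reply is streamed: TIMEOUT_STREAMING (10 s) for streaming, TIMEOUT_ALL (200 s) otherwise, with the chosen value presumably passed to the HTTP call. A self-contained sketch of that branch follows; the endpoint, headers, and payload are illustrative assumptions, not copied from the project's _get_response.

import requests

# Values introduced in modules/presets.py by this commit.
TIMEOUT_STREAMING = 10   # timeout for streamed replies
TIMEOUT_ALL = 200        # timeout for one-shot replies

def post_chat(payload: dict, api_key: str, stream: bool = False):
    """Illustrative only: pick the timeout the way _get_response() does,
    then hand it to requests. URL and headers mirror the OpenAI chat API
    that OpenAIClient talks to, but are assumptions here."""
    timeout = TIMEOUT_STREAMING if stream else TIMEOUT_ALL
    return requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers={"Authorization": f"Bearer {api_key}"},
        json=payload,
        stream=stream,
        timeout=timeout,
    )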
modules/presets.py
CHANGED
@@ -13,43 +13,42 @@ HISTORY_DIR = "history"
 TEMPLATES_DIR = "templates"
 
 # 错误信息
-
-
-
-
-
-
-
-
-
-
-
-
+STANDARD_ERROR_MSG = "☹️发生了错误:" # 错误信息的标准前缀
+GENERAL_ERROR_MSG = "获取对话时发生错误,请查看后台日志"
+ERROR_RETRIEVE_MSG = "请检查网络连接,或者API-Key是否有效。"
+CONNECTION_TIMEOUT_MSG = "连接超时,无法获取对话。" # 连接超时
+READ_TIMEOUT_MSG = "读取超时,无法获取对话。" # 读取超时
+PROXY_ERROR_MSG = "代理错误,无法获取对话。" # 代理错误
+SSL_ERROR_PROMPT = "SSL错误,无法获取对话。" # SSL 错误
+NO_APIKEY_MSG = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位
+NO_INPUT_MSG = "请输入对话内容。" # 未输入对话内容
+BILLING_NOT_APPLICABLE_MSG = "模型本地运行中" # 本地运行的模型返回的账单信息
+
+TIMEOUT_STREAMING = 10 # 流式对话时的超时时间
 TIMEOUT_ALL = 200 # 非流式对话时的超时时间
-
+ENABLE_STREAMING_OPTION = True # 是否启用选择选择是否实时显示回答的勾选框
 HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
 CONCURRENT_COUNT = 100 # 允许同时使用的用户数量
 
 SIM_K = 5
 INDEX_QUERY_TEMPRATURE = 1.0
 
-
-
+CHUANHU_TITLE = """<h1 align="left" style="min-width:200px; margin-top:6px; white-space: nowrap;">川虎ChatGPT 🚀</h1>"""
+CHUANHU_DESCRIPTION = """\
 <div align="center" style="margin:16px 0">
 
 由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
 
 访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本
 
-此App使用 `gpt-3.5-turbo` 大语言模型
 </div>
 """
 
-
+FOOTER = """\
 <div class="versions">{versions}</div>
 """
 
-
+SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt
 
 MODELS = [
     "gpt-3.5-turbo",
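Taken together with the modules/base_model.py hunk above, billing_info now defaults to BILLING_NOT_APPLICABLE_MSG, so locally running models inherit a sensible answer while API-backed clients such as OpenAIClient override it with real usage data. A hedged sketch of that arrangement: the two subclasses are hypothetical stand-ins, and the base class is stripped down to the one method.

BILLING_NOT_APPLICABLE_MSG = "模型本地运行中"  # from modules/presets.py above

class BaseLLMModel:
    # Stripped-down stand-in for the project's base class.
    def billing_info(self):
        """Default: no billing applies (e.g. locally running models)."""
        return BILLING_NOT_APPLICABLE_MSG

class LocalDemoModel(BaseLLMModel):
    # Hypothetical local model: inherits the default billing_info().
    pass

class RemoteDemoClient(BaseLLMModel):
    # Hypothetical API-backed client: overrides billing_info() the way
    # OpenAIClient does, returning a usage figure instead.
    def billing_info(self):
        rounded_usage = "{:.5f}".format(123.0 / 100)
        return f"**本月使用金额** \u3000 ${rounded_usage}"

print(LocalDemoModel().billing_info())    # -> 模型本地运行中
print(RemoteDemoClient().billing_info())  # -> **本月使用金额** 　 $1.23000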