Tuchuanhuhuhu committed · Commit 0bdeb88 · Parent: 9cfd027

Prepare for GPT-4: add a model selection feature.
- ChuanhuChatbot.py +11 -9
- Dockerfile 2 +14 -0
- presets.py +1 -0
- requirements 2.txt +1 -0
- utils.py +13 -13
ChuanhuChatbot.py
CHANGED
@@ -51,10 +51,12 @@ with gr.Blocks(css=customCSS) as demo:
 
     gr.HTML(title)
     with gr.Row():
-        with gr.Column(…
-            keyTxt = gr.Textbox(show_label=…
-        with gr.Column(…
-            …
+        with gr.Column():
+            keyTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入你的OpenAI API-key...",value=my_api_key, type="password", visible=not HIDE_MY_KEY, label="API-Key")
+        with gr.Column():
+            with gr.Row():
+                model_select_dropdown = gr.Dropdown(label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0])
+                use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
     chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B"))
     with gr.Row():
         with gr.Column(scale=12):

@@ -108,20 +110,20 @@ with gr.Blocks(css=customCSS) as demo:
     gr.Markdown(description)
 
 
-    user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+    user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)
     user_input.submit(reset_textbox, [], [user_input])
 
-    submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+    submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)
     submitBtn.click(reset_textbox, [], [user_input])
 
     emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True)
 
-    retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+    retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)
 
-    delLastBtn.click(delete_last_conversation, [chatbot, history, token_count, use_streaming_checkbox], [
+    delLastBtn.click(delete_last_conversation, [chatbot, history, token_count, use_streaming_checkbox, model_select_dropdown], [
         chatbot, history, token_count, status_display], show_progress=True)
 
-    reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+    reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)
 
     saveHistoryBtn.click(save_chat_history, [
         saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
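The wiring here is the standard Gradio pattern: the new model_select_dropdown is appended to each handler's inputs list, and Gradio passes the dropdown's current value to the callback as one extra positional argument. A minimal, self-contained sketch of the same pattern (component and function names below are illustrative, not the app's):

import gradio as gr

MODELS = ["gpt-3.5-turbo"]  # mirrors the list added to presets.py

def respond(message, model):
    # `model` arrives as the dropdown's current value, just as
    # `model_select_dropdown` does for predict() in the diff above.
    return f"[{model}] {message}"

with gr.Blocks() as demo:
    model_select = gr.Dropdown(label="Model", choices=MODELS, value=MODELS[0], multiselect=False)
    user_box = gr.Textbox(label="Input")
    answer_box = gr.Textbox(label="Answer")
    user_box.submit(respond, [user_box, model_select], [answer_box])

if __name__ == "__main__":
    demo.launch()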
Dockerfile 2
ADDED
@@ -0,0 +1,14 @@
+FROM python:3.9 as builder
+RUN apt-get update && apt-get install -y build-essential
+COPY requirements.txt .
+RUN pip install --user -r requirements.txt
+
+FROM python:3.9
+MAINTAINER iskoldt
+COPY --from=builder /root/.local /root/.local
+ENV PATH=/root/.local/bin:$PATH
+COPY . /app
+WORKDIR /app
+ENV my_api_key empty
+ENV dockerrun yes
+CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
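One caveat: the exec-form CMD on the last line does not invoke a shell, so "2>&1", "|", and "tee" are passed to python3 as literal arguments; the app still starts, but the log pipeline never runs. A shell-form variant that would actually redirect and pipe (a sketch of the apparent intent, not part of this commit):

CMD python3 -u ChuanhuChatbot.py 2>&1 | tee /var/log/application.log

MAINTAINER is likewise deprecated; the current equivalent is LABEL maintainer="iskoldt".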
presets.py
CHANGED
@@ -32,6 +32,7 @@ pre code {
 """
 
 summarize_prompt = "请总结以上对话,不超过100字。" # 总结对话时的 prompt
+MODELS = ["gpt-3.5-turbo"] # 可选的模型
 
 # 错误信息
 standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀
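MODELS is the single source of truth for the new dropdown, which is what "preparing for GPT-4" amounts to in this commit: once the API grants access, enabling another model is a one-line edit. A hypothetical extension (the extra entry is an assumption, not in this commit):

MODELS = ["gpt-3.5-turbo", "gpt-4"]  # selectable models; the dropdown in ChuanhuChatbot.py picks these up automatically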
requirements 2.txt
ADDED
@@ -0,0 +1 @@
+git+https://github.com/GaiZhenbiao/gradio.git
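A git+https requirement tells pip to clone and build the package at install time, so this pins the app to the author's Gradio fork rather than the PyPI release. For reproducible builds it is safer to pin a ref as well (the suffix below is a placeholder, not taken from this commit):

git+https://github.com/GaiZhenbiao/gradio.git@<commit-or-tag>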
utils.py
CHANGED
@@ -99,7 +99,7 @@ def construct_assistant(text):
 def construct_token_message(token, stream=False):
     return f"Token 计数: {token}"
 
-def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
+def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model):
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {openai_api_key}"

@@ -108,7 +108,7 @@ def get_response(openai_api_key, system_prompt, history, temperature, top_p, str
     history = [construct_system(system_prompt), *history]
 
     payload = {
-        "model": …
+        "model": selected_model,
         "messages": history, # [{"role": "user", "content": f"{inputs}"}],
         "temperature": temperature, # 1.0,
         "top_p": top_p, # 1.0,

@@ -124,7 +124,7 @@ def get_response(openai_api_key, system_prompt, history, temperature, top_p, str
     response = requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=timeout)
     return response
 
-def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature):
+def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
     def get_return_value():
         return chatbot, history, status_text, all_token_counts
 

@@ -145,7 +145,7 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_
     print(f"输入token计数: {user_token_count}")
     yield get_return_value()
     try:
-        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True)
+        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True, selected_model)
     except requests.exceptions.ConnectTimeout:
         status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
         yield get_return_value()

@@ -192,14 +192,14 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_
     yield get_return_value()
 
 
-def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature):
+def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
     print("一次性回答模式")
     history.append(construct_user(inputs))
     history.append(construct_assistant(""))
     chatbot.append((parse_text(inputs), ""))
     all_token_counts.append(count_token(inputs))
     try:
-        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False)
+        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False, selected_model)
     except requests.exceptions.ConnectTimeout:
         status_text = standard_error_msg + error_retrieve_prompt
         return chatbot, history, status_text, all_token_counts

@@ -218,7 +218,7 @@ def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_tok
     return chatbot, history, status_text, all_token_counts
 
 
-def predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, stream=False, should_check_token_count = True): # repetition_penalty, top_k
+def predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model = MODELS[0], should_check_token_count = True): # repetition_penalty, top_k
     print("输入为:" +colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
     if len(openai_api_key) != 51:
         status_text = standard_error_msg + no_apikey_msg

@@ -232,12 +232,12 @@ def predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_c
     yield chatbot, history, "开始生成回答……", all_token_counts
     if stream:
         print("使用流式传输")
-        iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature)
+        iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
         for chatbot, history, status_text, all_token_counts in iter:
             yield chatbot, history, status_text, all_token_counts
     else:
         print("不使用流式传输")
-        chatbot, history, status_text, all_token_counts = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature)
+        chatbot, history, status_text, all_token_counts = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
         yield chatbot, history, status_text, all_token_counts
     print(f"传输完毕。当前token计数为{all_token_counts}")
     if len(history) > 1 and history[-1]['content'] != inputs:

@@ -254,7 +254,7 @@ def predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_c
     yield chatbot, history, status_text, all_token_counts
 
 
-def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False):
+def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model = MODELS[0]):
     print("重试中……")
     if len(history) == 0:
         yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count

@@ -262,15 +262,15 @@ def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, t
     history.pop()
     inputs = history.pop()["content"]
     token_count.pop()
-    iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream)
+    iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model)
     print("重试完毕")
     for x in iter:
         yield x
 
 
-def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False):
+def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False, selected_model = MODELS[0]):
     print("开始减少token数量……")
-    iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, should_check_token_count=False)
+    iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, selected_model = selected_model, should_check_token_count=False)
     for chatbot, history, status_text, previous_token_count in iter:
         history = history[-2:]
         token_count = previous_token_count[-1:]
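Every hunk in this file threads the one new selected_model argument from the UI callbacks down to the HTTP payload. Stripped of the app's plumbing, the resulting request looks roughly like this; the payload fields and stream flag follow the diff, while the endpoint constant, header set, and timeout value are assumptions for the sketch:

import requests

API_URL = "https://api.openai.com/v1/chat/completions"  # standard chat-completions endpoint
MODELS = ["gpt-3.5-turbo"]

def get_response(openai_api_key, history, temperature, top_p, stream, selected_model=MODELS[0]):
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }
    payload = {
        "model": selected_model,  # the only payload field this commit changes
        "messages": history,
        "temperature": temperature,
        "top_p": top_p,
        "stream": stream,
    }
    return requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=30)

Two pre-existing smells survive the commit unchanged: iter = predict(...) shadows the built-in iter(), and len(openai_api_key) != 51 hard-codes a key length that OpenAI does not guarantee.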
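stream_predict depends on stream=True plus the server-sent-events framing of the chat-completions API. A sketch of how such a response is typically consumed, assuming the standard `data: {json}` / `data: [DONE]` lines (this helper is hypothetical, not code from the commit):

import json

def iter_content_deltas(response):
    # Walk the SSE lines of a streamed chat-completions response and
    # yield only the incremental text pieces from each delta.
    for line in response.iter_lines():
        if not line:
            continue
        data = line.decode("utf-8").removeprefix("data: ")
        if data == "[DONE]":
            break
        delta = json.loads(data)["choices"][0]["delta"]
        if "content" in delta:
            yield delta["content"]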