|
""" |
|
来自 https://github.com/OpenLMLab/MOSS/blob/main/moss_web_demo_gradio.py |
|
|
|
|
|
# 难点 |
|
|
|
|
|
|
|
# 单卡报错 |
|
python moss_web_demo_gradio.py --model_name fnlp/moss-moon-003-sft --gpu 0,1,2,3 |
|
|
|
# TODO |
|
- 第一句: |
|
- 代码和表格的预览 |
|
- 可编辑chatbot:https://github.com/gradio-app/gradio/issues/4444 |
|
- 一个button, |
|
|
|
## Reference |
|
|
|
- |
|
""" |
|
|
|
|
|
import gradio as gr |
|
|
|
from models.cpp_qwen2 import bot |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_text(text):
    """Convert markdown-ish model output into HTML for the chatbot widget.

    Adapted from https://github.com/GaiZhenbiao/ChuanhuChatGPT/

    Fenced code blocks (```lang) are turned into
    ``<pre><code class="language-lang">`` sections; lines *inside* a fence
    have markdown/HTML control characters escaped to HTML entities so they
    render literally.  (The original copy of this function had the entity
    strings garbled into no-op replacements like ``replace("<", "<")``.)

    :param text: raw text produced by the model
    :return: single HTML string, non-fence lines joined with ``<br>``
    """
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0  # number of ``` fence lines seen; odd => currently inside a code block
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                # opening fence: text after the backticks is the language tag
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = '<br></code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    # inside a code block: escape everything HTML/markdown might eat
                    line = line.replace("`", r"\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text
|
|
|
|
|
def generate_query(chatbot, history):
    """Stream a machine-generated *user* query and append it to the history.

    Refuses to run when a user turn is already pending (the assistant
    response must be generated first).

    :param chatbot: list of (user, assistant) pairs shown in the UI
    :param history: OpenAI-style list of {"role", "content"} dicts
    :return: generator yielding (query, chatbot, history) per streamed chunk
    """
    if history and history[-1]["role"] == "user":
        gr.Warning('You should generate assistant-response.')
        yield None, chatbot, history
    else:
        chatbot.append(None)  # placeholder row, filled in as tokens stream
        # Guard: without this, an empty streamer left `query` unbound and the
        # history.append below raised NameError.
        query = None
        streamer = bot.generate_query(history, stream=True)
        for query in streamer:
            chatbot[-1] = (query, None)
            yield query, chatbot, history

        history.append({"role": "user", "content": query})
        yield query, chatbot, history
|
|
|
|
|
def generate_response(query, chatbot, history):
    """Stream an assistant reply for the pending user turn.

    Auto mode: ``query`` is None/empty — the user turn was produced by
    ``generate_query`` and already sits at the end of ``history`` (with a
    placeholder row in ``chatbot``).
    Manual mode: ``query`` is the text the user typed; it is appended to
    ``history`` and a fresh chatbot row is opened before generating.

    :param query: typed user input, or None/"" in auto mode
    :param chatbot: list of (user, assistant) pairs shown in the UI
    :param history: OpenAI-style list of {"role", "content"} dicts
    :return: generator yielding (chatbot, history) per streamed chunk
    """
    if query and (not history or history[-1]["role"] != "user"):
        # Manual mode: record the typed turn and open a new chatbot row.
        # (The original assigned chatbot[-1] without appending, which raised
        # IndexError on an empty chatbot and overwrote the previous pair.)
        history.append({"role": "user", "content": query})
        chatbot.append(None)
    # Guard against empty history (original indexed history[-1] unconditionally).
    query = history[-1]["content"] if history else None

    if not history or history[-1]["role"] != "user":
        gr.Warning('You should generate or type user-input first.')
        yield chatbot, history
    else:
        response = None  # guard: stays None if the streamer yields nothing
        streamer = bot.generate_response(history, stream=True)
        for response in streamer:
            chatbot[-1] = (query, response)
            yield chatbot, history

        history.append({"role": "assistant", "content": response})
        print(f"chatbot is {chatbot}")
        print(f"history is {history}")
        yield chatbot, history
|
|
|
|
|
def generate():
    """Placeholder for a one-click "generate query then response" step.

    Not implemented yet; currently a no-op returning None.
    """
    pass
|
|
|
|
|
def regenerate():
    """Delete the previous round and generate it again.

    Not implemented yet; currently a no-op returning None.
    """
    pass
|
|
|
|
|
def reset_user_input():
    """Blank out the user-input textbox after a message is submitted."""
    cleared = gr.update(value='')
    return cleared
|
|
|
|
|
def reset_state(system):
    """Reset the conversation to an empty chat seeded with a system prompt.

    :param system: system-prompt text selected/typed in the dropdown
    :return: (empty chatbot list, fresh history with the system message)
    """
    fresh_history = [{"role": "system", "content": system}]
    return [], fresh_history
|
|
|
|
|
# Preset system prompts offered in the "System message" dropdown;
# the first entry is the default used to seed the initial history.
system_list = [
    "You are a helpful assistant.",
    "你是一个导游。",
    "你是一个英语老师。",
    "你是一个程序员。",
    "你是一个心理咨询师。",
]
|
|
|
""" |
|
TODO: 使用说明 |
|
""" |
|
# Build the Gradio UI: system-prompt dropdown on top, chat window below,
# then a row with the input textbox / action buttons (left) and generator
# settings (right).
with gr.Blocks() as demo:

    gr.HTML("""<h1 align="center">Distilling the Knowledge through Self Chatting</h1>""")
    # System prompt selector; allow_custom_value lets the user type a custom prompt.
    system = gr.Dropdown(
        choices=system_list,
        value=system_list[0],
        allow_custom_value=True,
        interactive=True,
        label="System message"
    )
    chatbot = gr.Chatbot(avatar_images=("assets/man.png", "assets/bot.png"))
    with gr.Row():
        with gr.Column(scale=4):
            user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10)
            with gr.Row():
                generate_query_btn = gr.Button("生成问题")
                # NOTE(review): regen_btn, stop_btn and empty_btn are rendered
                # but have no event handler wired below — confirm intended (TODO).
                regen_btn = gr.Button("🤔️ Regenerate (重试)")
                submit_btn = gr.Button("生成回复", variant="primary")
                stop_btn = gr.Button("停止生成", variant="primary")
                empty_btn = gr.Button("🧹 Clear History (清除历史)")
        with gr.Column(scale=1):
            clear_btn = gr.Button("重置")
            # NOTE(review): these two dropdowns are never read by any callback,
            # and the trailing comma wraps each call in a throwaway tuple —
            # harmless, since Blocks registers components at construction time.
            gr.Dropdown(
                ["moss", "chatglm-2", "chatpdf"],
                value="moss",
                label="问题生成器",
            ),
            gr.Dropdown(
                ["moss", "chatglm-2", "gpt3.5-turbo"],
                value="gpt3.5-turbo",
                label="回复生成器",
            ),

    # Per-session conversation state, seeded with the default system prompt.
    history = gr.State([{"role": "system", "content": system_list[0]}])

    # Changing the system prompt resets both the chat window and the history.
    system.change(reset_state, inputs=[system], outputs=[chatbot, history], show_progress="full")

    submit_btn.click(generate_response, [user_input, chatbot, history], [chatbot, history],
                     show_progress="full")

    clear_btn.click(reset_state, inputs=[system], outputs=[chatbot, history], show_progress="full")

    generate_query_btn.click(generate_query, [chatbot, history], outputs=[user_input, chatbot, history],
                             show_progress="full")

    # NOTE(review): the sampling-parameter sliders below are rendered but not
    # connected to any generation callback (trailing commas again make
    # throwaway tuples) — presumably a TODO; verify before relying on them.
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature",
              info="Larger temperature increase the randomness"),
    gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
    ),

# queue() is required for generator (streaming) callbacks; serve on all interfaces.
demo.queue().launch(share=False, server_name="0.0.0.0")
|
|
|
|