update parameters
app.py CHANGED
@@ -16,17 +16,18 @@ urllib.request.urlretrieve(
 parser = argparse.ArgumentParser()
 parser.add_argument("-m", "--model", default=DEFAULT_MODEL_PATH, type=Path, help="model path")
 parser.add_argument("--mode", default="chat", type=str, choices=["chat", "generate"], help="inference mode")
-parser.add_argument("-l", "--max_length", default=
-parser.add_argument("-c", "--max_context_length", default=
-parser.add_argument("--top_k", default=
+parser.add_argument("-l", "--max_length", default=64, type=int, help="max total length including prompt and output")
+parser.add_argument("-c", "--max_context_length", default=1024, type=int, help="max context length")
+parser.add_argument("--top_k", default=40, type=int, help="top-k sampling")
 parser.add_argument("--top_p", default=0.7, type=float, help="top-p sampling")
-parser.add_argument("--temp", default=0.
+parser.add_argument("--temp", default=0.5, type=float, help="temperature")
 parser.add_argument("--repeat_penalty", default=1.0, type=float, help="penalize repeat sequence of tokens")
 parser.add_argument("-t", "--threads", default=0, type=int, help="number of threads for inference")
 parser.add_argument("--plain", action="store_true", help="display in plain text without markdown support")
 args = parser.parse_args()

 pipeline = chatglm_cpp.Pipeline(args.model)
+system_message = chatglm_cpp.ChatMessage(role="system", content="请你现在扮演一个软件工程师,名字叫做贺英旭。你需要以这个身份和朋友们对话。")


 def postprocess(text):
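This first hunk tightens the sampling defaults (max_length 64, max_context_length 1024, top_k 40, temp 0.5) and adds a fixed persona system message (in English: "Please role-play as a software engineer named He Yingxu. You should talk with friends in that persona."). As a point of reference, here is a minimal sketch of how these defaults reach the model through chatglm_cpp's Python API; it assumes Pipeline.chat accepts the same sampling kwargs the parser defines, and the model path and message texts are placeholders:

```python
# Hypothetical one-off call (not part of the commit) showing the new defaults.
import chatglm_cpp

pipeline = chatglm_cpp.Pipeline("model.bin")  # placeholder model path
messages = [
    chatglm_cpp.ChatMessage(role="system", content="You are a software engineer named He Yingxu."),
    chatglm_cpp.ChatMessage(role="user", content="Hello!"),
]
reply = pipeline.chat(
    messages,
    max_length=64,            # new default: budget for prompt + output combined
    max_context_length=1024,  # new default context window
    top_k=40,                 # new default top-k sampling
    top_p=0.7,                # unchanged
    temperature=0.5,          # new default temperature
)
print(reply.content)
```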
@@ -38,6 +39,7 @@ def postprocess(text):
 def predict(input, chatbot, max_length, top_p, temperature, messages):
     chatbot.append((postprocess(input), ""))
     messages.append(chatglm_cpp.ChatMessage(role="user", content=input))
+    full_messages = [system_message] + messages

     generation_kwargs = dict(
         max_length=max_length,
@@ -52,19 +54,13 @@ def predict(input, chatbot, max_length, top_p, temperature, messages):
     )

     response = ""
-    if args.mode == "chat":
-        chunks = []
-        for chunk in pipeline.chat(messages, **generation_kwargs):
-            response += chunk.content
-            chunks.append(chunk)
-            chatbot[-1] = (chatbot[-1][0], postprocess(response))
-            yield chatbot, messages
-        messages.append(pipeline.merge_streaming_messages(chunks))
-    else:
-        for chunk in pipeline.generate(input, **generation_kwargs):
-            response += chunk
-            chatbot[-1] = (chatbot[-1][0], postprocess(response))
-            yield chatbot, messages
+    chunks = []
+    for chunk in pipeline.chat(full_messages, **generation_kwargs):
+        response += chunk.content
+        chunks.append(chunk)
+        chatbot[-1] = (chatbot[-1][0], postprocess(response))
+        yield chatbot, messages
+    messages.append(pipeline.merge_streaming_messages(chunks))

     yield chatbot, messages

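The rewritten body of predict() drops the mode switch: the "generate" branch is gone, and every request streams through pipeline.chat() with the persona prepended via full_messages. Each streamed chunk updates the chatbot in place, and only after the stream ends is the merged assistant message appended to the history. A compressed sketch of that pattern outside Gradio, assuming generation_kwargs carries stream=True so chat() yields delta ChatMessage chunks (the kwargs themselves sit above this hunk):

```python
# Continuing the sketch above: streaming variant of the same chat call.
chunks = []
response = ""
for chunk in pipeline.chat(messages, stream=True, max_length=64):
    response += chunk.content  # running text, suitable for live UI updates
    chunks.append(chunk)       # raw deltas kept for merging
# Fold all deltas into one assistant ChatMessage for the history; note the
# persona system message itself is never stored in the history list.
messages.append(pipeline.merge_streaming_messages(chunks))
print(messages[-1].content)
```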
@@ -76,9 +72,16 @@ def reset_user_input():
 def reset_state():
     return [], []

+title = """
+<div style="text-align: center;max-width: 700px;">
+<h1>Chichat</h1>
+<p style="text-align: center;">Feel free to talk about anything :)</p>
+</div>
+"""
+

 with gr.Blocks() as demo:
-    gr.HTML(
+    gr.HTML(title)

     chatbot = gr.Chatbot()
     with gr.Row():
@@ -86,7 +89,7 @@ with gr.Blocks() as demo:
             user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=8)
             submitBtn = gr.Button("Submit", variant="primary")
         with gr.Column(scale=1):
-            max_length = gr.Slider(0,
+            max_length = gr.Slider(0, 512, value=args.max_length, step=1.0, label="Maximum Length", interactive=True)
             top_p = gr.Slider(0, 1, value=args.top_p, step=0.01, label="Top P", interactive=True)
             temperature = gr.Slider(0, 1, value=args.temp, step=0.01, label="Temperature", interactive=True)
             emptyBtn = gr.Button("Clear History")
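One detail worth noting in this last hunk: the Maximum Length slider's ceiling drops to 512, and its initial value tracks the new CLI default of 64, so out of the box the demo generates quite short replies. The bindings that feed these widgets into predict() fall outside the diff; the following is a plausible wiring, where the gr.State holding the history is an assumption inferred from predict()'s signature:

```python
# Hypothetical wiring (not shown in this diff): a gr.State list backs the
# `messages` argument of predict(); the sliders pass their current values.
messages = gr.State([])
submitBtn.click(
    predict,
    inputs=[user_input, chatbot, max_length, top_p, temperature, messages],
    outputs=[chatbot, messages],
)
```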