init
- app.py +109 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,109 @@
+# Adapted from https://github.com/THUDM/ChatGLM-6B/blob/main/web_demo.py
+
+import argparse
+from pathlib import Path
+
+import chatglm_cpp
+import gradio as gr
+
+import urllib.request
+
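+# Download the quantized ChatGLM3 checkpoint from the Hugging Face Hub at startup.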
+DEFAULT_MODEL_PATH = "chatglm3-6b.bin"
+
+urllib.request.urlretrieve(
+    "https://huggingface.co/Braddy/chatglm3-6b-chitchat/resolve/main/q5_1.bin?download=true",
+    DEFAULT_MODEL_PATH,
+)
+
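+# Generation and runtime settings, all overridable from the command line.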
+parser = argparse.ArgumentParser()
+parser.add_argument("-m", "--model", default=DEFAULT_MODEL_PATH, type=Path, help="model path")
+parser.add_argument("--mode", default="chat", type=str, choices=["chat", "generate"], help="inference mode")
+parser.add_argument("-l", "--max_length", default=2048, type=int, help="max total length including prompt and output")
+parser.add_argument("-c", "--max_context_length", default=512, type=int, help="max context length")
+parser.add_argument("--top_k", default=0, type=int, help="top-k sampling")
+parser.add_argument("--top_p", default=0.7, type=float, help="top-p sampling")
+parser.add_argument("--temp", default=0.95, type=float, help="temperature")
+parser.add_argument("--repeat_penalty", default=1.0, type=float, help="penalize repeat sequence of tokens")
+parser.add_argument("-t", "--threads", default=0, type=int, help="number of threads for inference")
+parser.add_argument("--plain", action="store_true", help="display in plain text without markdown support")
+args = parser.parse_args()
+
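+# Load the quantized model into a chatglm.cpp inference pipeline.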
+pipeline = chatglm_cpp.Pipeline(args.model)
+
+
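+# With --plain, wrap output in <pre> so it displays verbatim instead of as markdown.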
+def postprocess(text):
+    if args.plain:
+        return f"<pre>{text}</pre>"
+    return text
+
+
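+# Stream the model's reply chunk by chunk, yielding after each chunk so the chat UI updates live.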
+def predict(input, chatbot, max_length, top_p, temperature, messages):
+    chatbot.append((postprocess(input), ""))
+    messages.append(chatglm_cpp.ChatMessage(role="user", content=input))
+
+    generation_kwargs = dict(
+        max_length=max_length,
+        max_context_length=args.max_context_length,
+        do_sample=temperature > 0,
+        top_k=args.top_k,
+        top_p=top_p,
+        temperature=temperature,
+        repetition_penalty=args.repeat_penalty,
+        num_threads=args.threads,
+        stream=True,
+    )
+
+    response = ""
+    if args.mode == "chat":
+        chunks = []
+        for chunk in pipeline.chat(messages, **generation_kwargs):
+            response += chunk.content
+            chunks.append(chunk)
+            chatbot[-1] = (chatbot[-1][0], postprocess(response))
+            yield chatbot, messages
+        messages.append(pipeline.merge_streaming_messages(chunks))
+    else:
+        for chunk in pipeline.generate(input, **generation_kwargs):
+            response += chunk
+            chatbot[-1] = (chatbot[-1][0], postprocess(response))
+            yield chatbot, messages
+
+    yield chatbot, messages
+
+
+def reset_user_input():
+    return gr.update(value="")
+
+
+def reset_state():
+    return [], []
+
+
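+# Gradio UI: chat window, input box, sampling sliders, and a clear-history button.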
+with gr.Blocks() as demo:
+    gr.HTML("""<h1 align="center">ChatGLM.cpp</h1>""")
+
+    chatbot = gr.Chatbot()
+    with gr.Row():
+        with gr.Column(scale=4):
+            user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=8)
+            submitBtn = gr.Button("Submit", variant="primary")
+        with gr.Column(scale=1):
+            max_length = gr.Slider(0, 2048, value=args.max_length, step=1.0, label="Maximum Length", interactive=True)
+            top_p = gr.Slider(0, 1, value=args.top_p, step=0.01, label="Top P", interactive=True)
+            temperature = gr.Slider(0, 1, value=args.temp, step=0.01, label="Temperature", interactive=True)
+            emptyBtn = gr.Button("Clear History")
+
+    messages = gr.State([])
+
+    submitBtn.click(
+        predict,
+        [user_input, chatbot, max_length, top_p, temperature, messages],
+        [chatbot, messages],
+        show_progress=True,
+    )
+    submitBtn.click(reset_user_input, [], [user_input])
+
+    emptyBtn.click(reset_state, outputs=[chatbot, messages], show_progress=True)
+
+demo.queue().launch(share=False, inbrowser=True)
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+openai
+tiktoken
+chatglm-cpp
+unstructured
+unstructured[local-inference]
+deepsparse-nightly[llm]