Jerome2046 committed
Commit
c0f9977
1 Parent(s): 32f23a4

Upload 2 files

Files changed (2)
  1. web_demo.py +167 -0
  2. web_demo.sh +9 -0
web_demo.py ADDED
@@ -0,0 +1,167 @@
+ import os, sys
+
+ import gradio as gr
+ import mdtex2html
+
+ import torch
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoModel,
+     AutoTokenizer,
+     DataCollatorForSeq2Seq,
+     HfArgumentParser,
+     Seq2SeqTrainingArguments,
+     set_seed,
+ )
+
+ from arguments import ModelArguments, DataTrainingArguments
+
+
+ model = None
+ tokenizer = None
+
+ """Override Chatbot.postprocess"""
+
+
+ def postprocess(self, y):
+     if y is None:
+         return []
+     for i, (message, response) in enumerate(y):
+         y[i] = (
+             None if message is None else mdtex2html.convert(message),
+             None if response is None else mdtex2html.convert(response),
+         )
+     return y
+
+
+ gr.Chatbot.postprocess = postprocess
+
+
+ def parse_text(text):
+     """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
+     lines = text.split("\n")
+     lines = [line for line in lines if line != ""]
+     count = 0
+     for i, line in enumerate(lines):
+         if "```" in line:
+             count += 1
+             items = line.split('`')
+             if count % 2 == 1:
+                 lines[i] = f'<pre><code class="language-{items[-1]}">'
+             else:
+                 lines[i] = f'<br></code></pre>'
+         else:
+             if i > 0:
+                 if count % 2 == 1:
+                     line = line.replace("`", "\\`")
+                     line = line.replace("<", "&lt;")
+                     line = line.replace(">", "&gt;")
+                     line = line.replace(" ", "&nbsp;")
+                     line = line.replace("*", "&ast;")
+                     line = line.replace("_", "&lowbar;")
+                     line = line.replace("-", "&#45;")
+                     line = line.replace(".", "&#46;")
+                     line = line.replace("!", "&#33;")
+                     line = line.replace("(", "&#40;")
+                     line = line.replace(")", "&#41;")
+                     line = line.replace("$", "&#36;")
+                 lines[i] = "<br>" + line
+     text = "".join(lines)
+     return text
+
+
+ def predict(input, chatbot, max_length, top_p, temperature, history, past_key_values):
+     chatbot.append((parse_text(input), ""))
+     for response, history, past_key_values in model.stream_chat(tokenizer, input, history, past_key_values=past_key_values,
+                                                                 return_past_key_values=True,
+                                                                 max_length=max_length, top_p=top_p,
+                                                                 temperature=temperature):
+         chatbot[-1] = (parse_text(input), parse_text(response))
+
+         # yield inside the loop so the UI streams partial responses as they are generated
+         yield chatbot, history, past_key_values
+
+
+ def reset_user_input():
+     return gr.update(value='')
+
+
+ def reset_state():
+     return [], [], None
+
+
+ with gr.Blocks() as demo:
+     gr.HTML("""<h1 align="center">ChatGLM2-6B</h1>""")
+
+     chatbot = gr.Chatbot()
+     with gr.Row():
+         with gr.Column(scale=4):
+             with gr.Column(scale=12):
+                 # note: .style() is gradio 3.x API (removed in gradio 4)
+                 user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
+                     container=False)
+             with gr.Column(min_width=32, scale=1):
+                 submitBtn = gr.Button("Submit", variant="primary")
+         with gr.Column(scale=1):
+             emptyBtn = gr.Button("Clear History")
+             max_length = gr.Slider(0, 32768, value=8192, step=1.0, label="Maximum length", interactive=True)
+             top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
+             temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
+
+     history = gr.State([])
+     past_key_values = gr.State(None)
+
+     submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
+                     [chatbot, history, past_key_values], show_progress=True)
+     submitBtn.click(reset_user_input, [], [user_input])
+
+     emptyBtn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)
+
+
+ def main():
+     global model, tokenizer
+
+     parser = HfArgumentParser(ModelArguments)
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
+     else:
+         model_args = parser.parse_args_into_dataclasses()[0]
+
+     tokenizer = AutoTokenizer.from_pretrained(
+         model_args.model_name_or_path, trust_remote_code=True)
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, trust_remote_code=True)
+
+     config.pre_seq_len = model_args.pre_seq_len
+     config.prefix_projection = model_args.prefix_projection
+
+     if model_args.ptuning_checkpoint is not None:
+         # Load the base model, then overlay the P-tuning v2 prefix-encoder weights from the checkpoint.
+         print(f"Loading prefix_encoder weight from {model_args.ptuning_checkpoint}")
+         model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
+         prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin"))
+         new_prefix_state_dict = {}
+         for k, v in prefix_state_dict.items():
+             if k.startswith("transformer.prefix_encoder."):
+                 new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
+         model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
+     else:
+         model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
+
+     if model_args.quantization_bit is not None:
+         print(f"Quantized to {model_args.quantization_bit} bit")
+         model = model.quantize(model_args.quantization_bit)
+     model = model.cuda()
+     if model_args.pre_seq_len is not None:
+         # P-tuning v2: keep the prefix encoder in fp32
+         model.transformer.prefix_encoder.float()
+
+     model = model.eval()
+     demo.queue().launch(share=True, inbrowser=True)
+
+
+
+ if __name__ == "__main__":
+     main()
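
web_demo.py imports ModelArguments from a local arguments.py that is not part of this commit (it ships with the ChatGLM P-tuning code). Below is a minimal, hypothetical sketch of that dataclass, limited to the fields the demo above actually reads (model_name_or_path, ptuning_checkpoint, pre_seq_len, prefix_projection, quantization_bit); the defaults and help strings are assumptions, not the original file.

# arguments.py -- hypothetical minimal sketch; the real file comes from the ChatGLM2-6B ptuning directory
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    ptuning_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "Directory containing the P-tuning v2 prefix-encoder checkpoint"}
    )
    pre_seq_len: Optional[int] = field(
        default=None, metadata={"help": "Prefix length used for P-tuning v2"}
    )
    prefix_projection: bool = field(
        default=False, metadata={"help": "Whether to project the prefix embeddings through an MLP"}
    )
    quantization_bit: Optional[int] = field(
        default=None, metadata={"help": "Quantize the base model to this many bits (e.g. 4 or 8)"}
    )
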
web_demo.sh ADDED
@@ -0,0 +1,9 @@
+ # LR and STEPS are leftovers from the training script; web_demo.py does not read them
+ PRE_SEQ_LEN=128
+ LR=1e-4
+ STEPS=600
+
+ CUDA_VISIBLE_DEVICES=0 python3 web_demo.py \
+     --model_name_or_path THUDM/chatglm2-6b \
+     --ptuning_checkpoint ./ \
+     --pre_seq_len $PRE_SEQ_LEN
+
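
main() also reads model_args.quantization_bit and calls model.quantize() when it is set, so a lower-memory launch is possible without editing the script. A hypothetical variant of the command in web_demo.sh (the 4-bit value is an example; the checkpoint path and prefix length must match your own setup):

# Example only: quantize the base model to 4 bits before moving it to the GPU.
# --pre_seq_len must match the value the prefix checkpoint was trained with.
CUDA_VISIBLE_DEVICES=0 python3 web_demo.py \
    --model_name_or_path THUDM/chatglm2-6b \
    --ptuning_checkpoint ./ \
    --pre_seq_len 128 \
    --quantization_bit 4
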