Zeug committed on
Commit
2abdb37
1 Parent(s): 0e348e9

Create chatyuan_gradio.py

Files changed (1)
  1. chatyuan_gradio.py +296 -0
chatyuan_gradio.py ADDED
@@ -0,0 +1,296 @@
import os
import time
import ftplib
import threading
from tqdm import tqdm  # console progress bar (tqdm.notebook only renders inside Jupyter)
import zipfile
import gradio as gr
import torch
# from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import AutoTokenizer, AutoModel


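# Model bootstrap: if the local model directory is missing, the helpers below
# download a zip archive from an internal FTP server (with a progress bar),
# unpack it next to this script, and delete the archive.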
def get_model_ftp(model_path, model_name):
    ftp = ftplib.FTP('10.209.16.22')
    ftp.login('soltest', 'soltest')
    folder_path = '/ftp/3D/ai-model/ChatYuan/ClueAI/'
    ftp.cwd(folder_path)

    file_list = ftp.nlst(folder_path)
    if os.path.join(folder_path, model_name) in file_list:
        # Get the size of the remote file
        file_size = ftp.size(model_name)

        # Create the local file and open it in binary write mode
        with open(os.path.join(model_path, model_name), 'wb') as f:
            # Download the file while showing a progress bar
            with tqdm.wrapattr(f, 'write', desc="Download " + model_name, total=file_size, unit='B', unit_scale=True) as pbar:
                ftp.retrbinary('RETR ' + model_name, pbar.write)

        ftp.quit()
        unzip(model_path, model_name)


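# unzip extracts the archive on the main thread while a background thread
# animates a "start decompression ..." spinner on the console; the Event
# tells the spinner to stop once extraction finishes (or fails).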
def unzip(path, file_name):
    try:
        stop_unzip = threading.Event()
        thread = threading.Thread(target=print_flush, args=(stop_unzip, "start decompression "))
        thread.start()
        zip_file = zipfile.ZipFile(os.path.join(path, file_name))
        for names in zip_file.namelist():
            zip_file.extract(names, path)
        zip_file.close()

        stop_unzip.set()
        thread.join()

    except Exception as ex:
        stop_unzip.set()
        thread.join()
        os.remove(os.path.join(path, file_name))
        raise Exception("\nunzip failed: " + str(ex))


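# prepare_model expects a "<dir>/<name>" id such as "ClueAI/ChatYuan-large-v2":
# if that directory does not exist yet, it fetches "<name>.zip" over FTP,
# unpacks it, and removes the archive.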
def prepare_model(model_dir):
    model_path = model_dir.split('/')[0]
    model_name = model_dir.split('/')[1]
    if not os.path.exists(model_dir):
        os.makedirs(model_path, exist_ok=True)
        get_model_ftp(model_path, model_name + '.zip')
        os.remove(os.path.join(model_path, model_name + '.zip'))


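# print_flush animates "<msg>.", "<msg>..", "<msg>..." on one console line
# until stop_event is set. The final message assumes msg looks like
# "start <verb> ", e.g. "start decompression " -> "decompression finish."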
def print_flush(stop_event, msg):
    loading_strings = [msg + ".", msg + "..", msg + "...", msg + ".", msg + "..", msg + "..."]
    index = 0

    while not stop_event.is_set():
        loading_str = loading_strings[index]
        print(loading_str, end="\r")
        index = (index + 1) % len(loading_strings)
        time.sleep(0.5)

        # Refresh the loading string every three cycles
        if index == 0:
            print(" " * len(loading_str), end="\r")
            time.sleep(0.2)
            print(loading_strings[index], end="\r")
    print("\n" + msg.split(" ")[1] + " finish.")


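# Fetch the weights if needed, then load tokenizer and model from the local
# directory; trust_remote_code=True lets the checkpoint's own modeling code run.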
model_dir = 'ClueAI/ChatYuan-large-v2'
prepare_model(model_dir)

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# model.half()  # optional: cast to fp16 for GPU inference


def preprocess(text):
    base_info = ""
    text = f"{base_info}{text}"
    text = text.replace("\n", "\\n").replace("\t", "\\t")
    return text


def postprocess(text):
    return text.replace("\\n", "\n").replace("\\t", "\t").replace(
        '%20', ' ')  # .replace(" ", " ")


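# Round trip: preprocess escapes newlines/tabs so the prompt is a single line
# ("a\nb" -> "a\\nb"); postprocess undoes the escaping in the model's reply.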
generate_config = {
    'do_sample': True,
    'top_p': 0.9,
    'top_k': 50,
    'temperature': 0.7,
    'num_beams': 1,
    'max_length': 1024,
    'min_length': 3,
    'no_repeat_ngram_size': 5,
    'length_penalty': 0.6,
    'return_dict_in_generate': True,
    'output_scores': True
}


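# Note: generate_config is currently unused; answer() passes its generation
# arguments explicitly (see the commented-out call inside it).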
def answer(
    text,
    top_p,
    temperature,
    sample=True,
):
    '''
    sample: whether to sample; for generation tasks this can be set to True.
    top_p: between 0 and 1; larger values give more diverse output.
    '''
    text = preprocess(text)
    encoding = tokenizer(text=[text],
                         truncation=True,
                         padding=True,
                         max_length=1024,
                         return_tensors="pt").to(device)
    if not sample:
        out = model.generate(**encoding,
                             return_dict_in_generate=True,
                             output_scores=False,
                             max_new_tokens=1024,
                             num_beams=1,
                             length_penalty=0.6)
    else:
        out = model.generate(**encoding,
                             return_dict_in_generate=True,
                             output_scores=False,
                             max_new_tokens=1024,
                             do_sample=True,
                             top_p=top_p,
                             temperature=temperature,
                             no_repeat_ngram_size=12)
    # out = model.generate(**encoding, **generate_config)
    out_text = tokenizer.batch_decode(out["sequences"],
                                      skip_special_tokens=True)
    return postprocess(out_text[0])


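# Example (sketch): a single-turn call with the prompt format used below,
# assuming the model loaded successfully; the values here are illustrative:
#   reply = answer("用户:你好\n小元:", top_p=0.9, temperature=0.7)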
def clear_session():
    return '', None


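# chatyuan_bot rebuilds the prompt as a "用户:" (user) / "小元:" (bot persona)
# transcript of the last `num` turns, appends the new user message, and asks
# the model to continue after the final "小元:".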
def chatyuan_bot(input, history, top_p, temperature, num):
    history = history or []
    if len(history) > num:
        history = history[-num:]

    context = "\n".join([
        f"用户:{input_text}\n小元:{answer_text}"
        for input_text, answer_text in history
    ])

    input_text = context + "\n用户:" + input + "\n小元:"
    input_text = input_text.strip()
    output_text = answer(input_text, top_p, temperature)
    print("open_model".center(20, "="))
    print(f"{input_text}\n{output_text}")
    history.append((input, output_text))
    return '', history, history


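# The regenerate handler replays the last user message: it pops the most
# recent (input, answer) pair from history and re-runs the same prompt.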
def chatyuan_bot_regenerate(input, history, top_p, temperature, num):
    history = history or []

    if history:
        input = history[-1][0]
        history = history[:-1]

    if len(history) > num:
        history = history[-num:]

    context = "\n".join([
        f"用户:{input_text}\n小元:{answer_text}"
        for input_text, answer_text in history
    ])

    input_text = context + "\n用户:" + input + "\n小元:"
    input_text = input_text.strip()
    output_text = answer(input_text, top_p, temperature)
    print("open_model".center(20, "="))
    print(f"{input_text}\n{output_text}")
    history.append((input, output_text))
    return '', history, history


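# Chat tab: a Chatbot pane plus sliders for the history window (num), top_p,
# and temperature; Enter in the textbox and the Send button run chatyuan_bot.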
block = gr.Blocks()

with block as demo:
    gr.Markdown("""<h1><center>元语智能——ChatYuan</center></h1>
    <font size=4>回答来自ChatYuan, 是模型生成的结果, 请谨慎辨别和参考, 不代表任何人观点 | Answer generated by ChatYuan model</font>
    <font size=4>注意:gradio对markdown代码格式展示有限 | Note: gradio's markdown rendering of code formatting is limited</font>
    """)
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(label='ChatYuan').style(height=400)

        with gr.Column(scale=1):
            num = gr.Slider(minimum=4,
                            maximum=10,
                            label="最大的对话轮数 | Max dialogue turns",
                            value=5,
                            step=1)
            top_p = gr.Slider(minimum=0,
                              maximum=1,
                              label="top_p",
                              value=1,
                              step=0.1)
            temperature = gr.Slider(minimum=0,
                                    maximum=1,
                                    label="temperature",
                                    value=0.7,
                                    step=0.1)
            clear_history = gr.Button("👋 清除历史对话 | Clear History")
            send = gr.Button("🚀 发送 | Send")
            regenerate = gr.Button("🚀 重新生成本次结果 | Regenerate")
    message = gr.Textbox()
    state = gr.State()
    message.submit(chatyuan_bot,
                   inputs=[message, state, top_p, temperature, num],
                   outputs=[message, chatbot, state])
    regenerate.click(chatyuan_bot_regenerate,
                     inputs=[message, state, top_p, temperature, num],
                     outputs=[message, chatbot, state])
    send.click(chatyuan_bot,
               inputs=[message, state, top_p, temperature, num],
               outputs=[message, chatbot, state])

    clear_history.click(fn=clear_session,
                        inputs=[],
                        outputs=[chatbot, state],
                        queue=False)


block = gr.Blocks()
with block as introduction:
    gr.Markdown("""<h1><center>元语智能——ChatYuan</center></h1>

<font size=4>😉ChatYuan: 元语功能型对话大模型 | General Model for Dialogue with ChatYuan
<br>
👏ChatYuan-large-v2是一个支持中英双语的功能型对话语言大模型,是继ChatYuan系列中ChatYuan-large-v1开源后的又一个开源模型。ChatYuan-large-v2使用了和 v1版本相同的技术方案,在微调数据、人类反馈强化学习、思维链等方面进行了优化。
<br>
ChatYuan-large-v2 is an open-source large language model for dialogue in the ChatGPT style that supports both Chinese and English. It uses the same technical approach as v1, with optimizations to the fine-tuning data, reinforcement learning from human feedback, and chain-of-thought.
<br>
ChatYuan-large-v2是ChatYuan系列中以轻量化实现高质量效果的模型之一,用户可以在消费级显卡、PC甚至手机上进行推理(INT4 最低只需 400M)。 | It is one of the ChatYuan-series models that delivers high quality with a lightweight footprint: inference runs on consumer GPUs, PCs, and even phones (as little as 400M with INT4).
<br>
在Chatyuan-large-v1的原有功能的基础上,我们给模型进行了如下优化:
- 新增了中英双语对话能力。
- 新增了拒答能力。对于一些危险、有害的问题,学会了拒答处理。
- 新增了代码生成功能。对于基础代码生成进行了一定程度优化。
- 增强了基础能力。原有上下文问答、创意性写作能力明显提升。
- 新增了表格生成功能。使生成的表格内容和格式更适配。
- 增强了基础数学运算能力。
- 最大长度token数扩展到4096。
- 增强了模拟情景能力。<br>
<br>
Based on the original capabilities of Chatyuan-large-v1, we optimized the model as follows:
- Added the ability to converse in both Chinese and English.
- Added the ability to refuse to answer; the model learned to decline some dangerous and harmful questions.
- Added code generation, with some optimization of basic code generation.
- Enhanced basic capabilities; the original contextual Q&A and creative writing skills improved noticeably.
- Added table generation, making the generated table content and format a better fit.
- Enhanced basic mathematical computation.
- Expanded the maximum token length to 4096.
- Enhanced the ability to simulate scenarios.<br>
<br>
👀<a href='https://www.cluebenchmarks.com/clueai.html'>PromptCLUE-large</a>在1000亿token中文语料上预训练, 累计学习1.5万亿中文token, 并且在数百种任务上进行Prompt任务式训练. 针对理解类任务, 如分类、情感分析、抽取等, 可以自定义标签体系; 针对多种生成任务, 可以进行采样自由生成. | PromptCLUE-large was pre-trained on a 100-billion-token Chinese corpus (1.5 trillion Chinese tokens seen in total) and prompt-tuned on hundreds of tasks: label schemes can be customized for understanding tasks such as classification, sentiment analysis, and extraction, and free-form sampling is available for many generation tasks.<br>
<br>
&nbsp; <a href='https://modelscope.cn/models/ClueAI/ChatYuan-large/summary' target="_blank">ModelScope</a> &nbsp; | &nbsp; <a href='https://huggingface.co/ClueAI/ChatYuan-large-v1' target="_blank">Huggingface</a> &nbsp; | &nbsp; <a href='https://www.clueai.cn' target="_blank">官网体验场</a> &nbsp; | &nbsp; <a href='https://github.com/clue-ai/clueai-python#ChatYuan%E5%8A%9F%E8%83%BD%E5%AF%B9%E8%AF%9D' target="_blank">ChatYuan-API</a> &nbsp; | &nbsp; <a href='https://github.com/clue-ai/ChatYuan' target="_blank">Github项目地址</a> &nbsp; | &nbsp; <a href='https://openi.pcl.ac.cn/ChatYuan/ChatYuan/src/branch/main/Fine_tuning_ChatYuan_large_with_pCLUE.ipynb' target="_blank">OpenI免费试用</a> &nbsp;
</font>
<center><a href="https://clustrmaps.com/site/1bts0" title="Visit tracker"><img src="//www.clustrmaps.com/map_v2.png?d=ycVCe17noTYFDs30w7AmkFaE-TwabMBukDP1802_Lts&cl=ffffff" /></a></center>
""")

gui = gr.TabbedInterface(
    interface_list=[introduction, demo],
    tab_names=["相关介绍 | Introduction", "开源模型 | Online Demo"])
# gui.launch(quiet=True, show_api=False, share=True)
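# The launch call above is left commented out; when running this file as a
# script, uncomment it to start the server (share=True requests a temporary
# public gradio.live URL).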