Update app.py
app.py
CHANGED
@@ -1,42 +1,29 @@
- (old lines 1-29: content not captured in this view)
-    prediction = cl.generate(
-        model_name='clueai-base',
-        prompt=prompt)
-
-    return "小矢机器人:" + prediction.generations[0].text + "!"
-
-with gr.Blocks() as demo:
-    question = gr.Textbox(label="Question")
-    output = gr.Textbox(label="Answer Box")
-    greet_btn = gr.Button("Ask")
-    greet_btn.click(fn=greet, inputs=question, outputs=output)
-
-demo.launch()
+# Load the model
+from transformers import T5Tokenizer, T5ForConditionalGeneration
+tokenizer = T5Tokenizer.from_pretrained("ClueAI/ChatYuan-large-v1")
+model = T5ForConditionalGeneration.from_pretrained("ClueAI/ChatYuan-large-v1")
+# Usage
+import torch
+from transformers import AutoTokenizer
+# Switch the Colab notebook runtime to GPU for faster inference
+device = torch.device('cuda')
+model.to(device)
+def preprocess(text):
+    text = text.replace("\n", "\\n").replace("\t", "\\t")
+    return text
+
+def postprocess(text):
+    return text.replace("\\n", "\n").replace("\\t", "\t")
+
+def answer(text, sample=True, top_p=1, temperature=0.7):
+    '''sample: whether to sample; for a generation task this can be set to True.
+    top_p: between 0 and 1; the higher it is, the more diverse the output.'''
+    text = preprocess(text)
+    encoding = tokenizer(text=[text], truncation=True, padding=True, max_length=768, return_tensors="pt").to(device)
+    if not sample:
+        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_new_tokens=512, num_beams=1, length_penalty=0.6)
+    else:
+        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_new_tokens=512, do_sample=True, top_p=top_p, temperature=temperature, no_repeat_ngram_size=3)
+    out_text = tokenizer.batch_decode(out["sequences"], skip_special_tokens=True)
+    return postprocess(out_text[0])
+print("end...")
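The commit also drops the Gradio interface that the old app.py launched. If the Space still needs a UI, one way to rewire the removed gr.Blocks layout to the new local model is sketched below; the greet wrapper is a hypothetical stand-in for the deleted ClueAI-based one, and it assumes gradio remains installed:

    import gradio as gr

    # Hypothetical replacement for the deleted greet(): answer locally with
    # ChatYuan instead of calling the removed cl.generate() ClueAI API.
    def greet(prompt):
        return "小矢机器人:" + answer(prompt) + "!"

    with gr.Blocks() as demo:
        question = gr.Textbox(label="Question")
        output = gr.Textbox(label="Answer Box")
        greet_btn = gr.Button("Ask")
        greet_btn.click(fn=greet, inputs=question, outputs=output)

    demo.launch()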