Sawyer
committed on
Commit
•
b30f9c1
1
Parent(s):
9f2d2d6
feat: 增加 Cerebras 模型调用
Browse files
app.py
CHANGED
@@ -1,7 +1,26 @@
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
-
def greet(name):
    """Return a cheery greeting addressed to *name*."""
    return f"Hello {name}!!"
|
5 |
|
6 |
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
|
7 |
-
iface.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
+
from transformers import pipeline
|
4 |
+
|
5 |
+
# Load the Cerebras-GPT-13B tokenizer and causal-LM weights from the
# Hugging Face Hub.
# NOTE(review): 13B parameters — this download/load is extremely heavy;
# confirm the host has the RAM/disk for it before deploying.
tokenizer = AutoTokenizer.from_pretrained("cerebras/Cerebras-GPT-13B")
model = AutoModelForCausalLM.from_pretrained("cerebras/Cerebras-GPT-13B")

# NOTE(review): `text` is assigned but never used anywhere below —
# candidate for removal.
text = "Generative AI is "

# Text-generation pipeline wrapping the model/tokenizer pair; greet() calls
# this to produce continuations.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
11 |
+
|
12 |
+
def greet(speech):
    """Generate a text continuation of *speech* with Cerebras-GPT-13B.

    Args:
        speech: Prompt string typed into the Gradio text box.

    Returns:
        The generated text as returned by the pipeline (the prompt is
        included at the start of the returned string).
    """
    # Fix: the previous max_length=50 counted the prompt tokens too, so a
    # prompt near/over 50 tokens left no budget to generate anything
    # (transformers warns or truncates). max_new_tokens bounds only the
    # *generated* tokens, independent of prompt length.
    # do_sample=False keeps output deterministic (greedy decoding);
    # no_repeat_ngram_size=2 suppresses repeated 2-grams.
    result = pipe(
        speech,
        max_new_tokens=50,
        do_sample=False,
        no_repeat_ngram_size=2,
    )[0]
    return result["generated_text"]
|
15 |
+
|
16 |
+
# def greet(speech):
|
17 |
+
# inputs = tokenizer(speech, return_tensors="pt")
|
18 |
+
# outputs = model.generate(**inputs, num_beams=5,
|
19 |
+
# max_new_tokens=50, early_stopping=True,
|
20 |
+
# no_repeat_ngram_size=2)
|
21 |
+
# text_output = tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
22 |
+
# return text_output[0]
|
23 |
|
|
|
|
|
24 |
|
25 |
# Minimal Gradio UI: a single text input (the prompt) mapped through
# greet() to a single text output.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# share=True requests a public *.gradio.live tunnel in addition to the
# local server. NOTE(review): this is unnecessary (and ignored) when the
# app runs on Hugging Face Spaces — confirm it is wanted here.
iface.launch(share=True)
|