# Cerebras / app.py — Hugging Face Space demo for Cerebras-GPT text generation
# Author: Sawyer
# refactor: switch to the 2.7B model (commit 9ecf0c0)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline

# Single source of truth for the checkpoint name (was repeated in several
# commented-out lines).
MODEL_NAME = "cerebras/Cerebras-GPT-2.7B"

# BUG FIX: the original called pipeline(..., model=model) with a model *object*
# and no tokenizer. transformers cannot infer a tokenizer from a model instance
# and raises "Impossible to guess which tokenizer to use ..." — so the tokenizer
# must be loaded and passed explicitly alongside the model.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Text-generation pipeline shared by the Gradio handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def greet(speech):
    """Generate a text continuation of *speech* with the Cerebras-GPT pipeline.

    Args:
        speech: Prompt string from the Gradio text input.

    Returns:
        The generated text (prompt included), limited to 50 tokens total.
    """
    # Greedy decoding (do_sample=False); no_repeat_ngram_size=2 forbids any
    # bigram from appearing twice in the output.
    results = pipe(
        speech,
        max_length=50,
        do_sample=False,
        no_repeat_ngram_size=2,
    )
    best = results[0]
    return best["generated_text"]
# def greet(speech):
# inputs = tokenizer(speech, return_tensors="pt")
# outputs = model.generate(**inputs, num_beams=5,
# max_new_tokens=50, early_stopping=True,
# no_repeat_ngram_size=2)
# text_output = tokenizer.batch_decode(outputs, skip_special_tokens=True)
# return text_output[0]
# Wire greet() into a minimal Gradio UI: one text input box, one text output.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# share=True requests a temporary public gradio.live URL in addition to the
# local server (NOTE(review): ignored when running on Hugging Face Spaces).
iface.launch(share=True)