|
import gradio as gr |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
from transformers import pipeline |
|
|
|
# Load the Cerebras-GPT 13B checkpoint and wire it into a generation pipeline.
# NOTE(review): this is a 13B-parameter model — expect a large download and
# significant RAM use at import time.
_MODEL_ID = "cerebras/Cerebras-GPT-13B"

tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)

# Example prompt; not referenced by the Gradio app below (kept for compatibility).
text = "Generative AI is "

# Text-generation pipeline built from the locally loaded model and tokenizer.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
def greet(speech):
    """Return a model-generated continuation of the input string *speech*.

    Decoding is greedy (``do_sample=False``), capped at 50 total tokens
    (prompt included, per ``max_length`` semantics), with repeated 2-grams
    blocked.
    """
    outputs = pipe(
        speech,
        max_length=50,
        do_sample=False,
        no_repeat_ngram_size=2,
    )
    # The pipeline returns a list of candidate dicts; take the first one.
    return outputs[0]["generated_text"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Single text box in, generated continuation out.
iface = gr.Interface(
    fn=greet,
    inputs="text",
    outputs="text",
)
# share=True publishes a temporary public Gradio URL in addition to localhost.
iface.launch(share=True)