File size: 1,168 Bytes
9f2d2d6
b30f9c1
 
 
f4932ec
7138875
9ecf0c0
 
b30f9c1
 
 
cc972d1
f4932ec
b30f9c1
 
 
 
 
 
 
 
 
 
 
 
9f2d2d6
 
 
b30f9c1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline

# Checkpoint to serve. The 13B variant was tried previously but is too large
# for this deployment; keep the id in one place so swapping models is a
# one-line change.
MODEL_NAME = "cerebras/Cerebras-GPT-2.7B"

# Load the tokenizer explicitly: when pipeline() receives a model *instance*
# (rather than a model id string) it cannot always infer which tokenizer to
# use and raises at construction time — passing both avoids that failure.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

text = "Generative AI is "  # sample prompt; currently unused by the UI

# Text-generation pipeline shared by the Gradio handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def greet(speech):
    """Continue *speech* with greedy decoding and return the full text.

    Uses the module-level ``pipe`` with deterministic settings: no
    sampling, at most 50 tokens total (prompt included), and no repeated
    2-grams. Returns the generated string (prompt + continuation).
    """
    outputs = pipe(
        speech,
        max_length=50,
        do_sample=False,
        no_repeat_ngram_size=2,
    )
    # pipeline returns a list of candidate dicts; take the first (only) one.
    first = outputs[0]
    return first["generated_text"]

# def greet(speech):
#     inputs = tokenizer(speech, return_tensors="pt")
#     outputs = model.generate(**inputs, num_beams=5, 
#                             max_new_tokens=50, early_stopping=True,
#                             no_repeat_ngram_size=2)
#     text_output = tokenizer.batch_decode(outputs, skip_special_tokens=True)
#     return text_output[0]


# Wire the generator into a minimal UI: one text box in, one text box out.
iface = gr.Interface(
    fn=greet,
    inputs="text",
    outputs="text",
)
# share=True asks Gradio for a temporary public URL in addition to localhost.
iface.launch(share=True)