from transformers import GPT2LMHeadModel, GPT2Tokenizer
import gradio as gr
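# Load the pretrained GPT-2 model and its matching tokenizer from the Hugging Face Hub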
model_name = "gpt2"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
def generate(text):
    # Encode the input text into a tensor of token ids
    token_ids = tokenizer.encode(text, return_tensors="pt")
    # Generate a continuation with beam search; no_repeat_ngram_size expects an
    # integer n-gram size (not a boolean), so block repeated bigrams here
    gpt2_tensors = model.generate(token_ids, max_length=200, no_repeat_ngram_size=2, num_beams=3)
    # Decode each generated sequence back into readable text
    response = ""
    for i, x in enumerate(gpt2_tensors):
        response += f"{i}: {tokenizer.decode(x, skip_special_tokens=True)}"
    return response
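# Wire the generate function into a minimal Gradio interface: one text box in, generated text out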
in_text = gr.Textbox(lines=1, label="English", placeholder="English text here")
out = gr.Textbox(lines=1, label="Generated text")
gr.Interface(generate, inputs=in_text, outputs=out).launch()