import streamlit as st
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Use the GPU when one is available, otherwise fall back to the CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

st.set_page_config(
    page_title="GPT-2 Demo",
    page_icon=":robot_face:",
    layout="wide")

st.title("GPT-2 Text Generation Demo")
st.info("This is a GPT-2 text generation example using the Hugging Face GPT-2 model")

pretrained = "gpt2-large"
tokenizer = GPT2Tokenizer.from_pretrained(pretrained)
# GPT-2 has no pad token, so reuse the end-of-sequence token for padding,
# and move the model to the selected device so it matches the input tensors.
model = GPT2LMHeadModel.from_pretrained(pretrained, pad_token_id=tokenizer.eos_token_id).to(device)

sentence = st.text_input('Input your sentence here:', value='My favorite ice cream flavor is ')
st.info("Maximum generated length: 100 tokens")

if st.button("Generate"):
    # Encode the prompt and move it to the same device as the model.
    input_ids = tokenizer.encode(sentence, return_tensors='pt').to(device)
    # Beam search with 5 beams, blocking repeated bigrams and stopping early
    # once all beams are finished.
    paragraph_generated = model.generate(
        input_ids,
        max_length=100,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True)
    text = tokenizer.decode(paragraph_generated[0], skip_special_tokens=True)
    st.write(text)
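
Optional refinement, not part of the original app: Streamlit reruns the whole script on every interaction, so the code above reloads gpt2-large each time the user presses the button. Below is a minimal sketch of how the load could be cached with Streamlit's st.cache_resource so it happens only once per process; load_model is a hypothetical helper introduced here for illustration.

import streamlit as st
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

@st.cache_resource  # cache the loaded tokenizer/model across Streamlit reruns
def load_model(name: str = "gpt2-large"):
    # Hypothetical helper (not in the original app): load once and reuse.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    tokenizer = GPT2Tokenizer.from_pretrained(name)
    model = GPT2LMHeadModel.from_pretrained(name, pad_token_id=tokenizer.eos_token_id).to(device)
    return tokenizer, model, device

tokenizer, model, device = load_model()

Assuming the script is saved as app.py (the conventional entry point for a Streamlit Space), it can be started locally with "streamlit run app.py".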