# app.py — Gradio demo for the Kumarkishalaya/GPT-2-next-word-prediction model
# (Hugging Face Space; revision 6b02864)
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import gradio as gr
import torch
# Fine-tuned GPT-2 (next-word prediction) and its tokenizer.
trained_tokenizer = GPT2Tokenizer.from_pretrained("Kumarkishalaya/GPT-2-next-word-prediction")
trained_model = GPT2LMHeadModel.from_pretrained("Kumarkishalaya/GPT-2-next-word-prediction")

# Baseline GPT-2 for comparison.
# BUG FIX: the original assigned a GPT2Tokenizer to `untrained_model` and the
# bare string "gpt2" to `untrained_tokenizer` — the two were swapped/broken.
untrained_model = GPT2LMHeadModel.from_pretrained("gpt2")
untrained_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Run on GPU when available; move the models there so inputs and weights
# live on the same device (the original never moved the models, which
# crashes on a CUDA machine when generate() sends inputs to "cuda").
device = "cuda" if torch.cuda.is_available() else "cpu"
trained_model.to(device)
untrained_model.to(device)
def generate(commentary_text):
    """Continue `commentary_text` with the fine-tuned GPT-2 model.

    Runs deterministic beam search (5 beams, no sampling) up to 60 total
    tokens and returns the decoded continuation, prompt included.
    """
    encoded = trained_tokenizer(commentary_text, return_tensors="pt")
    # Move the input to whichever device the model actually lives on —
    # the original moved it to the module-level `device` while the model
    # could still be on CPU, causing a device-mismatch error under CUDA.
    input_ids = encoded["input_ids"].to(trained_model.device)
    output = trained_model.generate(
        input_ids,
        max_length=60,
        num_beams=5,
        do_sample=False,
        # GPT-2 has no pad token; use EOS to silence the runtime warning
        # and get well-defined padding behavior during beam search.
        pad_token_id=trained_tokenizer.eos_token_id,
    )
    # Strip special tokens (e.g. <|endoftext|>) from the user-facing text.
    return trained_tokenizer.decode(output[0], skip_special_tokens=True)
# Wire the generation function into a simple text-in / text-out Gradio UI.
iface = gr.Interface(
    fn=generate,
    inputs="text",
    outputs="text",
    title="GPT-2 Text Generation",
    description="Enter a prompt and GPT-2 will generate the continuation of the text.",
)


def _main():
    """Start the app with a public share link."""
    iface.launch(share=True)


if __name__ == "__main__":
    _main()