from transformers import GPT2LMHeadModel, GPT2Tokenizer
import gradio as gr
import torch
# Load the fine-tuned GPT-2 model and its tokenizer
trained_tokenizer = GPT2Tokenizer.from_pretrained("Kumarkishalaya/GPT-2-next-word-prediction")
trained_model = GPT2LMHeadModel.from_pretrained("Kumarkishalaya/GPT-2-next-word-prediction")

# Load the base GPT-2 model and tokenizer for comparison
untrained_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
untrained_model = GPT2LMHeadModel.from_pretrained("gpt2")

# Run on GPU if one is available
device = "cuda" if torch.cuda.is_available() else "cpu"
trained_model.to(device)
untrained_model.to(device)
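
# Optional tweak (not in the original script): GPT-2 defines no pad token, so
# generate() falls back to the EOS token and logs a warning on each call;
# setting it explicitly on the generation config keeps the logs quiet.
trained_model.generation_config.pad_token_id = trained_tokenizer.eos_token_id
untrained_model.generation_config.pad_token_id = untrained_tokenizer.eos_token_id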
def generate(commentary_text):
    # Generate text using the trained model
    input_ids = trained_tokenizer(commentary_text, return_tensors="pt").input_ids.to(device)
    trained_output = trained_model.generate(input_ids, max_length=60, num_beams=5, do_sample=False)
    trained_text = trained_tokenizer.decode(trained_output[0], skip_special_tokens=True)

    # Generate text using the untrained model
    input_ids = untrained_tokenizer(commentary_text, return_tensors="pt").input_ids.to(device)
    untrained_output = untrained_model.generate(input_ids, max_length=60, num_beams=5, do_sample=False)
    untrained_text = untrained_tokenizer.decode(untrained_output[0], skip_special_tokens=True)

    return trained_text, untrained_text
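
# Example call (hypothetical prompt, not in the original):
#   generate("Kohli drives it through the covers")
# returns a pair of strings: the fine-tuned continuation and the base GPT-2 continuation.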
# Create Gradio interface
iface = gr.Interface(
    fn=generate,
    inputs="text",
    outputs=["text", "text"],
    title="GPT-2 Text Generation",
    description="Start writing a cricket commentary and GPT-2 will continue it using both the fine-tuned and the base (untrained) model.",
)
# Launch the app
if __name__ == "__main__":
    iface.launch(share=True)