# NOTE(review): lines below replaced Hugging Face Spaces page residue
# ("Spaces: / Sleeping / Sleeping") that was captured by the scrape and
# is not part of the program.
import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the pre-trained GPT-2 model and its tokenizer once at module import,
# so every Gradio request reuses the same in-memory weights instead of
# reloading them per call.
model_name = "gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
def generate_response(prompt):
    """Generate two candidate GPT-2 completions for *prompt*.

    Returns a 4-tuple ``(response_1, score_1, response_2, score_2)``
    matching the Gradio output components. The scores are always 0 here:
    they only seed the rating sliders, which the user adjusts in the UI.
    """
    score1 = 0
    score2 = 0
    # Tokenize the prompt into model input ids (PyTorch tensor).
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Beam search with two returned sequences; no_repeat_ngram_size=2
    # discourages repeated bigrams in the generated text.
    output = model.generate(
        input_ids,
        max_length=100,
        num_return_sequences=2,
        no_repeat_ngram_size=2,
        num_beams=5,
    )
    # Decode each returned beam back into plain text.
    responses = [
        tokenizer.decode(out, skip_special_tokens=True) for out in output
    ]
    return responses[0], score1, responses[1], score2
# Gradio interface: one text input; outputs are two generated responses,
# each paired with a 0-5 rating slider the user can set interactively.
iface = gr.Interface(
    fn=generate_response,
    inputs=["text"],
    outputs=[
        gr.Textbox(label="Response 1"),
        gr.Slider(0, 5, interactive=True, label="score 1", step=1),
        gr.Textbox(label="Response 2"),
        gr.Slider(0, 5, interactive=True, label="score 2", step=1),
    ],
    title="GROUP1_TASK1",
    description="Enter a question to generate responses from GPT-2 model.",
)

iface.launch()