# finetuned_gpt/app.py
import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the fine-tuned GPT-2 model and tokenizer from the Hugging Face Hub
model = GPT2LMHeadModel.from_pretrained("wenjun99/gpt2-finetuned")
tokenizer = GPT2Tokenizer.from_pretrained("wenjun99/gpt2-finetuned")

# Response-generation function used by the Gradio UI
def generate_response(query):
    # Build the prompt in the "Query: ...\nTask:" format expected by the model
    input_text = f"Query: {query}\nTask:"
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    # max_length counts the prompt tokens as well, so completions stay short
    outputs = model.generate(**inputs, max_length=24, pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
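
# Illustrative only, not part of the deployed app: the function can be smoke-tested
# directly before wiring up the UI (the query string below is a made-up example,
# not taken from this project):
# if __name__ == "__main__":
#     print(generate_response("Summarise my last three emails"))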

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Fine-Tuned GPT-2 Chatbot")
    gr.Markdown("Enter a query to see how the fine-tuned GPT-2 model responds.")
    query_input = gr.Textbox(label="Enter Query")
    generate_btn = gr.Button("Generate Response")
    output_text = gr.Textbox(label="Generated Response")
    generate_btn.click(generate_response, inputs=query_input, outputs=output_text)

# Launch the Gradio app
demo.launch()
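
# Note (general Gradio behaviour, not specific to this Space): on Hugging Face Spaces
# the plain launch() call above is sufficient; when running locally,
# demo.launch(share=True) would additionally expose a temporary public link.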