# app.py — Gradio Space by AbdulrahmanCS (commit e7b38b6, verified)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM # or AutoModelForCausalLM, etc.
# Hub repo id of the fine-tuned model; replace with your own checkpoint path.
MODEL_NAME = "AbdulrahmanCS/my_finetuned_model"
# Load tokenizer and model once at import time so every request reuses them.
# AutoModelForSeq2SeqLM is used here; swap for AutoModelForCausalLM etc. if
# the checkpoint is a decoder-only model.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
# A transformers pipeline would also work; below we call the model manually
# in a small text-generation helper.
def generate_interview_questions(prompt: str) -> str:
    """Generate interview-question text from *prompt* using the fine-tuned model.

    Args:
        prompt: Context or instruction text to condition generation on.

    Returns:
        The best beam-search continuation, decoded without special tokens.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        # max_new_tokens counts only generated tokens. The previous
        # max_length=128 included the prompt length, so long prompts could
        # leave no budget for generation (and max_length is deprecated).
        max_new_tokens=128,
        num_beams=4,
        num_return_sequences=1,
    )
    # outputs[0] is the single returned sequence (num_return_sequences=1).
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Assemble the Gradio UI: a prompt box and an output box side by side,
# plus a button wired to the generation helper above.
with gr.Blocks() as demo:
    gr.Markdown("# My Fine-Tuned Interview Question Generation Model")
    with gr.Row():
        prompt_box = gr.Textbox(label="Enter prompt or context", lines=3)
        result_box = gr.Textbox(label="Generated question(s)")
    run_button = gr.Button("Generate")
    run_button.click(
        fn=generate_interview_questions,
        inputs=prompt_box,
        outputs=result_box,
    )

if __name__ == "__main__":
    demo.launch()