import torch
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification

# Load the fine-tuned model and tokenizer
model_name = "ethanrom/a2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Load the pretrained baseline (roberta-large-mnli used as an NLI-based zero-shot classifier)
pretrained_model_name = "roberta-large-mnli"
pretrained_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
pretrained_model = pipeline("zero-shot-classification", model=pretrained_model_name, tokenizer=pretrained_tokenizer)

candidate_labels = ["negative", "positive", "no impact", "mixed"]
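# Note: this label order is assumed to match the fine-tuned model's class indices
# (0 -> negative, 1 -> positive, 2 -> no impact, 3 -> mixed); the same list doubles
# as the candidate labels for the zero-shot pipeline.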

def predict_sentiment(text_input, model_selection):
    if model_selection == "Fine-tuned":
        # Use the fine-tuned classifier: run a forward pass and pick the highest-scoring logit
        inputs = tokenizer(text_input, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits[0].cpu().numpy()
        predicted_class = int(logits.argmax())
        return candidate_labels[predicted_class]
    else:
        # Use the zero-shot pipeline; its labels come back sorted by score, highest first
        result = pretrained_model(text_input, candidate_labels)
        return result["labels"][0]
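
# Quick sanity check outside the UI (hypothetical inputs; each call returns one of candidate_labels):
#   predict_sentiment("Great results this quarter!", "Fine-tuned")
#   predict_sentiment("The announcement had no effect on the stock.", "Pretrained")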

# Gradio UI: a text box plus a dropdown for choosing which model to run
inputs = [
    gr.Textbox(label="Enter text"),
    gr.Dropdown(["Pretrained", "Fine-tuned"], label="Select model"),
]
outputs = gr.Textbox(label="Predicted Sentiment")

gr.Interface(fn=predict_sentiment, inputs=inputs, outputs=outputs, title="Sentiment Analysis", description="Compare the output of two models").launch()