# Hugging Face Space app (scraped page header removed: Space status "Runtime error",
# file size 1,674 bytes, commits 0f1c341 / c9ace17).
import gradio as gr
from transformers import AutoTokenizer, pipeline, AutoModelForSequenceClassification
# Load the tokenizer and fine-tuned sequence-classification model from the
# Hugging Face Hub repo "ethanrom/a2".
tokenizer = AutoTokenizer.from_pretrained("ethanrom/a2")
model = AutoModelForSequenceClassification.from_pretrained("ethanrom/a2")
# Candidate labels used by the zero-shot branch of predict_sentiment below.
class_labels = ["Negative", "Positive", "No Impact", "Mixed"]
# Zero-shot classification pipeline; device=0 pins it to the first GPU.
# NOTE(review): device=0 raises on CPU-only hosts — confirm a GPU is available.
classifier = pipeline("zero-shot-classification", model=model, tokenizer=tokenizer, device=0)
# define the Gradio interface
def predict_sentiment(text, model_choice):
    """Classify the sentiment of *text* with the chosen model.

    Args:
        text: Input string to classify.
        model_choice: "bert" selects the default sentiment-analysis pipeline;
            any other value uses the zero-shot classifier over ``class_labels``.

    Returns:
        A string of the form "<label> (<score>)" with the score to 2 decimals.
    """
    if model_choice == "bert":
        # Default BERT sentiment-analysis pipeline.
        # NOTE(review): rebuilt on every call — consider hoisting to module
        # level if this path is hot.
        sentiment_classifier = pipeline("sentiment-analysis", device=0)
        result = sentiment_classifier(text)[0]
        return f"{result['label']} ({result['score']:.2f})"
    # Zero-shot path with the fine-tuned model. BUG FIX: the pipeline takes
    # `candidate_labels` (the original `labels=` kwarg raises TypeError), and
    # `multi_class` was renamed to `multi_label` in transformers.
    hypothesis_template = "This text is about {}."
    result = classifier(
        text,
        candidate_labels=class_labels,
        hypothesis_template=hypothesis_template,
        multi_label=True,
    )
    # Results are sorted by score, so index 0 is the top prediction.
    return f"{result['labels'][0]} ({result['scores'][0]:.2f})"
# Define the Gradio interface inputs and outputs.
# FIX: the `gr.inputs` / `gr.outputs` namespaces were deprecated in Gradio 3.x
# and removed in 4.x — use the top-level component classes instead.
inputs = [
    gr.Textbox(label="Input Text"),
    gr.Radio(["bert", "fine-tuned RoBERTa"], label="Model Choice"),
]
outputs = gr.Textbox(label="Sentiment Prediction")
# Create and launch the Gradio interface.
gr.Interface(predict_sentiment, inputs, outputs, title="Sentiment Analysis App").launch()