# DemoOfDemos / app.py
import gradio as gr
import nltk
import torch.nn.functional as F
from joblib import load
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# Load every model once at startup so predictions don't reload them on each request
lr_model = load("lr_model.joblib")
lr_vectorizer = load("vectorizer.joblib")
sentiment_pipe = pipeline("text-classification", model="finiteautomata/bertweet-base-sentiment-analysis")
bert_model = AutoModelForSequenceClassification.from_pretrained("./imdb-bert")
bert_tokenizer = AutoTokenizer.from_pretrained("./imdb-bert")
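
# Note: lr_model.joblib, vectorizer.joblib and the ./imdb-bert directory are local
# artifacts assumed to be checked into the Space alongside this app.py.
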
def greet(name):
    return "Hello " + name + "!!"

def classify(text):
    # dummy classifier used by the "Label Output" tab
    return {"cat": 0.3, "dog": 0.7}

def predict_sentiment(text, model):
    if model == "finiteautomata/bertweet-base-sentiment-analysis":
        # Hugging Face pipeline: return the score for every label (NEG/NEU/POS)
        out = sentiment_pipe(text, return_all_scores=True)
        return {pred["label"]: pred["score"] for pred in out[0]}
    elif model == "vader":
        # rule-based VADER sentiment scorer from NLTK
        nltk.download('vader_lexicon')
        sia = SentimentIntensityAnalyzer()
        return sia.polarity_scores(text)
    elif model == "custom logistic regression":
        # scikit-learn logistic regression over the saved vectorizer's features
        x = lr_vectorizer.transform([text])
        pred = lr_model.predict_proba(x)[0]
        return {"neg": pred[0], "pos": pred[1]}
    elif model == "custom BERT":
        # fine-tuned BERT: tokenize the input text and softmax the logits
        pred = F.softmax(bert_model(**bert_tokenizer(text, return_tensors="pt")).logits[0], dim=0).tolist()
        return {"neg": pred[0], "pos": pred[1]}

demo = gr.Blocks()

with demo:
    gr.Markdown("A bunch of different Gradio demos in tabs.\n\nNote that generally, the code that is in each tab could be its own Gradio application!")
    with gr.Tabs():
        with gr.TabItem("Basic Hello"):
            gr.Markdown('The most basic "Hello World"-type demo you can write')
            interface = gr.Interface(fn=greet, inputs="text", outputs="text")
        with gr.TabItem("Label Output"):
            gr.Markdown("An example of a basic interface with a classification label as output")
            interface = gr.Interface(fn=classify, inputs="text", outputs="label")
        with gr.TabItem("Multiple Inputs"):
            gr.Markdown("A more complex interface for sentiment analysis with multiple inputs, including a dropdown, and some examples")
            interface = gr.Interface(
                predict_sentiment,
                [
                    gr.Textbox(placeholder="Your text input"),
                    gr.Dropdown(
                        ["finiteautomata/bertweet-base-sentiment-analysis", "vader"], label="Model"
                    ),
                ],
                "label",
                examples=[
                    ["Happy smile", "vader"],
                    ["Happy smile", "finiteautomata/bertweet-base-sentiment-analysis"],
                    ["Sad frown", "vader"],
                    ["Sad frown", "finiteautomata/bertweet-base-sentiment-analysis"],
                ],
            )

demo.launch()