import os
import pickle
import tempfile

import gradio as gr
from fastai.vision.all import *
from speechbrain.inference.interfaces import foreign_class
from transformers import AutoTokenizer, AutoModelWithLMHead

# Image model: fastai learner trained on FER2013, exported as a pickle
learn_emotion = load_learner('emotions_vgg.pkl')
learn_emotion_labels = learn_emotion.dls.vocab


def predict(img):
    """Classify the emotion shown in a facial image and return its label."""
    img = PILImage.create(img)
    # learn.predict returns (decoded label, label index, per-class probabilities)
    pred_emotion, pred_emotion_idx, probs_emotion = learn_emotion.predict(img)
    predicted_emotion = learn_emotion_labels[pred_emotion_idx]
    return predicted_emotion
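

# A minimal alternative sketch (not wired into the app): gr.Label can render
# per-class confidences when given a {label: probability} dict, which is richer
# than a single string. Assumes the same learner and vocab as above.
def predict_with_probs(img):
    img = PILImage.create(img)
    _, _, probs = learn_emotion.predict(img)
    return {label: float(p) for label, p in zip(learn_emotion_labels, probs)}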


title = "Facial Emotion Detector"

description = gr.Markdown(
    """Ever wondered what a person might be feeling looking at their picture?
Well, now you can! Try this fun app. Just upload a facial image in JPG or
PNG format and see what they might have felt when the picture was taken.

**Tip**: Be sure to include only the face for best results. Check the sample images
below for inspiration!""").value

article = gr.Markdown(
    """**DISCLAIMER:** This model does not reveal a person's actual emotional state. Use and
interpret the results at your own risk!

**PREMISE:** The idea is to determine the overall emotion of a person
from a picture. Pictures are restricted to close-up facial images.

**DATA:** The FER2013 dataset consists of 48x48-pixel grayscale images of faces. Each image
is assigned one of seven emotions: Angry, Disgust, Fear, Happy, Sad, Surprise, or Neutral.
""").value

enable_queue = True  # note: defined here but never passed to launch() below

examples = ["happy1.jpg", "happy2.jpeg", "netural.jpg", "sad.jpeg", "surprise.jpeg"]

image_model = gr.Interface(fn=predict,
                           inputs=gr.Image(image_mode='L', label='Image'),
                           outputs=[gr.Label(label='Emotion')],
                           title=title,
                           examples=examples,
                           description=description,
                           article=article,
                           allow_flagging='never')

# Text model: tokenizer and seq2seq model restored from pickles
with open("emotion_tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)

with open("emotion_model.pkl", "rb") as f:
    model = pickle.load(f)
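
# Hedged note: the more portable route is from_pretrained rather than pickle.
# The checkpoint name below is an assumption for illustration (a T5-style model
# fine-tuned for emotion that expects an "emotion: " prefix), not something
# this script specifies:
#
#   tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
#   model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-emotion")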


def classify_emotion(text):
    """Generate an emotion label for the text and bucket it into a sentiment."""
    input_ids = tokenizer.encode("emotion: " + text, return_tensors="pt")
    output = model.generate(input_ids)
    output_text = tokenizer.decode(output[0], skip_special_tokens=True)

    # Collapse the model's fine-grained emotion into Positive / Neutral / Negative
    if output_text in ["joy", "love"]:
        return "Positive"
    elif output_text == "surprise":
        return "Neutral"
    else:
        return "Negative"
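
# Quick usage sketch, kept as a comment so the script only launches the app:
#   classify_emotion("I just got a promotion at work!")  # likely "Positive"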


text_title = "Text Emotion Detector"

text_description = gr.Markdown(
    """# Text Emotion Detector: The Mood Meter

## Introduction
Welcome to our Text Emotion Detector, also known as The Mood Meter! This nifty tool helps you decipher the emotional rollercoaster hidden within any piece of text. Whether it's an enthusiastic rave, a stone-cold report, or a fiery rant, we'll break it down into three simple categories: positive, neutral, or negative vibes. Let's have some fun with words!

## How It Works
Our Mood Meter uses top-notch text analysis wizardry to understand the feels behind your words. We've trained it on mountains of text data and equipped it with cutting-edge algorithms to accurately predict whether your text is partying in positivity, chilling in neutrality, or burning with negativity.

## Usage: Let's Play the Sentiment Game!

1. Type your text into the "Mood-o-Matic" text box. Bonus points for creativity! Remember, the weirder, the better!
2. Summon the Mood Genie by clicking the "Submit" button. Watch out for the magical sparkles!
3. Hold onto your hats (or tiaras) as the Mood-o-Matic works its mojo and reveals the sentiment verdict. Will it be sunshine and rainbows, or thunderclouds and lightning bolts?
4. React accordingly: If it's positive, do a happy dance worthy of a TikTok trend. If it's neutral, give a nonchalant nod like a cool cat. And if it's negative, let out a dramatic gasp that could rival a soap opera cliffhanger. Let's embrace the theatrics of sentiment analysis!

Now, who said sentiment analysis couldn't be the highlight of your day? Let's turn those words into a mood-boosting adventure!""").value

text_article = gr.Markdown(
    """
## Premise
We believe that understanding text sentiment shouldn't be dull. It's about diving into the sea of human expression and surfacing with a smile (or a frown, depending on the text). Our Mood Meter is here to make sentiment analysis a delightful journey.

## Data
Our Mood Meter has gobbled up datasets filled with everything from Shakespearean sonnets to social media rants. We've trained it to handle diverse dialects and writing styles. Because emotions don't stick to a script, and neither do we!

## Disclaimer
While The Mood Meter aims to tickle your funny bone while analyzing sentiment, remember that text analysis is an art, not an exact science. Take our results with a sprinkle of salt (or confetti) and always trust your gut (or your funny bone).
""").value

text_examples = [
    "I aced my exam and received praise from my teacher for my hard work.",
    "I just got a promotion at work, and I'm feeling on top of the world!",
    "The sudden change in weather surprised everyone, but it didn't cause any inconvenience.",
    "I accidentally spilled coffee on my laptop, causing it to malfunction.",
    "I burnt my dinner while trying out a new recipe, and now I have nothing to eat."
]

text_model = gr.Interface(fn=classify_emotion,
                          inputs=gr.Textbox(label='Text'),
                          outputs=[gr.Textbox(label='Emotion')],
                          title=text_title,
                          examples=text_examples,
                          description=text_description,
                          article=text_article,
                          allow_flagging='never')

# Audio model: SpeechBrain wav2vec2 emotion classifier trained on IEMOCAP
classifier = foreign_class(source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
                           pymodule_file="custom_interface.py",
                           classname="CustomEncoderWav2vec2Classifier")


def save_uploaded_file(uploaded_file):
    """Persist an uploaded file to a temp path. (Currently unused by the interfaces below.)"""
    # mkdtemp keeps the directory alive after the function returns; a
    # tempfile.TemporaryDirectory object would be removed once it goes out of scope.
    temp_dir = tempfile.mkdtemp()
    file_path = os.path.join(temp_dir, uploaded_file.name)
    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    return file_path


def emotion(file_path):
    """Classify the emotion in an audio file given its path."""
    if file_path:
        out_prob, score, index, text_lab = classifier.classify_file(file_path)
        if isinstance(text_lab, list):
            text_lab = text_lab[0]

        # Map SpeechBrain's short IEMOCAP labels to readable names
        emotion_mapping = {
            'neu': 'Neutral',
            'ang': 'Angry',
            'hap': 'Happy',
            'sad': 'Sadness'
        }
        return emotion_mapping.get(text_lab, 'Unknown')
    else:
        return "Please provide the path to an audio file."


audio_model = gr.Interface(fn=emotion,
                           inputs=gr.Textbox(label='Audio file path'),
                           outputs=gr.Textbox(label='Emotion'))
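
# A hedged alternative sketch: accept an uploaded file instead of a typed path.
# gr.Audio(type="filepath") passes fn a local file path, which is what
# classifier.classify_file expects (exact component kwargs vary by Gradio version):
#
#   audio_model = gr.Interface(fn=emotion,
#                              inputs=gr.Audio(type="filepath", label='Audio'),
#                              outputs=gr.Textbox(label='Emotion'))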


HP_title = "Multimodal Sentiment Analysis: Feel the Emotion in Every Pixel, Word, and Sound!"
HP_description = gr.Markdown(
    """
## Hey There!
### Welcome to our spectacular project, Multimodal Sentiment Analysis! Here, we're all about unraveling the emotions tucked away in text, audio, and images. Think of us as your personal emotion whisperers across various platforms!
## Why It's So Darn Cool
Imagine this: understanding emotions unlocks the door to understanding people better. With our project, we're diving headfirst into a pool of sentiments! From heartwarming messages to catchy tunes, and from breathtaking landscapes to hilarious memes, we're decoding it all!
## Explore Emotions Effortlessly with Tabs!
Navigate through emotions seamlessly with our nifty tabs:
- **Text Emotion Recognition**: Unravel the emotional rollercoaster hidden in every word!
- **Image Emotion Recognition**: Peek into the feelings behind every snapshot!
- **Audio Emotion Recognition**: Tune in to the vibes of emotions with every sound clip!
## Meet Our Awesome Models
### 1. Text Emotion Recognition
This model is your go-to buddy for understanding the emotional vibe in written text! Whether it's a love letter or a tweet storm, our Text Emotion Recognition model has got your back, decrypting emotions like a champ!
### 2. Image Emotion Recognition
Ever wondered what feelings those grins, frowns, and winks in photos convey? Our Image Emotion Recognition model spills the beans! It's like having a personal mood interpreter for every pic you snap!
### 3. Audio Emotion Recognition
Listen up! Our Audio Emotion Recognition model tunes in to the subtle nuances of voice, capturing emotions in every syllable! From giggles to sobs and everything in between, it's your trusty sidekick for decoding the melodies of emotions!
## Ready to Dive In?
Getting started with Multimodal Sentiment Analysis is as easy as pie! Grab our user-friendly APIs and libraries, plug in the models for text, image, and audio emotion recognition, and voilà! You'll be swimming in the sea of emotions like a pro in no time!
## Meet the Fabulous Team Behind the Magic!
Let's give a round of applause to the brilliant minds who made it all happen:
- **Pavan**: The wordsmith behind the Text Emotion Recognition model, spinning magic with language and algorithms!
- **Abhiram**: The visionary behind the Image Emotion Recognition model, bringing pixels to life with emotion decoding powers!
- **Karthik**: The audio maestro shaping the Audio Emotion Recognition model, capturing the symphony of emotions in every sound wave!
- **Ganesh**: The glue holding it all together, orchestrating the dance of emotions and teamwork!
""").value


def greet(name):
    # Placeholder: the home page only displays the title and description
    pass


home_page = gr.Interface(fn=greet,
                         inputs=gr.Textbox(label="Hey there! Ready to spice up your title game? Drop your name, and let's turn it into a giggling sensation!"),
                         outputs=None,
                         description=HP_description,
                         theme='gradio/monochrome',
                         title=HP_title,
                         allow_flagging='never')


main_model = gr.TabbedInterface(
    [home_page, text_model, image_model, audio_model],
    ["Home Page", "Text Emotion Recognition", "Image Emotion Recognition", "Audio Emotion Recognition"],
    theme='gradio/monochrome')

main_model.launch()