Upload 3 files
- app.py +117 -0
- demo.jpg +0 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,117 @@
+import cv2
+import numpy as np
+import tempfile
+import time
+import streamlit as st
+from PIL import Image
+from io import BytesIO
+import plotly.graph_objs as go
+from transformers import CLIPProcessor, CLIPModel
+from torch.cuda import is_available
+
+MODEL_ID = "openai/clip-vit-base-patch32"
+
+DEMO_IMAGE = 'demo.jpg'
+
+EMOTION_DICT = {
+    0: ['Angry', '😡'],
+    1: ['Disgusted', '🤢'],
+    2: ['Fearful', '😨'],
+    3: ['Happy', '😀'],
+    4: ['Neutral', '😐'],
+    5: ['Sad', '☹️'],
+    6: ['Surprised', '😮']
+}
+
+device = 'cuda' if is_available() else 'cpu'
+
+# cache_resource (not cache_data): the model object should be reused, not re-serialized
+@st.cache_resource
+def load_model():
+    processor = CLIPProcessor.from_pretrained(MODEL_ID)
+    model = CLIPModel.from_pretrained(MODEL_ID)
+    return processor, model
+
+@st.cache_data
+def load_token_embds():
+    # builds one text prompt per emotion and embeds it with the CLIP text encoder;
+    # relies on the module-level processor/model loaded in the 'Run Mood Scope' branch
+    emotions = list(EMOTION_DICT.values())
+    desc = [f'a photo of a {emotion[0]} person' for emotion in emotions]
+    tok = processor(text=desc, return_tensors='pt', images=None, padding=True).to(device)
+    tok_emb = model.get_text_features(**tok).detach().cpu().numpy()
+    # L2-normalize each prompt embedding row-wise (axis=0 would mix values across classes)
+    tok_emb = tok_emb / np.linalg.norm(tok_emb, axis=1, keepdims=True)
+    return tok_emb
+
+st.set_page_config(page_title="Mood Scope", page_icon="🎭")
+st.title('Mood-Scope')
+st.sidebar.title('Options')
+
+app_mode = st.sidebar.selectbox('Choose Page', ['About the App', 'Run Mood Scope'])
+
+st.markdown(
+    """
+    <style>
+    [data-testid='stSidebar'][aria-expanded='true'] > div:first-child {
+        width: 350px;
+    }
+    [data-testid='stSidebar'][aria-expanded='false'] > div:first-child {
+        width: 350px;
+        margin-left: -350px;
+    }
+    </style>
+    """, unsafe_allow_html=True
+)
+
+if app_mode == 'About the App':
+    st.markdown('Will edit this later!!')
+
+elif app_mode == 'Run Mood Scope':
+
+    processor, model = load_model()
+
+    st.sidebar.markdown('---')
+
+    with st.columns(3)[1]:
+        kpi = st.markdown('**Dominant Detected Emotion**')
+        emotion_emoji = st.markdown('-')
+        #emotion_text = st.markdown('-')
+
+    img_file_buffer = st.sidebar.file_uploader('Upload an Image', type=['jpg', 'png', 'jpeg'])
+    if img_file_buffer:
+        buffer = BytesIO(img_file_buffer.read())
+        data = np.frombuffer(buffer.getvalue(), dtype=np.uint8)
+        image = cv2.imdecode(data, cv2.IMREAD_COLOR)
+    else:
+        image = cv2.imread(DEMO_IMAGE, cv2.IMREAD_COLOR)
+
+    st.sidebar.text('Original Image')
+    st.sidebar.image(image, channels='BGR')
+
+    im_proc = processor(images=image, return_tensors='pt')['pixel_values']
+    im_emb = model.to(device).get_image_features(im_proc.to(device))
+    im_emb = im_emb.detach().cpu().numpy()
+
+    tok_emb = load_token_embds()
+    score = np.dot(im_emb, tok_emb.T)
+
+    # the highest-scoring prompt is the predicted emotion
+    pred = score.argmax(axis=1).item()
+    output_emoji = EMOTION_DICT[pred][1]
+    output_text = EMOTION_DICT[pred][0]
+
+    emotion_emoji.write(f'<h1> {output_emoji} </h1>', unsafe_allow_html=True)
+
+    # radar chart of the score distribution across all seven emotions
+    categories = [emotion[0] for emotion in EMOTION_DICT.values()]
+    data = list(map(int, (100 * (score / score.sum())).squeeze()))
+
+    trace = go.Scatterpolar(r=data, theta=categories, fill='toself', name='Emotions')
+
+    layout = go.Layout(
+        polar=dict(
+            radialaxis=dict(
+                visible=False,
+                range=[0, 50]
+            )
+        ),
+    )
+    fig = go.Figure(data=[trace], layout=layout)
+    st.plotly_chart(fig, use_container_width=True)
+
+    #emotion_text.write(f'**{output_text}**')
demo.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+datasets
+transformers
+torchvision
+torch
+streamlit
+plotly
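With these dependencies installed, running the Space locally should amount to something like the following (assuming a working Python environment; streamlit run is the standard Streamlit entry point):

pip install -r requirements.txt
streamlit run app.py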