Update app.py
app.py CHANGED
@@ -1,9 +1,9 @@
-import os
 import cv2
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.models import model_from_json
 import streamlit as st
+from streamlit_webrtc import VideoTransformerBase, webrtc_streamer
 from PIL import Image
 
 # Load model
@@ -22,13 +22,14 @@ def allowed_file(filename):
     """Checks the file format when file is uploaded"""
     return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
 
+
 def Emotion_Analysis(image):
     """It does prediction of Emotions found in the Image provided, saves as Images and returns them"""
     gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     faces = face_haar_cascade.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
 
     if len(faces) == 0:
-        return None
+        return image, None
 
     for (x, y, w, h) in faces:
         roi = gray_frame[y:y + h, x:x + w]
@@ -51,14 +52,11 @@ def Emotion_Analysis(image):
 
     return image, pred_emotion
 
-
-
-
-
-
-    processed_image, _ = result
-    return processed_image
-    return frame
+class EmotionDetector(VideoTransformerBase):
+    def transform(self, frame):
+        image = frame.to_ndarray(format="bgr24")
+        result_image, _ = Emotion_Analysis(image)
+        return result_image
 
 st.title('Emotion Detection App')
 
@@ -84,22 +82,6 @@ if upload_option == "Image Upload":
 
 elif upload_option == "Webcam":
     st.sidebar.write("Webcam Capture")
-
-    FRAME_WINDOW = st.image([])
-
-    camera = cv2.VideoCapture(0)
-
-    while run:
-        success, frame = camera.read()
-        if not success:
-            st.error("Unable to read from webcam. Please check your camera settings.")
-            break
-        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        processed_frame = video_frame_callback(frame)
-        FRAME_WINDOW.image(processed_frame)
-
-        camera.release()
+    webrtc_streamer(key="example", video_transformer_factory=EmotionDetector)
 else:
     st.write("Please select an option to start.")
-
-
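Note on the Emotion_Analysis change: the function now returns an (image, None) tuple instead of a bare None when no face is detected, so EmotionDetector.transform can unpack its result unconditionally instead of branching on None. A minimal sketch of the new contract, assuming it runs in app.py's module context where the model and face_haar_cascade are already loaded (the blank test frame below is illustrative only):

import numpy as np

# A blank BGR frame contains no detectable face, so the function should
# hand the frame back unchanged together with a None emotion label.
blank = np.zeros((480, 640, 3), dtype=np.uint8)
annotated, emotion = Emotion_Analysis(blank)  # unpacking is now always safe
assert emotion is None                        # no face was found
assert annotated.shape == blank.shape         # original frame passed through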
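VideoTransformerBase and video_transformer_factory are the older streamlit-webrtc names; recent releases keep them working but prefer VideoProcessorBase with a recv method that returns an av.VideoFrame. If the Space ever needs to move to that interface, a sketch of the equivalent detector could look like the following (assuming the rest of app.py stays as committed; the media_stream_constraints argument only disables audio capture):

import av
from streamlit_webrtc import VideoProcessorBase, webrtc_streamer

class EmotionProcessor(VideoProcessorBase):
    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
        image = frame.to_ndarray(format="bgr24")    # same BGR ndarray as before
        result_image, _ = Emotion_Analysis(image)   # annotate any detected faces
        return av.VideoFrame.from_ndarray(result_image, format="bgr24")

webrtc_streamer(
    key="example",
    video_processor_factory=EmotionProcessor,
    media_stream_constraints={"video": True, "audio": False},  # webcam only, no mic
)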