abrar-adnan committed
Commit: f20ef5e
1 parent: fbbc260
Update app.py
app.py CHANGED
@@ -59,13 +59,6 @@ def getTranscription(path):
     return transcription[0]
 
 def video_processing(video_file, encoded_video):
-    angry = 0
-    disgust = 0
-    fear = 0
-    happy = 0
-    sad = 0
-    surprise = 0
-    neutral = 0
     emotion_count = 0
     video_emotions = {
         'angry': 0,
@@ -116,7 +109,6 @@ def video_processing(video_file, encoded_video):
 
         # Find all the faces in the frame using a pre-trained convolutional neural network.
         face_locations = face_recognition.face_locations(gray)
-        # face_locations = face_recognition.face_locations(gray, number_of_times_to_upsample=0, model="cnn")
 
         if len(face_locations) > 0:
             # Show the original frame with face rectangles drawn around the faces
@@ -146,23 +138,14 @@ def video_processing(video_file, encoded_video):
         elif result[0] == 'off_camera':
             off_camera += 1
         total += 1
-
-        # angry += emotion[0]['emotion']['angry']
-        # disgust += emotion[0]['emotion']['disgust']
-        # fear += emotion[0]['emotion']['fear']
-        # happy += emotion[0]['emotion']['happy']
-        # sad += emotion[0]['emotion']['sad']
-        # surprise += emotion[0]['emotion']['surprise']
-        # neutral += emotion[0]['emotion']['neutral']
 
     try:
         # your processing code here
         gaze_percentage = on_camera / total * 100
     except Exception as e:
         print(f"An error occurred while processing the video: {e}")
-        gaze_percentage =
+        gaze_percentage = 'ERROR : no face detected'
     print(f'Total = {total},on_camera = {on_camera},off_camera = {off_camera}')
-    # print(f'focus perfectage = {on_camera/total*100}')
     # Release the video capture object and close all windows
     video_capture.release()
     cv2.destroyAllWindows()
@@ -172,37 +155,18 @@ def video_processing(video_file, encoded_video):
     os.remove("temp_video.mp4")
     print(gaze_percentage)
 
-
-
-
-
-    sad = sad / emotion_count
-    surprise = surprise / emotion_count
-    neutral = neutral / emotion_count
-    # emotion = {
-    #     'angry': angry,
-    #     'disgust': disgust,
-    #     'fear': fear,
-    #     'happy': happy,
-    #     'sad': sad,
-    #     'surprise': surprise,
-    #     'neutral': neutral
-    # },
+    # Divide all emotion values by emotion count
+    if emotion_count > 0:
+        for key in video_emotions.keys():
+            video_emotions[key] /= emotion_count
     final_result_dict = {
         "gaze_percentage" : gaze_percentage,
         "face_emotion" : video_emotions,
-        "text_emotion" : text_emotion,
+        "text_emotion" : text_emotion[0][0],
         "transcription" : transcription,
         "text_sentiment" : text_sentiment
     }
 
-    # angry = 'total anger percentage' + str(angry)
-    # disgust = 'total disgust percentage' + str(disgust)
-    # fear = 'total fear percentage' + str(fear)
-    # happy = 'total happy percentage' + str(happy)
-    # sad = 'total sad percentage' + str(sad)
-    # surprise = 'total surprise percentage' + str(surprise)
-    # neutral = 'total neutral percentage' + str(neutral)
     print(f'total anger percentage = {angry}')
    print(f'total disgust percentage = {disgust}')
    print(f'total fear percentage = {fear}')