Update app.py
app.py
CHANGED
@@ -3,14 +3,14 @@ from PIL import Image
 import numpy as np
 import cv2
 import requests
+from keras.models import model_from_json
 import face_recognition
 import os
 from datetime import datetime
 
-
-from
-
-pylab.rcParams['figure.figsize'] = (10.0, 8.0) # this controls figure size in the notebook
+from keras.models import model_from_json
+from keras.preprocessing.image import img_to_array
+from PIL import Image
 
 import io
 import streamlit as st
@@ -27,6 +27,23 @@ for cls in myList:
     classnames.append(os.path.splitext(cls)[0])
 st.write(classnames)
 
+# load model
+emotion_dict = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad', 4: 'surprise'}
+# load json and create model
+json_file = open('emotion_model1.json', 'r')
+loaded_model_json = json_file.read()
+json_file.close()
+classifier = model_from_json(loaded_model_json)
+
+# load weights into new model
+classifier.load_weights("emotion_model1.h5")
+
+# load face
+try:
+    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+except Exception:
+    st.write("Error loading cascade classifiers")
+
 def findEncodings(Images):
     encodeList = []
     for img in Images:
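Taken together, the imports added in the first hunk and the model-loading block added above form a small standalone pipeline: read the network architecture from emotion_model1.json, load the trained weights from emotion_model1.h5, and run the classifier on a 48x48 grayscale face crop. The sketch below restates that flow outside of app.py; load_emotion_classifier and predict_emotion are hypothetical helper names, and the preprocessing mirrors the steps the next hunk adds inside the camera-input handler.

# Minimal sketch, not part of app.py: the same Keras calls the diff introduces,
# wrapped in hypothetical helpers for clarity.
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array

emotion_dict = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad', 4: 'surprise'}

def load_emotion_classifier():
    # Architecture comes from the JSON file, trained weights from the .h5 file.
    with open('emotion_model1.json', 'r') as f:
        classifier = model_from_json(f.read())
    classifier.load_weights('emotion_model1.h5')
    return classifier

def predict_emotion(classifier, roi_gray):
    # roi_gray: a 48x48 grayscale face crop, e.g. from cv2.resize(..., (48, 48)).
    roi = roi_gray.astype('float') / 255.0   # scale pixel values to [0, 1]
    roi = img_to_array(roi)                  # -> shape (48, 48, 1)
    roi = np.expand_dims(roi, axis=0)        # add a batch axis -> (1, 48, 48, 1)
    prediction = classifier.predict(roi)[0]  # probabilities for the five classes
    return emotion_dict[int(np.argmax(prediction))]

Splitting the architecture (JSON) from the weights (H5) is what model_from_json expects: the classifier is built first and only then populated with load_weights.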
@@ -41,46 +58,78 @@ st.write('Encoding Complete')
 img_file_buffer=st.camera_input("Take a picture")
 if img_file_buffer is not None:
 
-
-
-
-
+    test_image = Image.open(img_file_buffer)
+    image1 = Image.open(img_file_buffer)
+    st.image(test_image, use_column_width=True)
+    image = np.asarray(test_image)
 
-
-
-    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
-    facesCurFrame = face_recognition.face_locations(imgS)
-    encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
+    img = np.asarray(image1)
+    img = cv2.resize(img,(0,0),None,0.25,0.25)
 
-
-
-
-
-
+    # image gray
+    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    faces = face_cascade.detectMultiScale(
+        image=img_gray, scaleFactor=1.3, minNeighbors=5)
+    try:
+        for (x, y, w, h) in faces:
+            cv2.rectangle(img=img, pt1=(x, y), pt2=(
+                x + w, y + h), color=(255, 0, 0), thickness=2)
+            roi_gray = img_gray[y:y + h, x:x + w]
+            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
+            if np.sum([roi_gray]) != 0:
+                roi = roi_gray.astype('float') / 255.0
+                roi = img_to_array(roi)
+                roi = np.expand_dims(roi, axis=0)
+                prediction = classifier.predict(roi)[0]
+                maxindex = int(np.argmax(prediction))
+                finalout = emotion_dict[maxindex]
+                output = str(finalout)
+                st.write(output)
+            label_position = (x, y)
+            img = cv2.putText(img, output, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+        st.image(img, use_column_width=True)
+    except:
+        st.write("face is not clear")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    #########################
+    imgS = cv2.resize(image,(0,0),None,0.25,0.25)
+    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
+    facesCurFrame = face_recognition.face_locations(imgS)
+    encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
+    for encodeFace,faceLoc in zip(encodesCurFrame,facesCurFrame):
+        matches = face_recognition.compare_faces(encodeListknown,encodeFace)
+        faceDis = face_recognition.face_distance(encodeListknown,encodeFace)
+        matchIndex = np.argmin(faceDis)
+        if matches[matchIndex]:
+            name = classnames[matchIndex]
+            st.write(name)
+            y1, x2, y2, x1 = faceLoc
+            y1, x2, y2, x1 = y1*4,x2*4,y2*4,x1*4
+            cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2)
+            cv2.rectangle(image,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
+            cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2)
+            ##############
+            if name:
+                if output=='happy':
+                    url = "https://kiwi-whispering-plier.glitch.me/update"
+
+                    data = {
+                        'name': name,
+                    }
+                    response = requests.get(url, params=data)
+
+                    if response.status_code == 200:
+                        st.write(" data updated on : https://kiwi-whispering-plier.glitch.me" )
+                        st.image(image)
+                    else:
+                        st.write("data not updated ")
 
-
-
-
+                ##############################
+
+
+                else:
+                    st.write("Please smile")
+
+
+        else:
+            st.write("Failed")
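The tail of the new handler gates the attendance update twice: the face must match a known encoding, and the predicted emotion must be 'happy' before the app calls the Glitch endpoint. A minimal sketch of that last step, with update_attendance as a hypothetical wrapper around the same requests.get call used above:

# Minimal sketch, not part of app.py: the attendance-update request the diff adds.
import requests

UPDATE_URL = "https://kiwi-whispering-plier.glitch.me/update"

def update_attendance(name, emotion):
    # Only a recognised face that is also classified as 'happy' triggers the update.
    if emotion != 'happy':
        return False
    response = requests.get(UPDATE_URL, params={'name': name})
    # app.py treats HTTP 200 as "data updated" and anything else as a failure.
    return response.status_code == 200

On a 200 response app.py confirms the update and re-displays the annotated image; otherwise it reports that the data was not updated.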