# Detect Emotion by Photo
import cv2
import os
import threading

import numpy as np
from keras.models import model_from_json
def display_image(image_array):
    # Show the annotated image in a window until a key is pressed.
    cv2.imshow('My Image', image_array)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# os.chdir('models')
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful",
                3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
# Load the model architecture from JSON
# (set this to your own model path)
with open("emotion_recognition/models/emotion_model(MyNet0.82).json", 'r') as json_file:
    loaded_model_json = json_file.read()
emotion_model = model_from_json(loaded_model_json)

# Load the trained weights into the model
# (set this to your own weights path)
emotion_model.load_weights(
    "emotion_recognition/models/emotion_model(MyNet0.82).h5")
print("Loaded model from disk")
def detectFace(net, frame, confidence_threshold=0.7):
    # Run the OpenCV DNN face detector on the frame and draw a green box
    # around each face found above the confidence threshold.
    frameOpencvDNN = frame.copy()
    print(frameOpencvDNN.shape)
    frameHeight = frameOpencvDNN.shape[0]
    frameWidth = frameOpencvDNN.shape[1]
    blob = cv2.dnn.blobFromImage(frameOpencvDNN, 1.0, (227, 227),
                                 [124.96, 115.97, 106.13],
                                 swapRB=True, crop=False)
    net.setInput(blob)
    detections = net.forward()
    faceBoxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > confidence_threshold:
            # Detections are normalized to [0, 1]; scale them to pixels.
            x1 = int(detections[0, 0, i, 3]*frameWidth)
            y1 = int(detections[0, 0, i, 4]*frameHeight)
            x2 = int(detections[0, 0, i, 5]*frameWidth)
            y2 = int(detections[0, 0, i, 6]*frameHeight)
            print("x1=", x1, " x2=", x2, " y1=", y1, " y2=", y2)
            faceBoxes.append([x1, y1, x2, y2])
            cv2.rectangle(frameOpencvDNN, (x1, y1), (x2, y2),
                          (0, 255, 0), int(round(frameHeight/150)), 8)
    return frameOpencvDNN, faceBoxes
faceProto = 'emotion_recognition/models/opencv_face_detector.pbtxt'
faceModel = 'emotion_recognition/models/opencv_face_detector_uint8.pb'

# Load the face detection model
faceNet = cv2.dnn.readNet(faceModel, faceProto)
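# Optional sanity check (my addition): run the detector on any photo to
# confirm the .pb/.pbtxt files loaded. "sample.jpg" is a placeholder
# path, not part of the original project.
# _, _boxes = detectFace(faceNet, cv2.imread("sample.jpg"))
# print(_boxes)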
# Get a test image and process it for the model
def ai(path):
    f = cv2.imread(path, cv2.IMREAD_COLOR)
    # cv2.imshow("fla", f)
    gray_frame = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
    resultImg, faceBoxes = detectFace(faceNet, f)
    print('faceBoxes', faceBoxes)
    if not faceBoxes:
        print("No face detected")
        return None
    # Get the coordinates of the first detected face
    x1, y1, x2, y2 = faceBoxes[0]
    print("x1, y1, x2, y2:", x1, y1, x2, y2)
    # Crop the face with a small margin, clamped to the image bounds
    roi_gray_frame = gray_frame[max(y1-20, 0):y2+10, max(x1-20, 0):x2+10]
    cropped_img = np.expand_dims(np.expand_dims(
        cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
    img_resized = cv2.resize(resultImg, (0, 0), fx=0.5,
                             fy=0.5, interpolation=cv2.INTER_AREA)
    # Send the cropped face to the model
    emotion_prediction = emotion_model.predict(cropped_img)
    # Get the most likely emotion
    maxindex = int(np.argmax(emotion_prediction))
    # Halve the label coordinates to match the half-size display image
    cv2.putText(img_resized, emotion_dict[maxindex],
                (x1 // 2 + 5, max(y1 // 2 - 20, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    # cv2.imshow("crop", resultImg)
    # cv2.resizeWindow("crop", 720, 460)
    # cv2.imshow("crop2", roi_gray_frame)
    # cv2.resizeWindow("crop2", 720, 460)
    print("emotion_prediction=", emotion_dict[maxindex])
    # Show the result in a background thread so the call returns promptly
    display_thread = threading.Thread(
        target=display_image, args=(img_resized,))
    display_thread.start()
    return emotion_dict[maxindex]
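# Example usage (my addition): "test.jpg" is a placeholder path, not part
# of the original project. Point it at any photo containing a face.
if __name__ == "__main__":
    print(ai("test.jpg"))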