Garvitj committed
Commit 0cffb96 · verified · 1 Parent(s): 506cf6c

Upload new.py

Files changed (1)
  1. new.py +61 -0
new.py ADDED
@@ -0,0 +1,61 @@
+ # Real-time facial emotion recognition from a webcam feed using OpenCV and a Keras model
+
+
+ import cv2
+ import numpy as np
+ import tensorflow as tf
+
+ # Load the Haar cascade face detector bundled with OpenCV
+ face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+ # Load the emotion model architecture and its trained weights
+ emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
+ emotion_model = tf.keras.models.load_model("model_emotion.h5")
+ emotion_model.load_weights("model_weights_new.h5")
+ print("Loaded emotion model from disk")
+
+ # Detect faces in a frame and annotate each one with its predicted emotion
+ def predict_img(frame):
+     # Resize the frame to a fixed size
+     frame = cv2.resize(frame, (1280, 720))
+     num_faces = face_detector.detectMultiScale(frame, scaleFactor=1.3, minNeighbors=5)
+
+     # Draw a bounding box and an emotion label for every detected face
+     for (x, y, w, h) in num_faces:
+         cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (0, 255, 0), 4)
+         roi_frame = frame[y:y + h, x:x + w]  # Face crop, still in BGR
+
+         # Preprocess the face crop for the model: 48x48 grayscale
+         resized_img = cv2.resize(roi_frame, (48, 48))
+         gray_img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2GRAY)
+         input_img = np.expand_dims(gray_img, axis=-1)  # Add the channel dimension
+         input_img = np.expand_dims(input_img, axis=0)  # Add the batch dimension
+
+         # Normalize pixel values to [0, 1]
+         input_img = input_img / 255.0
+
+         # Predict the emotion for this face
+         emotion_prediction = emotion_model.predict(input_img, verbose=0)
+         maxindex = int(np.argmax(emotion_prediction))
+         emotion_label = emotion_dict[maxindex]
+
+         # Annotate the frame with the emotion label
+         cv2.putText(frame, emotion_label, (x + 5, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
+
+     return frame
+
+ # Capture video from the webcam
+ cap = cv2.VideoCapture(0)
+
+ while True:
+     ret, frame = cap.read()
+     if not ret:
+         break
+
+     annotated_frame = predict_img(frame)
+     cv2.imshow('Emotion Detection', annotated_frame)
+
+     if cv2.waitKey(1) & 0xFF == ord('q'):  # Quit when 'q' is pressed
+         break
+
+ cap.release()
+ cv2.destroyAllWindows()