Shafeek Saleem committed on
Commit
2736c78
1 Parent(s): e87299f

implemented face recognition

Browse files
pages/3_Face Encodings.py CHANGED
@@ -7,6 +7,7 @@ import os
7
  import time
8
  import face_recognition
9
  import json
 
10
 
11
  initialize_login()
12
  initialize_level()
@@ -61,7 +62,7 @@ def step3_page():
61
  face_image = face_recognition.load_image_file(os.path.join(img_dir, img))
62
  my_face_encoding = face_recognition.face_encodings(face_image)
63
  my_bar.progress(int((i + 1) / len(images) * 100), text="Generating face encodings...")
64
- with open(os.path.join(".sessions", get_login()["username"], img.split("_")[1]+".npy"), 'wb') as f:
65
  np.save(f, my_face_encoding)
66
  # face_encodings_dict[img.split("_")[1]] = my_face_encoding.tolist()
67
  my_bar.progress(100, text="Successfully encoded all the known faces!")
 
7
  import time
8
  import face_recognition
9
  import json
10
+ import numpy as np
11
 
12
  initialize_login()
13
  initialize_level()
 
62
  face_image = face_recognition.load_image_file(os.path.join(img_dir, img))
63
  my_face_encoding = face_recognition.face_encodings(face_image)
64
  my_bar.progress(int((i + 1) / len(images) * 100), text="Generating face encodings...")
65
+ with open(os.path.join(".sessions", get_login()["username"], "face_encodings",img.split("_")[1]+".npy"), 'wb') as f:
66
  np.save(f, my_face_encoding)
67
  # face_encodings_dict[img.split("_")[1]] = my_face_encoding.tolist()
68
  my_bar.progress(100, text="Successfully encoded all the known faces!")
pages/4_Face Recognition.py CHANGED
@@ -4,6 +4,9 @@ from utils.login import get_login, initialize_login
4
  from utils.inference import query
5
  import os
6
  import time
 
 
 
7
 
8
  initialize_login()
9
  initialize_level()
@@ -20,58 +23,71 @@ def infer(image):
20
  for item in output:
21
  st.progress(item["score"], text=item["label"])
22
 
 
 
23
 
24
  def step4_page():
25
  st.header("Trying It Out")
26
- st.markdown(
27
- """
28
- ### How Our Emotion Detection Application Works
29
- Now that we have trained our emotion detection application, let's see how it works in action! Here's a simple explanation of how the application recognizes emotions:
 
 
 
 
 
 
 
 
 
 
30
 
31
- 1. **Looking at Faces**: When we use our emotion detection application, we can show it a picture of a face or use a camera to capture a real-time image. It's like giving our application a chance to see someone's expression.
 
 
32
 
33
- 2. **Observing the Features**: The application carefully looks at the face and pays attention to different parts, like the eyes, mouth, and eyebrows. It tries to understand the expressions by noticing how these parts look and how they are positioned. It's like the application is taking a close look at the face, just like we do when we try to understand someone's emotions.
34
- """
35
- )
36
- st.image(
37
- "https://camo.githubusercontent.com/3bb4e2eba7c8a91d71916496bc775e870222f19bb5098cb4bc514ed60078c1e7/68747470733a2f2f626c6f672e7161746573746c61622e636f6d2f77702d636f6e74656e742f75706c6f6164732f323032302f30312f4d4c5f6578616d706c652e6769663f7261773d74727565",
38
- use_column_width=True,
39
- )
40
- st.markdown(
41
- """
42
- 3. **Guessing the Emotion**: Based on what it observed, our application uses the knowledge it learned during training to make its best guess about the person's emotion. It remembers the patterns it saw before and tries to match them with the features it observed. It might think the person looks happy, sad, or maybe surprised!
43
- """
44
- )
45
- st.image(
46
- "https://miro.medium.com/v2/resize:fit:1358/1*KoHwRNZGrVrhdbye3BDEew.png",
47
- use_column_width=True,
48
- )
49
- st.markdown(
50
- """
51
- 4. **Providing a Result**: Finally, our emotion detection application tells us what emotion it thinks the person is feeling. It might say, "I think this person looks happy!" or "I think this person looks sad." It's like having a virtual friend who can give us their guess about someone's emotion.
52
 
53
- By going through these steps, our emotion detection application can quickly analyze faces and give us an idea of how someone might be feeling. It's like having a special friend who can understand and guess emotions based on facial expressions!
54
- """
55
- )
56
 
57
- st.info(
58
- "Now that we know how our emotion detection application works, let's try it out!"
59
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
- st.info("Select an image to analyze!")
62
- input_type = st.radio("Select the Input Type", ["Image", "Camera"])
63
-
64
- if input_type == "Camera":
65
- image = st.camera_input("Take a picture")
66
- byte_image = image.getvalue() if image else None
67
- else:
68
- image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
69
- byte_image = image.read() if image else None
70
- try_img = os.path.join(".sessions", get_login()["username"], "try.jpg")
71
- if byte_image:
72
- with open(try_img, "wb") as f:
73
- f.write(byte_image)
74
- infer(try_img)
75
 
76
  st.info("Click on the button below to complete this level!")
77
  if st.button("Complete Level"):
 
4
  from utils.inference import query
5
  import os
6
  import time
7
+ import face_recognition
8
+ import cv2
9
+ import numpy as np
10
 
11
  initialize_login()
12
  initialize_level()
 
23
  for item in output:
24
  st.progress(item["score"], text=item["label"])
25
 
# Get a reference to webcam #0 (the default one)
# NOTE(review): this runs at import time, so the camera device is grabbed as a
# module-level side effect on every page load — consider opening it lazily
# inside step4_page instead; confirm intended Streamlit lifecycle.
video_capture = cv2.VideoCapture(0)
28
 
def step4_page():
    """Render the "Trying It Out" step: live webcam face recognition.

    Loads every face encoding previously saved for the logged-in user
    (``.sessions/<username>/face_encodings/<name>.npy``), then reads frames
    from the module-level ``video_capture`` webcam handle, matches detected
    faces against the known encodings, and draws labelled boxes in an OpenCV
    window until the user presses 'q'.
    """
    st.header("Trying It Out")
    st.info(
        "Now that we know how our face recognition application works, let's try it out!"
    )

    # Load the user's saved encodings; each file name (minus ".npy") is the label.
    face_encodings_dir = os.path.join(".sessions", get_login()["username"], "face_encodings")
    encoding_files = os.listdir(face_encodings_dir)
    known_face_encodings = []
    known_face_names = []
    for encoding_file in encoding_files:
        with open(os.path.join(face_encodings_dir, encoding_file), 'rb') as f:
            known_face_encodings.append(np.load(f))
        # BUG FIX: the original read `img.split(".")[0]`, but `img` is not
        # defined in this function (it was a loop variable in the encodings
        # page) and raised NameError. The label comes from the file's name.
        known_face_names.append(encoding_file.split(".")[0])

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        if not ret:
            # Camera unavailable or stream ended — stop instead of crashing
            # on a None frame below.
            break

        # Convert the image from BGR color (which OpenCV uses) to RGB color
        # (which face_recognition uses)
        rgb_frame = frame[:, :, ::-1]

        # Find all the faces and face encodings in the frame of video.
        # (Renamed from `face_encodings` to avoid shadowing the saved-file
        # list loaded above.)
        face_locations = face_recognition.face_locations(rgb_frame)
        frame_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

        # Loop through each face in this frame of video
        for (top, right, bottom, left), face_encoding in zip(face_locations, frame_encodings):
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)

            name = "Unknown"

            # Use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
91
 
92
  st.info("Click on the button below to complete this level!")
93
  if st.button("Complete Level"):