awinml committed
Commit 0bce5c5
Parent: 0ffec82

Upload 2 files

Files changed (2)
  1. app.py +84 -46
  2. requirements.txt +6 -9
app.py CHANGED
@@ -1,6 +1,6 @@
 from mtcnn.mtcnn import MTCNN
 import streamlit as st
-import matplotlib.pyplot as plt
+import matplotlib.pyplot as plt
 from matplotlib.patches import Rectangle
 from matplotlib.patches import Circle
 from PIL import Image
@@ -11,18 +11,25 @@ from keras_vggface.vggface import VGGFace
 
 st.header("Face Detection using a Pre-trained CNN model")
 
-choice = st.selectbox("",[
-"Face Detection - Show Bounding Box",
-"Face Detection - Extract Face",
-"Face Verification"
-])
+choice = st.selectbox(
+    "Choose task",
+    [
+        "Face Detection - Show Bounding Box",
+        "Face Detection - Extract Face",
+        "Face Verification",
+    ],
+)
+
+
 def main():
     fig = plt.figure()
     if choice == "Face Detection - Show Bounding Box":
         st.subheader("Face Detection - Show Bounding Box")
-        st.write("Please upload an image containing a face. A box will be drawn highlighting the face using the pretrained VGGFace model.")
+        st.write(
+            "Please upload an image containing a face. A box will be drawn highlighting the face using the pretrained VGGFace model."
+        )
         # load the image
-        uploaded_file = st.file_uploader("Upload Image", type=["jpg","png"], key="1")
+        uploaded_file = st.file_uploader("Upload Image", type=["jpg", "png"], key="1")
         if uploaded_file is not None:
             data = asarray(Image.open(uploaded_file))
             # plot the image
@@ -38,25 +45,29 @@ def main():
             faces = detector.detect_faces(data)
             for face in faces:
                 # get coordinates
-                x, y, width, height = face['box']
+                x, y, width, height = face["box"]
                 # create the shape
-                rect = Rectangle((x, y), width, height, fill=False, color='green')
+                rect = Rectangle((x, y), width, height, fill=False, color="green")
                 # draw the box
                 ax.add_patch(rect)
                 # draw the dots
-                for _, value in face['keypoints'].items():
+                for _, value in face["keypoints"].items():
                     # create and draw dot
-                    dot = Circle(value, radius=2, color='green')
+                    dot = Circle(value, radius=2, color="green")
                     ax.add_patch(dot)
             # show the plot
             st.pyplot(fig)
-            st.write("The box highlights the face and the dots highlight the identified features.")
+            st.write(
+                "The box highlights the face and the dots highlight the identified features."
+            )
 
     elif choice == "Face Detection - Extract Face":
         st.subheader("Face Detection - Extract Face")
-        st.write("Please upload an image containing a face. The part of the image containing the face will be extracted using the pretrained VGGFace model.")
+        st.write(
+            "Please upload an image containing a face. The part of the image containing the face will be extracted using the pretrained VGGFace model."
+        )
 
-        uploaded_file = st.file_uploader("Upload Image", type=["jpg","png"], key="2")
+        uploaded_file = st.file_uploader("Upload Image", type=["jpg", "png"], key="2")
         if uploaded_file is not None:
             column1, column2 = st.columns(2)
             image = Image.open(uploaded_file)
@@ -79,52 +90,78 @@ def main():
             face = pixels[y1:y2, x1:x2]
             # resize pixels to the model size
             image = Image.fromarray(face)
-            image = image.resize((224, 224)) # Rodgers -> You can just save this as image
+            image = image.resize(
+                (224, 224)
+            )  # Rodgers -> You can just save this as image
             face_array = asarray(image)
             with column2:
-                plt.imshow(face_array)
-                st.pyplot(fig)
-
+                plt.imshow(face_array)
+                st.pyplot(fig)
+
     elif choice == "Face Verification":
         st.subheader("Face Verification")
-        st.write("Please upload two image of the same person. The model will check if the two images contain the same face.")
-        st.write("Classifies whether the images match based on the probability score predicted by the model. If the difference is below the threshold (0.5 here) then the images are said to be identical.")
+        st.write(
+            "Please upload two image of the same person. The model will check if the two images contain the same face."
+        )
+        st.write(
+            "Classifies whether the images match based on the probability score predicted by the model. If the difference is below the threshold (0.5 here) then the images are said to be identical."
+        )
         column1, column2 = st.columns(2)
-
+
         with column1:
-            image1 = st.file_uploader("Upload First Image", type=["jpg","png"], key="3")
-
+            image1 = st.file_uploader(
+                "Upload First Image", type=["jpg", "png"], key="3"
+            )
+
         with column2:
-            image2 = st.file_uploader("Upload Second Image", type=["jpg","png"], key="4")
+            image2 = st.file_uploader(
+                "Upload Second Image", type=["jpg", "png"], key="4"
+            )
         # define filenames
-        if (image1 is not None) & (image2 is not None):
+        if (image1 is not None) & (image2 is not None):
             col1, col2 = st.columns(2)
-            image1 = Image.open(image1)
-            image2 = Image.open(image2)
+            image1 = Image.open(image1)
+            image2 = Image.open(image2)
             with col1:
                 st.image(image1)
             with col2:
                 st.image(image2)
 
-            filenames = [image1,image2]
-
+            filenames = [image1, image2]
             faces = [extract_face(f) for f in filenames]
-            # convert into an array of samples
-            samples = asarray(faces, "float32")
-            # prepare the face for the model, e.g. center pixels
-            samples = preprocess_input(samples, version=2)
-            # create a vggface model
-            model = VGGFace(model= "resnet50" , include_top=False, input_shape=(224, 224, 3),
-            pooling= "avg" )
-            # perform prediction
-            embeddings = model.predict(samples)
-            thresh = 0.5
-
-            score = cosine(embeddings[0], embeddings[1])
-            if score <= thresh:
-                st.success( " > Face is a match ( Score %.3f <= %.3f) " % (score, thresh))
+            print(faces)
+            faces_checked = [face for face in faces if face != []]
+            print(faces_checked)
+            if len(faces_checked) == 1:
+                st.error(
+                    "Could not find face in one of the images. Please Upload better quality images!"
+                )
+                st.stop()
             else:
-                st.error(" > Face is NOT a match ( Score %.3f > %.3f)" % (score, thresh))
+                # convert into an array of samples
+                samples = asarray(faces_checked, "float32")
+                # prepare the face for the model, e.g. center pixels
+                samples = preprocess_input(samples, version=2)
+                # create a vggface model
+                model = VGGFace(
+                    model="resnet50",
+                    include_top=False,
+                    input_shape=(224, 224, 3),
+                    pooling="avg",
+                )
+                # perform prediction
+                embeddings = model.predict(samples)
+                thresh = 0.5
+
+                score = cosine(embeddings[0], embeddings[1])
+                if score <= thresh:
+                    st.success(
+                        " > Face is a match ( Score %.3f <= %.3f) " % (score, thresh)
+                    )
+                else:
+                    st.error(
+                        " > Face is NOT a match ( Score %.3f > %.3f)" % (score, thresh)
+                    )
 
 
 def extract_face(file):
@@ -135,6 +172,8 @@ def extract_face(file):
     detector = MTCNN()
     # detect faces in the image
    results = detector.detect_faces(pixels)
+    if results == []:
+        return []
     # extract the bounding box from the first face
     x1, y1, width, height = results[0]["box"]
     x2, y2 = x1 + width, y1 + height
@@ -145,7 +184,6 @@ def extract_face(file):
     image = image.resize((224, 224))
     face_array = asarray(image)
     return face_array
-
 
 
 if __name__ == "__main__":
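For context, the verification path added in this commit can be read as a small standalone sketch (not part of app.py; it assumes two already-cropped 224x224x3 face arrays, e.g. produced by the extract_face helper above):

# Minimal sketch of the face-verification logic added in this commit.
# Assumes two 224x224x3 face crops (e.g. from app.py's extract_face helper).
from numpy import asarray
from scipy.spatial.distance import cosine
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input


def is_match(face1, face2, thresh=0.5):
    # stack the two crops and apply the VGGFace2-style preprocessing (version=2)
    samples = preprocess_input(asarray([face1, face2], "float32"), version=2)
    # headless ResNet50 VGGFace backbone that yields one embedding per face
    model = VGGFace(
        model="resnet50", include_top=False, input_shape=(224, 224, 3), pooling="avg"
    )
    embeddings = model.predict(samples)
    # a cosine distance at or below the threshold is treated as the same person
    score = cosine(embeddings[0], embeddings[1])
    return score <= thresh, score

The commit's main behavioral change is the guard before this step: extract_face now returns an empty list when MTCNN finds no face, and the app stops with an error instead of passing a ragged array to the model.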
requirements.txt CHANGED
@@ -1,14 +1,11 @@
-Keras
-Keras-Applications
-Keras-Preprocessing
-git+https://github.com/ma7555/keras-vggface.git
-tensorflow-cpu
-mtcnn
+tensorflow
+streamlit
 matplotlib
-numpy
 oauthlib
-opencv-python-headless
 pandas
 Pillow
 scipy
-streamlit
+numpy==1.23.1
+protobuf==3.20.3
+mtcnn
+git+https://github.com/ma7555/keras-vggface.git
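After installing the updated file, the two new pins can be checked from Python (a hypothetical sanity-check snippet, not part of the commit):

# Hypothetical check that the pinned versions resolved correctly.
import numpy
from google import protobuf

print("numpy:", numpy.__version__)        # expected 1.23.1 per requirements.txt
print("protobuf:", protobuf.__version__)  # expected 3.20.3 per requirements.txt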