paresh95 committed
Commit 988dde3
1 Parent(s): 0347340

PS | Add face comparison feature

app.py CHANGED
@@ -1,51 +1,142 @@
 import gradio as gr
 import os
 import yaml
+import pandas as pd
 from src.face_texture import GetFaceTexture
 from src.face_symmetry import GetFaceSymmetry
 from src.face_demographics import GetFaceDemographics
 from src.face_proportions import GetFaceProportions
+from PIL import Image as PILImage
+from typing import List, Any
 
 
-def combined_fn(input_image, input_image_2):
-    demographics_dict = GetFaceDemographics().main(input_image)
-    golden_ratios_dict, equal_ratios_dict, face_landmarks_image = GetFaceProportions().main(input_image)
-    face_symmetry_image, symmetry_dict = GetFaceSymmetry().main(input_image)
-    face_image, face_texture_image, texture_dict = GetFaceTexture().main(input_image)
-
+def get_results(image_input: PILImage.Image) -> List[Any]:
+    demographics_dict = GetFaceDemographics().main(image_input)
+    (
+        ratios_dict,
+        face_landmarks_image,
+    ) = GetFaceProportions().main(image_input)
+    face_symmetry_image, symmetry_dict = GetFaceSymmetry().main(image_input)
+    face_image, face_texture_image, texture_dict = GetFaceTexture().main(image_input)
+
     results = {
         "Demographic predictions": demographics_dict,
-        "Face proportions (golden ratio)": golden_ratios_dict,
-        "Face proportions (equal ratio)": equal_ratios_dict,
+        "Face proportions": ratios_dict,
         "Face symmetry metrics": symmetry_dict,
-        "Face texture metrics": texture_dict
+        "Face texture metrics": texture_dict,
     }
-    with open("parameters.yml", 'r') as file:
+
+    return (
+        results,
+        face_image,
+        face_landmarks_image,
+        face_symmetry_image,
+        face_texture_image,
+    )
+
+
+def concatenate_image(
+    image_1: PILImage.Image, image_2: PILImage.Image
+) -> PILImage.Image:
+    image = PILImage.new("RGB", (image_1.width + image_2.width, image_1.height))
+    image.paste(image_1, (0, 0))
+    image.paste(image_2, (image_1.width, 0))
+    return image
+
+
+def get_dict_child_data(results_image: dict, image_number: int) -> dict:
+    flattened_data = {"image": f"Face {image_number}"}
+    for key, sub_dict in results_image.items():
+        for sub_key, value in sub_dict.items():
+            flattened_data[sub_key] = value
+    return flattened_data
+
+
+def output_fn(
+    image_input_1: PILImage.Image, image_input_2: PILImage.Image
+) -> List[Any]:
+    with open("parameters.yml", "r") as file:
         data = yaml.safe_load(file)
         results_interpretation = data["results_interpretation"]
-
-    return (results, results_interpretation, face_image, face_landmarks_image, face_symmetry_image, face_texture_image)
+
+    if image_input_1 is not None and image_input_2 is not None:
+        (
+            results_image_1,
+            face_image_1,
+            face_landmarks_image_1,
+            face_symmetry_image_1,
+            face_texture_image_1,
+        ) = get_results(image_input_1)
+        (
+            results_image_2,
+            face_image_2,
+            face_landmarks_image_2,
+            face_symmetry_image_2,
+            face_texture_image_2,
+        ) = get_results(image_input_2)
+        results_image_1, results_image_2 = get_dict_child_data(
+            results_image_1, 1
+        ), get_dict_child_data(results_image_2, 2)
+        results_df = pd.DataFrame([results_image_1, results_image_2])
+        face_image = concatenate_image(face_image_1, face_image_2)
+        face_landmarks_image = concatenate_image(
+            face_landmarks_image_1, face_landmarks_image_2
+        )
+        face_symmetry_image = concatenate_image(
+            face_symmetry_image_1, face_symmetry_image_2
+        )
+        face_texture_image = concatenate_image(
+            face_texture_image_1, face_texture_image_2
+        )
+
+    if image_input_1 is None and image_input_2 is not None:
+        (
+            results,
+            face_image,
+            face_landmarks_image,
+            face_symmetry_image,
+            face_texture_image,
+        ) = get_results(image_input_2)
+        results_df = pd.DataFrame([get_dict_child_data(results, 2)])
+
+    if image_input_2 is None and image_input_1 is not None:
+        (
+            results,
+            face_image,
+            face_landmarks_image,
+            face_symmetry_image,
+            face_texture_image,
+        ) = get_results(image_input_1)
+        results_df = pd.DataFrame([get_dict_child_data(results, 1)])
+
+    return (
+        results_df,
+        results_interpretation,
+        face_image,
+        face_landmarks_image,
+        face_symmetry_image,
+        face_texture_image,
+    )
+
 
 gigi_hadid = os.path.join(os.path.dirname(__file__), "data/gigi_hadid.webp")
-jay_z = os.path.join(os.path.dirname(__file__), "data/jay_z.jpg")
 
 iface = gr.Interface(
-    fn=combined_fn,
+    fn=output_fn,
     inputs=[
-        gr.Image(type="pil", label="Upload Face 1", value=jay_z),
-        gr.Image(type="pil", label="Upload Face 2", value=gigi_hadid)
-    ],
+        gr.Image(type="pil", label="Upload Face 1", value=gigi_hadid),
+        gr.Image(type="pil", label="Upload Face 2"),
+    ],
     outputs=[
-        gr.JSON(label="Results"),
+        gr.DataFrame(label="Results"),
         gr.JSON(label="Results explainer"),
         gr.Image(type="pil", label="Extracted face"),
         gr.Image(type="pil", label="Face landmarks"),
         gr.Image(type="pil", label="Face symmetry"),
-        gr.Image(type="pil", label="Extracted face texture"),
+        gr.Image(type="pil", label="Extracted face texture"),
     ],
     title="Advanced Facial Feature Detector",
-    description=
-    """
+    description="""
    <!DOCTYPE html>
    <html lang="en">
    <head>
@@ -59,18 +150,21 @@ iface = gr.Interface(
    </style>
    </head>
    <body>
-
+
    <div class="section">
-    <p><strong>Description:</strong> This tool analyses a facial image to predict age and gender, assess symmetry, evaluate proportions, and examine texture.</p>
-    <p><strong>Instructions:</strong> For optimal results, upload a clear front-facing image (see example image). To do so, either drag and drop your photo or click on "Upload Face Image", then press 'Submit'.</p>
-    <p><strong>Interpreting the results:</strong></p>
+    <font size="3">
+    <h3><center>Turn your selfie into insights! Discover age and gender predictions, symmetry evaluations, and detailed proportions and texture analyses with our app.</center></h3>
+    <hr style="margin-top: 20px; margin-bottom: 20px;">
+    <p><strong>Instructions:</strong> Upload up to 2 photos. For optimal results, upload a clear front-facing image (see example). To do so, either drag and drop your photo or click <i>Upload Face</i>, then press <i>Submit</i>.</p>
    <p><strong>Other information:</strong></p>
    <ul>
+    <li>The output computation requires approximately 5 to 30 seconds.</li>
    <li>No uploaded photo is stored.</li>
-    <li>The output will take several seconds to compute.</li>
    <li>If an error occurs try again or try a different photo or angle.</li>
+    <li>Once submitted, a section detailing the results and associated images will be displayed.</li>
    </ul>
+    </font>
+    </div>
-    </div>
    </body>
    </html>
    """,
notebooks/own-photos-symmetry.ipynb CHANGED
@@ -910,13 +910,6 @@
    "source": [
     "df.sort_values(\"mse\") "
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
parameters.yml CHANGED
@@ -15,15 +15,12 @@ results_interpretation:
    age_confidence: "Confidence of age prediction (0-1)"
    gender: "Predicted gender"
    gender_confidence: "Confidence of gender prediction (0-1)"
-  Face proportions (golden ratio):
-    Ideal ratio (golden ratio): "The ideal facial proportion ratio according to the classical Greek work on maths and geometry. Ideal facial features ratios in this section should be 1:1.62 (the golden ratio)"
-    Top of nose to middle of mouth vs middle mouth to bottom of chin: "See description"
-    Middle of mouth to bottom of mouth vs top of mouth to middle of mouth: "See description"
-  Face proportions (equal ratio):
-    Ideal ratio: "Typical facial features ratios in this section should be 1:1 (equal ratio)"
-    Eye width vs distance between eyes: "See description"
-    Eye to eyebrows vs eye height: "See description"
-    Center of left to right eye vs mouth width: "See description"
+  Face proportions:
+    top_of_nose_to_middle_of_mouth_vs_middle_mouth_to_bottom_of_chin: "Ideal value is 1.62 (golden ratio - from classical Greek work on maths and geometry)"
+    middle_of_mouth_to_bottom_of_mouth_vs_top_of_mouth_to_middle_of_mouth: "Ideal value is 1.62 (golden ratio - from classical Greek work on maths and geometry)"
+    eye_width_vs_distance_between_eyes: "Ideal value is 1"
+    eye_to_eyebrows_vs_eye_height: "Ideal value is 1"
+    center_of_left_to_right_eye_vs_mouth_width: "Ideal value is 1"
  Face symmetry metrics:
    structural_similarity: "Range: -1 (opposite) to 1 (similar). Considers differences in structural information, luminance, and texture."
    cosine_distance: "Range: -1 to 1. 0 = similar, -1 or 1 = not similar. Considers differences in pixels."
@@ -34,4 +31,4 @@ results_interpretation:
    orb_detector_matches: "Higher is better. Counts the number of matches between keypoints in images."
    pixel_difference: "Lower is better. Minimum value is 0, maximum is unbounded."
  Face texture metrics:
-    Texture std: "Lower means less varied facial texture. Minimum is 0, maximum is unbounded."
+    texture_std: "Lower means less varied facial texture. Minimum is 0, maximum is unbounded."
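
For context, the renamed snake_case keys now match the flattened metric names the code returns, so each metric in the results table can be looked up against its interpretation directly. A minimal sketch (not part of this commit), assuming the YAML structure above:

import yaml

with open("parameters.yml", "r") as f:
    interpretation = yaml.safe_load(f)["results_interpretation"]

flat_row = {"texture_std": 10.5}  # hypothetical flattened result row
for metric, value in flat_row.items():
    for section in interpretation.values():
        # top-level entries are either plain strings or nested metric dicts
        if isinstance(section, dict) and metric in section:
            print(f"{metric}={value}: {section[metric]}")
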
requirements.txt CHANGED
@@ -8,3 +8,4 @@ pyyaml==6.0
 scikit-learn==1.2.2
 transformers==4.33.3
 torch==2.0.1
+pandas==1.5.3
src/cv_utils.py CHANGED
@@ -16,4 +16,4 @@ def resize_image_height(image: PILImage.Image, new_height=300) -> PILImage.Image
     aspect_ratio = image.width / image.height
     new_width = int(aspect_ratio * new_height)
     image = image.resize((new_width, new_height))
-    return image
+    return image
src/face_demographics.py CHANGED
@@ -18,38 +18,57 @@ with open("parameters.yml", "r") as stream:
 class GetFaceDemographics:
     def __init__(self):
         pass
-
+
     @staticmethod
     def preprocess_image_for_caffe_cnn(image: np.array):
-        model_mean = (78.4263377603, 87.7689143744, 114.895847746) # taken from the model page on Caffe
+        model_mean = (
+            78.4263377603,
+            87.7689143744,
+            114.895847746,
+        )  # taken from the model page on Caffe
         blob = cv2.dnn.blobFromImage(image, 1.0, (227, 227), model_mean, swapRB=False)
         return blob
-
+
     @staticmethod
     def get_age_cnn(blob) -> Tuple:
-        age_net = cv2.dnn.readNet(parameters["face_age"]["config"], parameters["face_age"]["model"])
-        age_list = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
+        age_net = cv2.dnn.readNet(
+            parameters["face_age"]["config"], parameters["face_age"]["model"]
+        )
+        age_list = [
+            "(0-2)",
+            "(4-6)",
+            "(8-12)",
+            "(15-20)",
+            "(25-32)",
+            "(38-43)",
+            "(48-53)",
+            "(60-100)",
+        ]
         age_net.setInput(blob)
         age_preds = age_net.forward()
         i = age_preds[0].argmax()
         age = age_list[i]
         age_confidence_score = age_preds[0][i]
         return age, age_confidence_score
-
+
     @staticmethod
     def get_gender_cnn(blob) -> Tuple:
-        gender_net = cv2.dnn.readNet(parameters["face_gender"]["config"], parameters["face_gender"]["model"])
-        gender_list = ['Male', 'Female']
+        gender_net = cv2.dnn.readNet(
+            parameters["face_gender"]["config"], parameters["face_gender"]["model"]
+        )
+        gender_list = ["Male", "Female"]
         gender_net.setInput(blob)
         gender_preds = gender_net.forward()
         i = gender_preds[0].argmax()
         gender = gender_list[i]
         gender_confidence_score = gender_preds[0][i]
         return gender, gender_confidence_score
-
+
     @staticmethod
     def get_age_vit(image: np.array) -> Tuple:
-        os.environ["CURL_CA_BUNDLE"] = "" # fixes VPN issue when connecting to hugging face hub
+        os.environ[
+            "CURL_CA_BUNDLE"
+        ] = ""  # fixes VPN issue when connecting to hugging face hub
         urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
         id2label = {
             0: "0-2",
@@ -60,52 +79,59 @@ class GetFaceDemographics:
             5: "40-49",
             6: "50-59",
             7: "60-69",
-            8: "more than 70"
+            8: "more than 70",
         }
-        model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
-        transforms = ViTImageProcessor.from_pretrained('nateraw/vit-age-classifier')
-        inputs = transforms(image, return_tensors='pt')
+        model = ViTForImageClassification.from_pretrained("nateraw/vit-age-classifier")
+        transforms = ViTImageProcessor.from_pretrained("nateraw/vit-age-classifier")
+        inputs = transforms(image, return_tensors="pt")
         output = model(**inputs)
         proba = output.logits.softmax(1)
-        preds = proba.argmax(1)
+        preds = proba.argmax(1)
         age_confidence_score = round(max(proba[0]).item(), 2)
         age = id2label[int(preds)]
         return age, age_confidence_score
-
+
     @staticmethod
     def get_gender_vit(image: np.array) -> Tuple:
-        os.environ["CURL_CA_BUNDLE"] = "" # fixes VPN issue when connecting to hugging face hub
+        os.environ[
+            "CURL_CA_BUNDLE"
+        ] = ""  # fixes VPN issue when connecting to hugging face hub
         urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
         id2label = {
             0: "female",
             1: "male",
         }
-        model = ViTForImageClassification.from_pretrained('rizvandwiki/gender-classification')
-        transforms = ViTImageProcessor.from_pretrained('rizvandwiki/gender-classification')
-        inputs = transforms(image, return_tensors='pt')
+        model = ViTForImageClassification.from_pretrained(
+            "rizvandwiki/gender-classification"
+        )
+        transforms = ViTImageProcessor.from_pretrained(
+            "rizvandwiki/gender-classification"
+        )
+        inputs = transforms(image, return_tensors="pt")
         output = model(**inputs)
         proba = output.logits.softmax(1)
-        preds = proba.argmax(1)
+        preds = proba.argmax(1)
         gender_confidence_score = round(max(proba[0]).item(), 2)
         gender = id2label[int(preds)]
         return gender, gender_confidence_score
 
     def main(self, image_input) -> dict:
-        image = get_image(image_input)
+        image = get_image(image_input)
         age, age_confidence_score = self.get_age_vit(image)
         gender, gender_confidence_score = self.get_gender_vit(image)
         d = {
             "age_range": age,
             "age_confidence": age_confidence_score,
             "gender": gender,
-            "gender_confidence": gender_confidence_score
+            "gender_confidence": gender_confidence_score,
         }
         return d
 
+
 if __name__ == "__main__":
     path_to_images = "data/"
     image_files = os.listdir(path_to_images)
     for image in image_files:
         print(image)
         results = GetFaceDemographics().main(path_to_images + image)
-        print(results)
+        print(results)
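
For context, a standalone sketch (not part of this commit) of the ViT path used above, with the model IDs taken from the diff; downloading the checkpoints requires network access. The module hard-codes its own id2label map, while this sketch reads the one shipped with the model config, which should agree.

from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

image = Image.open("data/gigi_hadid.webp").convert("RGB")  # example image from the repo
model = ViTForImageClassification.from_pretrained("nateraw/vit-age-classifier")
processor = ViTImageProcessor.from_pretrained("nateraw/vit-age-classifier")
inputs = processor(image, return_tensors="pt")
proba = model(**inputs).logits.softmax(1)  # probabilities over age buckets
pred = int(proba.argmax(1))
print(model.config.id2label[pred], round(proba[0][pred].item(), 2))
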
src/face_proportions.py CHANGED
@@ -13,12 +13,12 @@ with open("parameters.yml", "r") as stream:
         parameters = yaml.safe_load(stream)
     except yaml.YAMLError as exc:
         print(exc)
-
-
+
+
 class GetFaceProportions:
     def __init__(self):
-        self.golden_ratio = 1.618
-
+        pass
+
     @staticmethod
     def preprocess_image(image: np.array) -> np.array:
         image = imutils.resize(image, width=500)
@@ -27,80 +27,100 @@ class GetFaceProportions:
 
     @staticmethod
     def detect_face_landmarks(gray_image: np.array) -> List[Union[np.array, np.array]]:
-
+
         detector = dlib.get_frontal_face_detector()
         predictor = dlib.shape_predictor(parameters["face_landmarks"]["model"])
         rects = detector(gray_image, 1)
         for rect in rects:
             shape = predictor(gray_image, rect)
-            shape = np.array([(shape.part(i).x, shape.part(i).y) for i in range(shape.num_parts)])
+            shape = np.array(
+                [(shape.part(i).x, shape.part(i).y) for i in range(shape.num_parts)]
+            )
 
             # Draw facial landmarks
             for (x, y) in shape:
                 cv2.circle(gray_image, (x, y), 2, (0, 255, 0), -1)
-
+
         return shape, gray_image
-
-    def compute_golden_ratios(self, shape: np.array) -> dict:
+
+    @staticmethod
+    def compute_golden_ratios(shape: np.array) -> dict:
         top_mouth, middle_mouth, bottom_mouth = shape[51], shape[62], shape[57]
         top_nose, bottom_nose = shape[27], shape[33]
         bottom_chin = shape[8]
-
+
         # 1
-        top_nose_to_middle_mouth_dist = np.linalg.norm(top_nose - middle_mouth) # euclidean distance
+        top_nose_to_middle_mouth_dist = np.linalg.norm(
+            top_nose - middle_mouth
+        )  # euclidean distance
         middle_mouth_to_bottom_chin_dist = np.linalg.norm(middle_mouth - bottom_chin)
-        ratio_top_nose_to_middle_mouth_vs_middle_mouth_to_bottom_chin = top_nose_to_middle_mouth_dist/middle_mouth_to_bottom_chin_dist
-
+        ratio_top_nose_to_middle_mouth_vs_middle_mouth_to_bottom_chin = (
+            top_nose_to_middle_mouth_dist / middle_mouth_to_bottom_chin_dist
+        )
+
         # 2
         top_mouth_to_middle_mouth_dist = np.linalg.norm(top_mouth - middle_mouth)
         middle_mouth_to_bottom_mouth_dist = np.linalg.norm(middle_mouth - bottom_mouth)
-        ratio_middle_mouth_to_bottom_mouth_vs_top_mouth_to_middle_mouth = middle_mouth_to_bottom_mouth_dist/top_mouth_to_middle_mouth_dist
-
+        ratio_middle_mouth_to_bottom_mouth_vs_top_mouth_to_middle_mouth = (
+            middle_mouth_to_bottom_mouth_dist / top_mouth_to_middle_mouth_dist
+        )
+
         golden_ratios = {
-            "Ideal ratio (golden ratio)": self.golden_ratio,
-            "Top of nose to middle of mouth vs middle mouth to bottom of chin": ratio_top_nose_to_middle_mouth_vs_middle_mouth_to_bottom_chin,
-            "Middle of mouth to bottom of mouth vs top of mouth to middle of mouth": ratio_middle_mouth_to_bottom_mouth_vs_top_mouth_to_middle_mouth
+            "top_of_nose_to_middle_of_mouth_vs_middle_mouth_to_bottom_of_chin": ratio_top_nose_to_middle_mouth_vs_middle_mouth_to_bottom_chin,
+            "middle_of_mouth_to_bottom_of_mouth_vs_top_of_mouth_to_middle_of_mouth": ratio_middle_mouth_to_bottom_mouth_vs_top_mouth_to_middle_mouth,
         }
         return golden_ratios
-
+
     @staticmethod
     def compute_equal_ratios(shape: np.array) -> dict:
-        left_side_left_eye, right_side_left_eye, left_side_right_eye, right_side_right_eye = shape[36], shape[39], shape[42], shape[45]
-        left_eye_top, left_eye_bottom, right_eye_top, right_eye_bottom = shape[37], shape[41], shape[44], shape[46]
+        (
+            left_side_left_eye,
+            right_side_left_eye,
+            left_side_right_eye,
+            right_side_right_eye,
+        ) = (shape[36], shape[39], shape[42], shape[45])
+        left_eye_top, left_eye_bottom, right_eye_top, right_eye_bottom = (
+            shape[37],
+            shape[41],
+            shape[44],
+            shape[46],
+        )
         left_eyebrow_top, right_eyebrow_top = shape[19], shape[24]
         left_eye_center = np.mean([shape[37], shape[38], shape[41], shape[40]], axis=0)
         right_eye_center = np.mean([shape[43], shape[44], shape[47], shape[46]], axis=0)
         left_mouth, right_mouth = shape[48], shape[54]
-
+
         # 1
         left_eye_dist = np.linalg.norm(left_side_left_eye - right_side_left_eye)
         right_eye_dist = np.linalg.norm(left_side_right_eye - right_side_right_eye)
-        average_eye_dist = (left_eye_dist + right_eye_dist)/2
+        average_eye_dist = (left_eye_dist + right_eye_dist) / 2
         between_eye_dist = np.linalg.norm(right_side_left_eye - left_side_right_eye)
-        ratio_eyes_width_vs_between_eye = average_eye_dist/between_eye_dist
-
+        ratio_eyes_width_vs_between_eye = average_eye_dist / between_eye_dist
+
         # 2
         left_eye_to_eyebrow_dist = np.linalg.norm(left_eyebrow_top - left_eye_top)
         right_eye_to_eyebrow_dist = np.linalg.norm(right_eyebrow_top - right_eye_top)
-        eye_to_eyebrow_dist = (left_eye_to_eyebrow_dist + right_eye_to_eyebrow_dist)/2
+        eye_to_eyebrow_dist = (left_eye_to_eyebrow_dist + right_eye_to_eyebrow_dist) / 2
         left_eye_height = np.linalg.norm(left_eye_top - left_eye_bottom)
         right_eye_height = np.linalg.norm(right_eye_top - right_eye_bottom)
-        eye_height = (left_eye_height + right_eye_height)/2
-        ratio_eye_to_eyebrow_vs_eye_height = eye_to_eyebrow_dist/eye_height
-
+        eye_height = (left_eye_height + right_eye_height) / 2
+        ratio_eye_to_eyebrow_vs_eye_height = eye_to_eyebrow_dist / eye_height
+
         # 3
-        left_to_right_eye_center_dist = np.linalg.norm(left_eye_center - right_eye_center)
+        left_to_right_eye_center_dist = np.linalg.norm(
+            left_eye_center - right_eye_center
+        )
         mouth_width = np.linalg.norm(left_mouth - right_mouth)
-        ratio_left_to_right_eye_center_vs_mouth_width = left_to_right_eye_center_dist/mouth_width
-
+        ratio_left_to_right_eye_center_vs_mouth_width = (
+            left_to_right_eye_center_dist / mouth_width
+        )
+
         equal_ratios = {
-            "Ideal ratio": 1,
-            "Eye width vs distance between eyes": ratio_eyes_width_vs_between_eye,
-            "Eye to eyebrows vs eye height": ratio_eye_to_eyebrow_vs_eye_height,
-            "Center of left to right eye vs mouth width": ratio_left_to_right_eye_center_vs_mouth_width
+            "eye_width_vs_distance_between_eyes": ratio_eyes_width_vs_between_eye,
+            "eye_to_eyebrows_vs_eye_height": ratio_eye_to_eyebrow_vs_eye_height,
+            "center_of_left_to_right_eye_vs_mouth_width": ratio_left_to_right_eye_center_vs_mouth_width,
        }
         return equal_ratios
-
 
     def main(self, image_input):
         image = get_image(image_input)
@@ -110,10 +130,11 @@ class GetFaceProportions:
         golden_ratios = {k: round(v, 2) for k, v in golden_ratios.items()}
         equal_ratios = self.compute_equal_ratios(shape)
         equal_ratios = {k: round(v, 2) for k, v in equal_ratios.items()}
-        image = PILImage.fromarray(image)
+        image = PILImage.fromarray(image)
         image = resize_image_height(image, new_height=300)
-        return golden_ratios, equal_ratios, image
-
+        ratios = {**golden_ratios, **equal_ratios}
+        return ratios, image
+
 
 if __name__ == "__main__":
     path_to_images = "data/"
@@ -121,4 +142,4 @@ if __name__ == "__main__":
     for image in image_files:
         print(image)
         results = GetFaceProportions().main(path_to_images + image)
-        print(results)
+        print(results)
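
For context, the behavioral change here is that main now returns a single flat ratios dict instead of separate golden-ratio and equal-ratio dicts. A toy sketch of the merge (not part of this commit; the distances are made up, not real landmark measurements):

golden_ratios = {
    # ideal value ~1.62 (golden ratio); 64.0 and 40.0 are made-up distances
    "top_of_nose_to_middle_of_mouth_vs_middle_mouth_to_bottom_of_chin": 64.0 / 40.0,
}
equal_ratios = {
    # ideal value ~1; 30.0 and 31.0 are made-up distances
    "eye_width_vs_distance_between_eyes": 30.0 / 31.0,
}
ratios = {**golden_ratios, **equal_ratios}  # what main() now returns
print({k: round(v, 2) for k, v in ratios.items()})
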
src/face_symmetry.py CHANGED
@@ -118,7 +118,9 @@ class GetFaceSymmetry:
             )
             (startX, startY, endX, endY) = box.astype("int")
             face = image[startY:endY, startX:endX]
-            if face.shape[0] != 0: # temp fix bug where image of dim (0, 0, 3) appear
+            if (
+                face.shape[0] != 0
+            ):  # temp fix bug where image of dim (0, 0, 3) appear
                 face = self.postprocess_face(face)
                 left_half, right_half = self.get_face_halves(face)
                 d = self.get_face_similarity_results(left_half, right_half)
@@ -133,7 +135,7 @@ class GetFaceSymmetry:
 
         full_face = np.hstack((best_left_half, best_right_half))
         full_face_image = PILImage.fromarray(full_face)
-        full_face_image = resize_image_height(full_face_image, new_height=300)
+        full_face_image = resize_image_height(full_face_image, new_height=300)
         best_face_data = {k: float(round(v, 2)) for k, v in best_face_data.items()}
         return full_face_image, best_face_data
 
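For context, a minimal sketch (not part of this commit) of the half-face comparison idea, using skimage's structural_similarity, the metric named in parameters.yml; the module's actual get_face_halves and get_face_similarity_results may differ in detail.

import numpy as np
from skimage.metrics import structural_similarity

face = np.random.randint(0, 255, (200, 200), dtype=np.uint8)  # stand-in grayscale face
mid = face.shape[1] // 2
left_half = face[:, :mid]
right_half = np.fliplr(face[:, mid:])  # mirror the right half so it aligns with the left
score = structural_similarity(left_half, right_half)
print(round(float(score), 2))  # 1.0 would mean a perfectly symmetric face
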
src/face_texture.py CHANGED
@@ -31,7 +31,7 @@ class GetFaceTexture:
             faces[0].width(),
             faces[0].height(),
         )
-        face_image = gray_image[y: y + h, x: x + w]
+        face_image = gray_image[y : y + h, x : x + w]
         return face_image
 
     @staticmethod
@@ -58,9 +58,9 @@ class GetFaceTexture:
         lbp, std = self.get_face_texture(face_image)
         face_texture_image = self.postprocess_image(lbp)
         face_image = PILImage.fromarray(face_image)
-        face_image = resize_image_height(face_image, new_height=300)
-        face_texture_image = resize_image_height(face_texture_image, new_height=300)
-        return face_image, face_texture_image, {"Texture std": round(std, 2)}
+        face_image = resize_image_height(face_image, new_height=300)
+        face_texture_image = resize_image_height(face_texture_image, new_height=300)
+        return face_image, face_texture_image, {"texture_std": round(std, 2)}
 
 
 if __name__ == "__main__":
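
For context, a sketch (not part of this commit) of what texture_std plausibly measures, given that get_face_texture returns an LBP map and a std: the standard deviation of a local-binary-pattern image, which is low for smooth skin and high for varied texture. The LBP parameters (P=8, R=1, "uniform") are assumptions; the module's own settings may differ.

import numpy as np
from skimage.feature import local_binary_pattern

gray_face = np.random.randint(0, 255, (300, 300), dtype=np.uint8)  # stand-in face crop
lbp = local_binary_pattern(gray_face, P=8, R=1, method="uniform")
print({"texture_std": round(float(lbp.std()), 2)})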