Justin Grammens committed
Commit 2aca9bf · 1 Parent(s): b0fba4a

latest updates

Files changed (1):
  1. app.py +85 -18
app.py CHANGED
@@ -11,7 +11,9 @@ def classify_face_shape(image):
     pipe = pipeline("image-classification", model="metadome/face_shape_classification")
 
     # Run the pipeline on the uploaded image
-    output = pipe(image)
+    #output = pipe(image)
+    output = pipe("face_region.jpg") # use the face_region image instead
+
     # Log the output for debugging
     print("Pipeline output for shape:", output)
     # Format the output to be compatible with gr.outputs.Label
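Every classifier in app.py follows the pattern this hunk modifies: a Hugging Face image-classification pipeline is handed a file path (or a PIL image) and returns a list of label/score dicts, which a dict comprehension reshapes into the {label: score} mapping that gr.Label expects. A minimal sketch of that round trip (face_region.jpg is the crop written elsewhere in app.py):

```python
from transformers import pipeline

# pipeline(...) accepts a local file path, a URL, or a PIL.Image and returns
# a list of dicts such as:
#   [{"label": "Oval", "score": 0.91}, {"label": "Round", "score": 0.05}, ...]
pipe = pipeline("image-classification", model="metadome/face_shape_classification")
output = pipe("face_region.jpg")

# Reshape into the {label: score} dict that gr.Label renders
formatted_output = {item["label"]: item["score"] for item in output}
print(formatted_output)
```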
@@ -22,7 +24,8 @@ def classify_face_shape(image):
 def classify_age(image):
     pipe = pipeline("image-classification", model="nateraw/vit-age-classifier")
     # Run the pipeline on the uploaded image
-    output = pipe(image)
+    #output = pipe(image)
+    output = pipe("face_region.jpg") # use the face_region image instead
 
     print("Pipeline output for age:", output)
     # Format the output to be compatible with gr.outputs.Label
@@ -34,7 +37,8 @@ def classify_skin_type(image):
     pipe = pipeline("image-classification", model="dima806/skin_types_image_detection")
 
     # Run the pipeline on the uploaded image
-    output = pipe(image)
+    #output = pipe(image)
+    output = pipe("face_region.jpg") # use the face_region image instead
 
     print("Pipeline output for skin_type:", output)
     # Format the output to be compatible with gr.outputs.Label
@@ -46,7 +50,8 @@ def classify_acne_type(image):
     pipe = pipeline("image-classification", model="imfarzanansari/skintelligent-acne")
 
     # Run the pipeline on the uploaded image
-    output = pipe(image)
+    #output = pipe(image)
+    output = pipe("face_region.jpg") # use the face_region image instead
 
     print("Pipeline output for acne:", output)
     # Format the output to be compatible with gr.outputs.Label
@@ -88,11 +93,14 @@ def classify_eye_color(image):
 
     # Run the pipeline on the uploaded image
     #output = pipe(image)
+    print("WE ARE USING THIS CODE TO GET THE RESULT FOR EYE COLOR!!!!!!")
     output = pipe("eye_regions.jpg") #use the eye_regions image instead
 
     print("Pipeline output for eye color:", output)
     # Format the output to be compatible with gr.outputs.Label
     formatted_output = {item['label']: item['score'] for item in output}
+
+    print("THIS IS FORMATTED OUTPUT!" + str(formatted_output))
 
     return formatted_output
 
@@ -103,21 +111,14 @@ def process_gradio_image(pil_image):
     return image
 
 def classify_race(image):
-    '''
-    "0": "East Asian",
-    "1": "Indian",
-    "2": "Black",
-    "3": "White",
-    "4": "Middle Eastern",
-    "5": "Latino_Hispanic",
-    "6": "Southeast Asian"
-    '''
-    pipe = pipeline("image-classification", model="crangana/trained-race")
+    pipe = pipeline("image-classification", model="cledoux42/Ethnicity_Test_v003")
     # Run the pipeline on the uploaded image
     output = pipe("face_region.jpg")
 
     # Format the output to be compatible with gr.outputs.Label
     formatted_output = {item['label']: item['score'] for item in output}
+
+    print(formatted_output)
 
     return formatted_output
 
@@ -128,6 +129,12 @@ def classify_gender(image):
     formatted_output = {item['label']: item['score'] for item in output}
     return formatted_output
 
+def classify_wrinkles(image):
+    pipe = pipeline("image-classification", model="imfarzanansari/skintelligent-wrinkles")
+    output = pipe("face_region.jpg")
+    # Format the output to be compatible with gr.outputs.Label
+    formatted_output = {item['label']: item['score'] for item in output}
+    return formatted_output
 
 def classify_image_with_multiple_models(image):
     create_eye_region(image)
@@ -140,8 +147,9 @@ def classify_image_with_multiple_models(image):
     eye_color = classify_eye_color(image)
     race = classify_race(image)
     gender = classify_gender(image)
+    wrinkles = classify_wrinkles(image)
 
-    return face_shape_result, age_result, skin_type_result, acne_results, hair_color_results, eye_shape, eye_color, race, gender, Image.open("segmented_face.jpg")
+    return face_shape_result, age_result, skin_type_result, acne_results, hair_color_results, eye_shape, eye_color, race, gender, wrinkles, Image.open("segmented_face.jpg")
 
 def create_eye_region(image):
     # Load the pre-trained face detector
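create_eye_region, modified in the next two hunks, drives the OpenCV side of the app. A minimal sketch of the Haar-cascade setup it appears to rely on — the specific cascade files are an assumption, since the diff only shows the eye-detection call — including the eyes[:1] slice this commit introduces to keep at most one detection:

```python
import cv2

# Assumed cascade files; app.py's actual paths are not shown in this diff.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")

img = cv2.imread("input.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for (x, y, w, h) in face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5):
    roi_gray = gray[y:y + h, x:x + w]
    # detectMultiScale returns an array of (x, y, w, h) boxes;
    # slicing with [:1], as this commit does, keeps at most the first box.
    eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=10, minSize=(20, 20))
    print(eyes[:1])
```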
@@ -171,12 +179,14 @@ def create_eye_region(image):
     eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=10, minSize=(20, 20))
 
     eye_positions = []
-    for (ex, ey, ew, eh) in eyes:
+    #for (ex, ey, ew, eh) in eyes:
+    for (ex, ey, ew, eh) in eyes[:1]:
         # Ensure eyes are within the upper half of the face region
         if ey + eh < h // 2:
             eye_positions.append((ex, ey, ew, eh))
 
-    for (ex, ey, ew, eh) in eyes:
+    #for (ex, ey, ew, eh) in eyes:
+    for (ex, ey, ew, eh) in eyes[:1]:
         # Draw a rectangle around the eyes
         cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
 
@@ -187,6 +197,10 @@ def create_eye_region(image):
         # Calculate the average color of the eye region
         avg_color = np.mean(eye_roi, axis=(0, 1))
 
+        print("Average color:", avg_color)
+        #color = "NULL"
+
+        color = classify_eye_color_opencv(avg_color)
         # Classify eye color based on average color
         #if avg_color[0] > avg_color[1] and avg_color[0] > avg_color[2]:
         #    color = "Brown"
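For reference, averaging over axis=(0, 1) collapses the two spatial dimensions of the eye crop and leaves one mean per channel, so avg_color is a 3-element BGR vector. A quick illustration:

```python
import numpy as np

# eye_roi is an (H, W, 3) BGR array; averaging over the two spatial axes
# leaves one mean value per colour channel: array([b_mean, g_mean, r_mean]).
eye_roi = np.random.randint(0, 256, size=(40, 60, 3), dtype=np.uint8)
avg_color = np.mean(eye_roi, axis=(0, 1))
print(avg_color.shape)  # (3,)
```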
@@ -196,11 +210,63 @@ def create_eye_region(image):
         #    color = "Blue"
 
         # Display the eye color
-        #cv2.putText(image, color, (ex, ey - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+        cv2.putText(image, color, (ex, ey - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
 
     cv2.imwrite('segmented_face.jpg', image)
 
 
+
+def classify_eye_color_opencv(avg_color):
+    """
+    Classify eye color based on average BGR values from cv2 image.
+
+    Args:
+        avg_color: numpy array containing [B, G, R] values
+
+    Returns:
+        str: classified eye color
+    """
+    # Note: OpenCV stores channels in BGR order
+
+    #avg_color = np.mean(avg_color, axis=(0, 1))
+    b, g, r = avg_color
+
+    # Define color ranges for each eye color (in BGR)
+    # These thresholds may need adjustment based on your specific lighting conditions
+
+    # Check brown eyes (darker, red-dominant)
+    if r > g and r > b and r > 100:
+        if g < 90 and b < 90:
+            return "brown"
+
+    # Check amber eyes (golden-brown)
+    if r > 150 and g > 100 and b < 100:
+        if r > g > b:
+            return "amber"
+
+    # Check hazel eyes (mix of brown and green)
+    if g > 100 and r > 100 and b < 100:
+        if abs(r - g) < 40:
+            return "hazel"
+
+    # Check green eyes (green-dominant)
+    if g > r and g > b:
+        if g > 100:
+            return "green"
+
+    # Check blue eyes (blue-dominant)
+    if b > r and b > g:
+        if b > 100:
+            return "blue"
+
+    # Check gray eyes (all values similar)
+    if abs(r - g) < 20 and abs(g - b) < 20 and abs(r - b) < 20:
+        if r > 100 and g > 100 and b > 100:
+            return "gray"
+
+    return "undefined"
+
+
 # Create the Gradio interface
 demo = gr.Interface(
     fn=classify_image_with_multiple_models, # The function to run
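A quick, illustrative sanity check of the thresholds above; the sample BGR triples are made up, and tracing them through the function yields the labels shown:

```python
import numpy as np

# Illustrative BGR triples only; real eye crops will vary with lighting.
samples = {
    "brown-ish": np.array([60.0, 70.0, 120.0]),   # B, G, R
    "blue-ish":  np.array([150.0, 110.0, 100.0]),
    "gray-ish":  np.array([130.0, 135.0, 140.0]),
}
for name, bgr in samples.items():
    print(name, "->", classify_eye_color_opencv(bgr))
# brown-ish -> brown, blue-ish -> blue, gray-ish -> gray
```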
@@ -215,6 +281,7 @@ demo = gr.Interface(
         gr.Label(num_top_classes=5, label="Eye Color"),
         gr.Label(num_top_classes=7, label="Race"),
         gr.Label(num_top_classes=2, label="Gender"),
+        gr.Label(num_top_classes=2, label="Wrinkles"),
         gr.Image(type="pil", label="Segmented Face", value="segmented_face.jpg") # Provide the path to the image
     ],
     title="Multiple Model Classification",
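Gradio pairs the values returned by fn with the outputs list by position, which is why this hunk and the new wrinkles entry in classify_image_with_multiple_models' return tuple have to land together. A stripped-down sketch of that contract:

```python
import gradio as gr

def fn(_img):
    # One return value per output component, in the same order as `outputs`
    return {"no wrinkles": 0.8, "wrinkles": 0.2}

demo = gr.Interface(fn=fn,
                    inputs=gr.Image(type="pil"),
                    outputs=[gr.Label(num_top_classes=2, label="Wrinkles")])
# demo.launch()
```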
 