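"""Gradio demo that runs several Hugging Face image-classification models on one face photo.

OpenCV Haar cascades detect the face and one eye, the crops are saved to disk, and each
classifier (face shape, age, skin type, acne, hair color, eye shape/color, race, gender,
wrinkles) runs on the saved crop or the full uploaded image.
"""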
import gradio as gr
from transformers import pipeline
from PIL import Image
import cv2
import numpy as np
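# Note: each classifier below rebuilds its transformers pipeline on every request,
# which reloads model weights each time. A minimal caching helper (a sketch only;
# the hypothetical get_cached_pipeline is not used by the functions below) could be:
from functools import lru_cache

@lru_cache(maxsize=None)
def get_cached_pipeline(model_id: str):
    # Memoize one image-classification pipeline per model id.
    return pipeline("image-classification", model=model_id)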
# Function to classify the face shape
def classify_face_shape(image):
    # Initialize the pipeline
    pipe = pipeline("image-classification", model="metadome/face_shape_classification")
    # Run the pipeline on the cropped face region saved by create_eye_region,
    # rather than on the full upload
    output = pipe("face_region.jpg")
    # Log the output for debugging
    print("Pipeline output for shape:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_age(image):
    pipe = pipeline("image-classification", model="nateraw/vit-age-classifier")
    # Run the pipeline on the cropped face region
    output = pipe("face_region.jpg")
    print("Pipeline output for age:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_skin_type(image):
    pipe = pipeline("image-classification", model="dima806/skin_types_image_detection")
    # Run the pipeline on the cropped face region
    output = pipe("face_region.jpg")
    print("Pipeline output for skin_type:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_acne_type(image):
    pipe = pipeline("image-classification", model="imfarzanansari/skintelligent-acne")
    # Run the pipeline on the cropped face region
    output = pipe("face_region.jpg")
    print("Pipeline output for acne:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_hair_color(image):
    # Alternative model previously tried: "enzostvs/hair-color"
    pipe = pipeline("image-classification", model="londe33/hair_v02")
    # Hair color is classified on the full uploaded image, not the face crop
    output = pipe(image)
    print("Pipeline output for hair color:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_eye_shape(image):
    pipe = pipeline("image-classification", model="justingrammens/eye-shape")
    # Run the pipeline on the cropped eye region saved by create_eye_region
    output = pipe("eye_regions.jpg")
    print("Pipeline output for eye shape:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_eye_color(image):
    pipe = pipeline("image-classification", model="justingrammens/eye-color")
    # Run the pipeline on the cropped eye region
    output = pipe("eye_regions.jpg")
    print("Pipeline output for eye color:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def process_gradio_image(pil_image):
    # Convert PIL image to NumPy array
    image = np.array(pil_image)
    # Convert RGB (from PIL) to BGR (OpenCV default)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image
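# Example (with a hypothetical file path): process_gradio_image(Image.open("photo.jpg"))
# returns an HxWx3 BGR array that OpenCV calls like cv2.cvtColor and cv2.rectangle accept.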
def classify_race(image):
    pipe = pipeline("image-classification", model="cledoux42/Ethnicity_Test_v003")
    # Run the pipeline on the cropped face region
    output = pipe("face_region.jpg")
    print("Pipeline output for race:", output)
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_gender(image):
    pipe = pipeline("image-classification", model="rizvandwiki/gender-classification")
    output = pipe("face_region.jpg")
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_wrinkles(image):
    pipe = pipeline("image-classification", model="imfarzanansari/skintelligent-wrinkles")
    output = pipe("face_region.jpg")
    # Format the output to be compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output
def classify_image_with_multiple_models(image):
    # Detect the face/eye regions first; the classifiers below read the saved crops.
    create_eye_region(image)
    face_shape_result = classify_face_shape(image)
    age_result = classify_age(image)
    skin_type_result = classify_skin_type(image)
    acne_results = classify_acne_type(image)
    hair_color_results = classify_hair_color(image)
    eye_shape = classify_eye_shape(image)
    eye_color = classify_eye_color(image)
    race = classify_race(image)
    gender = classify_gender(image)
    wrinkles = classify_wrinkles(image)
    # Return order must match the `outputs` list of the gr.Interface below.
    return (face_shape_result, age_result, skin_type_result, acne_results,
            hair_color_results, eye_shape, eye_color, race, gender, wrinkles,
            Image.open("segmented_face.jpg"))
def create_eye_region(image):
    # Load the pre-trained Haar cascade detectors
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
    image = process_gradio_image(image)
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        # Without a detected face, the classifiers would read stale crops from disk
        raise gr.Error("No face detected in the uploaded image.")
    for (x, y, w, h) in faces:
        # Draw a rectangle around the face
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Extract and save the face region
        face_roi = image[y:y + h, x:x + w]
        cv2.imwrite('face_region.jpg', face_roi)
        # Region of Interest (ROI) for the face
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = image[y:y + h, x:x + w]
        # Detect eyes in the face ROI
        eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=10, minSize=(20, 20))
        # Only the first detected eye is used
        for (ex, ey, ew, eh) in eyes[:1]:
            # Draw a rectangle around the eye
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            # Extract and save the eye region
            eye_roi = roi_color[ey:ey + eh, ex:ex + ew]
            cv2.imwrite('eye_regions.jpg', eye_roi)
            # Classify the eye color from the average BGR color of the eye region
            avg_color = np.mean(eye_roi, axis=(0, 1))
            print("Average color:", avg_color)
            color = classify_eye_color_opencv(avg_color)
            # Label the eye color on the annotated image
            cv2.putText(image, color, (ex, ey - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imwrite('segmented_face.jpg', image)
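# Note: the crops are passed between functions via fixed filenames on disk
# ("face_region.jpg", "eye_regions.jpg", "segmented_face.jpg"), so concurrent
# requests could overwrite each other's files.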
def classify_eye_color_opencv(avg_color):
    """Classify eye color from the average BGR values of the eye region.

    Args:
        avg_color: NumPy array containing [B, G, R] values.

    Returns:
        str: the classified eye color.
    """
    # OpenCV stores channels as BGR
    b, g, r = avg_color
    # Threshold-based color ranges (in BGR); these may need adjustment
    # for your specific lighting conditions.
    # Brown eyes: darker, red-dominant
    if r > g and r > b and r > 100:
        if g < 90 and b < 90:
            return "brown"
    # Amber eyes: golden-brown
    if r > 150 and g > 100 and b < 100:
        if r > g > b:
            return "amber"
    # Hazel eyes: mix of brown and green
    if g > 100 and r > 100 and b < 100:
        if abs(r - g) < 40:
            return "hazel"
    # Green eyes: green-dominant
    if g > r and g > b:
        if g > 100:
            return "green"
    # Blue eyes: blue-dominant
    if b > r and b > g:
        if b > 100:
            return "blue"
    # Gray eyes: all channels similar and bright
    if abs(r - g) < 20 and abs(g - b) < 20 and abs(r - b) < 20:
        if r > 100 and g > 100 and b > 100:
            return "gray"
    return "undefined"
# Create the Gradio interface
demo = gr.Interface(
    fn=classify_image_with_multiple_models,  # The function to run
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=5, label="Face Shape"),
        gr.Label(num_top_classes=5, label="Age"),
        gr.Label(num_top_classes=3, label="Skin Type"),
        gr.Label(num_top_classes=5, label="Acne Type"),
        gr.Label(num_top_classes=5, label="Hair Color"),
        gr.Label(num_top_classes=4, label="Eye Shape"),
        gr.Label(num_top_classes=5, label="Eye Color"),
        gr.Label(num_top_classes=7, label="Race"),
        gr.Label(num_top_classes=2, label="Gender"),
        gr.Label(num_top_classes=2, label="Wrinkles"),
        gr.Image(type="pil", label="Segmented Face", value="segmented_face.jpg"),  # Path to the annotated image
    ],
    title="Multiple Model Classification",
    description="Upload an image to classify the face using multiple classification models",
)
#demo.launch(auth=("admin", "pass1234"))
demo.launch()