import gradio as gr
import mediapipe as mp
import numpy as np
import cv2
title = "Hugging Face Me"
description = "Demo for overlaying the Hugging Face logo on your face using the MediaPipe Face Detection model."
article = "<p style='text-align: center'><a href='https://google.github.io/mediapipe/solutions/face_detection.html' target='_blank'>MediaPipe Face Detection</a> | <a href='https://github.com/google/mediapipe' target='_blank'>GitHub Repo</a></p>"
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
def draw_huggingfaces(image, results):
    """Warp the Hugging Face logo onto each detected face and composite it."""
    height, width, _ = image.shape
    output_img = image.copy()

    if results.detections:
        for detection in results.detections:
            # Keypoints 0, 1 and 3 are the right eye, left eye and mouth center.
            # They are returned as relative coordinates, so scale them to pixels.
            face_coordinates = np.array([[detection.location_data.relative_keypoints[i].x * width,
                                          detection.location_data.relative_keypoints[i].y * height]
                                         for i in [0, 1, 3]], dtype=np.float32)

            # Affine transform mapping the logo landmarks onto the detected face keypoints.
            M = cv2.getAffineTransform(huggingface_landmarks, face_coordinates)
            transformed_huggingface = cv2.warpAffine(huggingface_image, M, (width, height))

            # Composite only the non-transparent pixels of the warped logo.
            transformed_huggingface_mask = transformed_huggingface[:, :, 3] != 0
            output_img[transformed_huggingface_mask] = transformed_huggingface[transformed_huggingface_mask, :3]

    return output_img
def huggingface_me(image):
    with mp_face_detection.FaceDetection(
            model_selection=0,
            min_detection_confidence=0.5) as face_detection:

        # Convert the BGR image to RGB and process it with MediaPipe Face Detection.
        results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        return draw_huggingfaces(image, results)
# Load hugging face logo and landmark coordinates
huggingface_image = cv2.imread("images/hugging-face.png", cv2.IMREAD_UNCHANGED)
huggingface_image = cv2.cvtColor(huggingface_image, cv2.COLOR_BGRA2RGBA)
huggingface_landmarks = np.array([[747,697],[1289,697],[1022,1116]], dtype=np.float32)
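# Assumed correspondence: these pixel coordinates appear to mark the logo's two
# eyes and its mouth, listed in the same order as the face keypoints selected
# above (right eye, left eye, mouth center).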
webcam_image = gr.inputs.Image(label="Input Image", source="webcam")
output_image = gr.outputs.Image(label="Output Image")
gr.Interface(huggingface_me,
             live=True,
             inputs=webcam_image,
             outputs=output_image,
             title=title,
             description=description,
             article=article).launch()
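# To run locally (assuming the dependencies are available):
#   pip install gradio mediapipe opencv-python numpy
#   python app.py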