# mishigify / app.py
# HuggingFace Space by osanseviero (HF staff) — "Update app.py", commit e72b044
import gradio as gr
import mediapipe as mp
import numpy as np
import cv2
# UI copy shown in the Gradio interface.
title = "Mishigify Me"
description = " Demo for adding some mishig to this"
# Footer HTML linking to the MediaPipe face-detection docs and repo.
article = "<p style='text-align: center'><a href='https://google.github.io/mediapipe/solutions/face_detection.html' target='_blank'>Mediapipe Face Detection</a> | <a href='https://github.com/google/mediapipe' target='_blank'>Github Repo</a></p>"
# MediaPipe face-detection solution module and its drawing helpers.
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
def draw_mishigs(image, results):
    """Composite the mishig logo onto every face detected in *image*.

    For each MediaPipe detection, an affine transform is computed that maps
    the logo's three reference landmarks (module-level
    ``huggingface_landmarks``) onto three of the face's relative keypoints
    (indices 0, 1, 3 — per the MediaPipe docs these are right eye, left eye
    and mouth center). The warped RGBA logo is then pasted wherever its
    alpha channel is non-zero.

    Returns a new array; the input *image* is not modified.
    """
    h, w = image.shape[:2]
    composited = image.copy()
    if not results.detections:
        return composited
    for detection in results.detections:
        keypoints = detection.location_data.relative_keypoints
        # Convert the relative (0..1) keypoints to pixel coordinates.
        anchors = np.array(
            [[keypoints[i].x * w, keypoints[i].y * h] for i in (0, 1, 3)],
            dtype=np.float32,
        )
        warp = cv2.getAffineTransform(huggingface_landmarks, anchors)
        warped_logo = cv2.warpAffine(huggingface_image, warp, (w, h))
        # Paste only where the warped logo is opaque, dropping its alpha.
        opaque = warped_logo[:, :, 3] != 0
        composited[opaque] = warped_logo[opaque, :3]
    return composited
def mishig_me(image):
    """Detect faces in *image* and return it with the mishig overlay drawn.

    The array is treated as BGR and converted to RGB for the detector; the
    overlay itself is drawn on the original, unconverted image.
    """
    detector = mp_face_detection.FaceDetection(
        model_selection=1,               # full-range detection model
        min_detection_confidence=0.01,   # deliberately permissive threshold
    )
    with detector as face_detection:
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = face_detection.process(rgb)
        return draw_mishigs(image, results)
# Load hugging face logo and landmark coordinates
# IMREAD_UNCHANGED keeps the alpha channel, which draw_mishigs uses as a
# paste mask; the BGRA->RGBA swap matches the RGB working space used there.
huggingface_image = cv2.imread("images/mishig-2 (1).png", cv2.IMREAD_UNCHANGED)
huggingface_image = cv2.cvtColor(huggingface_image, cv2.COLOR_BGRA2RGBA)
# Pixel coordinates of the logo's three anchor points; they pair with the
# detection keypoints [0, 1, 3] used in draw_mishigs (presumably eyes and
# mouth on the logo artwork — TODO confirm against the image file).
huggingface_landmarks = np.array([[747,697],[1289,697],[1022,1116]], dtype=np.float32)
# Build and launch the Gradio demo; mishig_me runs once per submitted image.
gr.Interface(mishig_me,
inputs=gr.Image(label="Input Image"),
outputs=gr.Image(label="Output Image"),
title=title,
examples=[["images/people.jpg"]],
description=description,
article=article).launch()