import mediapipe as mp
import gradio as gr
import cv2
import torch
# Download the example image used by the Gradio demo below.
torch.hub.download_url_to_file('https://i.imgur.com/9koaC6b.png', 'pose.png')
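# Note: torch is imported above only for this download helper. A minimal,
# torch-free alternative (an assumption, not part of the original demo) would be
# the standard library:
#   import urllib.request
#   urllib.request.urlretrieve('https://i.imgur.com/9koaC6b.png', 'pose.png')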

mp_holistic = mp.solutions.holistic

# Prepare DrawingSpec for drawing the face landmarks later.
mp_drawing = mp.solutions.drawing_utils 
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

def inference(image):
    # Run MediaPipe Holistic and draw pose landmarks.
    with mp_holistic.Holistic(static_image_mode=True, min_detection_confidence=0.5, model_complexity=2) as holistic:
        # The Gradio Image input is already an RGB array, so it can be passed to
        # MediaPipe Holistic directly.
        results = holistic.process(image)
    
        # Print nose coordinates.
        image_height, image_width, _ = image.shape
        if results.pose_landmarks:
            print(
                f'Nose coordinates: ('
                f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].x * image_width}, '
                f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].y * image_height})'
            )
    
        # Draw hand, face, and pose landmarks on a copy of the input image.
        annotated_image = image.copy()
        mp_drawing.draw_landmarks(annotated_image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
        mp_drawing.draw_landmarks(annotated_image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
        mp_drawing.draw_landmarks(
            image=annotated_image, 
            landmark_list=results.face_landmarks, 
            connections=mp_holistic.FACEMESH_TESSELATION,
            landmark_drawing_spec=drawing_spec,
            connection_drawing_spec=drawing_spec)
        mp_drawing.draw_landmarks(
            image=annotated_image, 
            landmark_list=results.pose_landmarks, 
            connections=mp_holistic.POSE_CONNECTIONS,
            landmark_drawing_spec=drawing_spec,
            connection_drawing_spec=drawing_spec)
        return annotated_image
    

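# Optional local sanity check (a sketch, not part of the original demo): it assumes
# 'pose.png' was downloaded above and uses a hypothetical output name
# 'pose_annotated.png'. cv2.imread returns BGR, so channels are swapped before and
# after calling inference(), which works on RGB arrays.
#
#   sample_rgb = cv2.cvtColor(cv2.imread('pose.png'), cv2.COLOR_BGR2RGB)
#   annotated_rgb = inference(sample_rgb)
#   cv2.imwrite('pose_annotated.png', cv2.cvtColor(annotated_rgb, cv2.COLOR_RGB2BGR))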
title = "Holistic Tracking"
description = "Gradio demo for Holistic Tracking: simultaneous and semantically consistent tracking of 33 pose, 21 per-hand, and 468 facial landmarks. To use it, simply upload an image or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://ai.googleblog.com/2020/12/mediapipe-holistic-simultaneous-face.html'>MediaPipe Holistic — Simultaneous Face, Hand and Pose Prediction, on Device</a> | <a href='https://github.com/google/mediapipe'>Github Repo</a></p>"
gr.Interface(
    inference, 
    [gr.inputs.Image(label="Input")], 
    gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article, 
    examples=[["pose.png"]]
    ).launch()
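# Running this file directly (assuming gradio, mediapipe, opencv-python and torch
# are installed) serves the demo at http://127.0.0.1:7860 by default; passing
# share=True to launch() would additionally create a temporary public link.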