import cv2
import mediapipe as mp
import gradio as gr

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

def detect_pose(frame):
    # MediaPipe expects an RGB image; the Gradio webcam component supplies RGB frames.
    # Note: creating a new Pose object per frame is simple but slow and defeats tracking;
    # for real-time use, create the Pose instance once outside this function.
    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
        results = pose.process(frame)
        # Draw the detected skeleton onto the frame in place.
        if results.pose_landmarks:
            mp_drawing.draw_landmarks(
                frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=4),
                connection_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2))
    return frame

def process_video(frame):
    # Run pose detection on a single frame and return the annotated frame.
    frame = detect_pose(frame)
    return frame

def video_stream():
    # Optional: a local OpenCV capture loop (not used by the Gradio interface below).
    # OpenCV delivers BGR frames, so convert to RGB for MediaPipe and back for display.
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = process_video(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        yield frame
    cap.release()

# Stream webcam frames through the pose detector. gr.Image with source="webcam"
# and streaming=True (the Gradio 3.x webcam API) passes individual RGB numpy
# frames to fn; gr.Video would instead pass a whole recorded clip.
iface = gr.Interface(
    fn=process_video,
    inputs=gr.Image(source="webcam", streaming=True),
    outputs="image",
    live=True,
    title="Live Pose Detection"
)

iface.launch()