import cv2
import mediapipe as mp
import time
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
from moviepy.video.io.bindings import mplfig_to_npimage
import os
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei is only needed for Chinese plot labels

# Load the pose-tracking solution
mpPose = mp.solutions.pose  # pose estimation module
pose = mpPose.Pose(static_image_mode=False,        # False: treat the input as a video stream and track between frames; True: run detection on every image independently
                   # upper_body_only=False,        # whether to detect the upper body only
                   smooth_landmarks=True,          # smooth landmarks across frames, usually True
                   min_detection_confidence=0.5,   # detection confidence threshold
                   min_tracking_confidence=0.5)    # tracking confidence threshold
# Once detection confidence exceeds 0.5 a person is considered found; as long as tracking confidence
# stays above 0.5 the previous result keeps being tracked instead of re-running detection every frame.

# Load the drawing utilities
mpDraw = mp.solutions.drawing_utils
# Process a video file and return the path of the annotated output
def video_processor(video):
    # Open the input video
    cap = cv2.VideoCapture(video)
    number_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Create a VideoWriter object that writes MP4 output
    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    out = cv2.VideoWriter('output.mp4', fourcc, fps, (width, height))
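    # Note: the 'avc1' (H.264) FourCC requires an OpenCV build with H.264 support;
    # if the writer fails to open, 'mp4v' is a common fallback codec for .mp4 files.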
    # Create the figure for the left-leg angle curve
    fig, ax = plt.subplots(figsize=(3, 3))
    # ax.set_position((0.1, 0.1, 0.8, 0.4))
    ax.set_xlim(0, number_of_frames)
    ax.set_ylim(-10, 200)
    ax.set_title('Left leg angle')
    ax.set_xlabel('Time')
    ax.set_ylabel('Angle')
    line, = ax.plot([], [], 'r-')
    # Append one new data point to the angle curve
    def update_graph(frame, angle):
        # Use the frame index as the time value
        time = frame
        # Append the new data point
        line.set_xdata(np.append(line.get_xdata(), time))
        line.set_ydata(np.append(line.get_ydata(), angle))
        # Return the updated line
        return line
    # Per-frame processing
    # lmlist = []  # all body landmark positions
    left_23 = []  # left hip (landmark 23) positions
    left_25 = []  # left knee (landmark 25) positions
    a = []        # all computed angles

    # Angle of the vector from p1 to p2, in degrees, measured against the horizontal axis
    def angle(p1, p2):
        x1, y1 = p1
        x2, y2 = p2
        dx = x2 - x1
        dy = y2 - y1
        return math.degrees(math.atan2(dy, dx))
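    # Example with hypothetical pixel coordinates: angle((100, 200), (150, 300)) ≈ 63.4°,
    # i.e. the hip-to-knee direction measured from the horizontal; note that image y grows downward.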
    frame_count = 0  # frame counter
    while True:
        # Read one frame
        success, img = cap.read()
        if not success:
            print('End of video')
            break
        frame_count += 1
        # Convert the frame from BGR to RGB
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Run the pose model on the frame
        results = pose.process(imgRGB)
        # Only proceed if a pose was detected
        if results.pose_landmarks:
            # Draw the pose landmarks and their connections
            mpDraw.draw_landmarks(img, results.pose_landmarks, mpPose.POSE_CONNECTIONS)
            # Iterate over the 33 body landmarks; index identifies which landmark it is
            for index, lm in enumerate(results.pose_landmarks.landmark):
                # Height, width and channel count of the frame
                h, w, c = img.shape
                # Convert normalized coordinates to pixel coordinates
                cx, cy = int(lm.x * w), int(lm.y * h)
                # Store the coordinates
                # lmlist.append((cx, cy))
                if index == 23:
                    left_23.append([cx, cy])
                if index == 25:
                    left_25.append([cx, cy])
                # Draw a small circle on each landmark
                cv2.circle(img, (cx, cy), 3, (0, 255, 0), cv2.FILLED)
        # Compute the left leg angle from the latest hip and knee positions
        if len(left_23) != 0 and len(left_25) != 0:
            a.append(angle(left_23[-1], left_25[-1]))
            # Update the angle curve
            update_graph(frame_count, a[-1])
        # Render the figure to a numpy array
        graph = mplfig_to_npimage(fig)
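        # Note: mplfig_to_npimage returns an RGB array while OpenCV frames are BGR, so the
        # overlaid plot's colors appear channel-swapped; converting with
        # cv2.cvtColor(graph, cv2.COLOR_RGB2BGR) before pasting would fix this if exact colors matter.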
        # Overlay the curve onto the bottom-right corner of the frame
        img[-graph.shape[0]:, -graph.shape[1]:, :] = graph
        # Write the annotated frame to the output video
        out.write(img)
    # Release resources
    out.release()
    cap.release()
    # Return the path of the output video
    return 'output.mp4'
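
# Local sanity check (hypothetical clip name, not part of the Space; kept commented out):
# if __name__ == '__main__':
#     video_processor('Ollie120.mp4')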
import gradio as gr
demo = gr.Interface(fn=video_processor, title='Skateboard trick pose recognition',
                    inputs=gr.Video(source="upload"), outputs=gr.Video()
                    # examples=["Ollie120.mp4"]
                    )
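# Note: gr.Video(source="upload") matches the Gradio 3.x API; Gradio 4.x renamed the argument to
# sources=["upload"], so this line may need adjusting depending on the installed Gradio version.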
demo.launch()