import mmpose
import os
import glob
from mmpose.apis import MMPoseInferencer
import gradio as gr
import numpy as np
import cv2

print("[INFO]: Imported modules!")
# inferencer = MMPoseInferencer('hand')   # 2D alternatives: 'hand', 'human'; pass device='cuda' for GPU
# inferencer = MMPoseInferencer('human')
inferencer = MMPoseInferencer(pose3d='human3d')
# 3D pose-lift checkpoints in the MMPose model zoo:
# https://github.com/open-mmlab/mmpose/tree/dev-1.x/configs/body_3d_keypoint/pose_lift
#   motionbert_ft_h36m-d80af323_20230531.pth
#   simple3Dbaseline_h36m-f0ad73a4_20210419.pth
#   videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth
#   videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth
#   videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth
#   videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth
# Inferencer implementation:
# https://github.com/open-mmlab/mmpose/blob/main/mmpose/apis/inferencers/pose3d_inferencer.py
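
# A hedged sketch (the config alias below is an assumption, not taken from this
# repo): instead of the 'human3d' alias, MMPoseInferencer also accepts an
# explicit 3D config name plus a downloaded checkpoint via pose3d_weights,
# e.g. for the VideoPose3D checkpoint listed above:
# inferencer = MMPoseInferencer(
#     pose3d='video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m',  # assumed alias
#     pose3d_weights='videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth',
# )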
print("[INFO]: Downloaded models!") | |

def poses(photo):
    print("[INFO]: Input video:", photo)
    result_generator = inferencer(photo,
                                  vis_out_dir=".",
                                  return_vis=True,
                                  thickness=2)
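    # Each yielded result is a dict with 'predictions' (per-frame keypoint
    # estimates) and, because return_vis=True, 'visualization' (rendered
    # frames as numpy arrays), per the MMPose inference user guide.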

    # # Alternative: write the visualization frames out manually with OpenCV
    # output_file = os.path.join("output.mp4")
    # fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # Codec for MP4 video
    # fps = 32
    # height = 480
    # width = 640
    # size = (width, height)
    # out_writer = cv2.VideoWriter(output_file, fourcc, fps, size)
    # for result in result_generator:
    #     print("[INFO] Result: ", result)
    #     frame = result["visualization"]
    #     out_writer.write(cv2.cvtColor(frame[0], cv2.COLOR_BGR2RGB))
    # out_writer.release()
    # cv2.destroyAllWindows()  # Close any OpenCV windows

    # The inferencer is a lazy generator: the visualization video is only
    # written to vis_out_dir while the results are consumed.
    for _ in result_generator:
        pass

    output_file = glob.glob("*.mp4")
    # Return the first rendered .mp4 so Gradio can display it.
    return output_file[0] if output_file else None
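
# Quick local sanity check (assumption: a sample clip named "sample.mp4" sits
# next to this script; adjust as needed); runs the pipeline without Gradio:
# if os.path.exists("sample.mp4"):
#     print("[INFO]: Rendered video:", poses("sample.mp4"))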

# # Specify the detection model by alias. The available aliases include
# # 'human', 'hand', 'face', 'animal', as well as any additional aliases
# # defined in mmdet.
# inferencer = MMPoseInferencer(
#     # suppose the pose estimator is trained on a custom dataset
#     pose2d='custom_human_pose_estimator.py',
#     pose2d_weights='custom_human_pose_estimator.pth',
#     det_model='human'
# )

def run():
    # https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
    demo = gr.Interface(fn=poses,
                        inputs=gr.Video(source="upload"),
                        outputs=gr.Video())
    demo.launch(server_name="0.0.0.0", server_port=7860)

if __name__ == "__main__":
    run()
    print(os.listdir())