# MMpose / main.py
import os

import cv2
import gradio as gr
import numpy as np

import mmpose
from mmpose.apis import MMPoseInferencer

print("[INFO]: Imported modules!")

# Build a 2D human pose inferencer from the 'human' model alias
# (weights are downloaded automatically on first use).
inferencer = MMPoseInferencer('human')
print("[INFO]: Downloaded models!")

def poses(photo):
    # Run inference; return_vis=True makes each result carry the rendered
    # keypoint overlay for the corresponding frame.
    result_generator = inferencer(photo,
                                  vis_out_dir=".",
                                  return_vis=True,
                                  thickness=2)

    # Read the source video's properties so the output matches the input.
    video = cv2.VideoCapture(photo)
    fps = video.get(cv2.CAP_PROP_FPS)
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video.release()

    # Prepare to save the visualized video.
    output_file = "output.mp4"
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # Codec for MP4 video
    out_writer = cv2.VideoWriter(output_file, fourcc, fps, (width, height))

    print("[INFO]: Visualizing results!")
    for result in result_generator:
        vis = result["visualization"]
        frame = vis[0] if isinstance(vis, (list, tuple)) else vis
        # The visualization is RGB; OpenCV's writer expects BGR.
        out_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    out_writer.release()
    cv2.destroyAllWindows()  # Close any OpenCV windows
    return output_file
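
# For a quick local check without Gradio, poses() could be called directly on a
# saved clip (kept commented out so it never runs on import; the path below is
# a placeholder, not a file shipped with this Space):
#
# output_path = poses("sample_clip.mp4")  # writes and returns "output.mp4"
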
# # Specify the detection model by alias. The available aliases include
# # 'human', 'hand', 'face', 'animal', as well as any additional aliases
# # defined in mmdet.
# inferencer = MMPoseInferencer(
#     # Suppose the pose estimator is trained on a custom dataset
#     pose2d='custom_human_pose_estimator.py',
#     pose2d_weights='custom_human_pose_estimator.pth',
#     det_model='human'
# )
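
# A minimal sketch (unused by the Gradio app) of reading raw keypoints instead
# of rendered frames. It assumes MMPoseInferencer's default output format, in
# which each result dict carries a 'predictions' list with one entry per input
# and one dict per detected person; the function name here is an illustration.
def keypoints_for_image(image_path):
    result = next(inferencer(image_path))
    people = result["predictions"][0]  # instances detected in the single image
    return [(p["keypoints"], p["keypoint_scores"]) for p in people]
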

def run():
    # https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
    demo = gr.Interface(fn=poses,
                        inputs=gr.Video(source="webcam"),
                        outputs=gr.Video())
    demo.launch(server_name="0.0.0.0", server_port=7860)

if __name__ == "__main__":
    run()