xmrt committed on
Commit
22e7a27
·
1 Parent(s): 34a9259

file display

Browse files
Files changed (1) hide show
  1. main.py +26 -28
main.py CHANGED
@@ -2,6 +2,9 @@
2
  import mmpose
3
  from mmpose.apis import MMPoseInferencer
4
 
 
 
 
5
  # Gradio
6
  import gradio as gr
7
 
@@ -23,21 +26,19 @@ human3d = MMPoseInferencer(pose3d="human3d")
23
  # Defining inferencer models to lookup in function
24
  inferencers = {"Estimate human 2d poses":human, "Estimate human 2d hand poses":hand, "Estimate human 3d poses":human3d}
25
 
26
- # inferencer = MMPoseInferencer('human')
27
- # inferencer = MMPoseInferencer(pose3d="human3d")
28
-
29
- # https://github.com/open-mmlab/mmpose/tree/dev-1.x/configs/body_3d_keypoint/pose_lift
30
- # motionbert_ft_h36m-d80af323_20230531.pth
31
- # simple3Dbaseline_h36m-f0ad73a4_20210419.pth
32
- # videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth
33
- # videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth
34
- # videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth
35
- # videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth
36
-
37
- # https://github.com/open-mmlab/mmpose/blob/main/mmpose/apis/inferencers/pose3d_inferencer.py
38
 
39
  print("[INFO]: Downloaded models!")
40
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  def poses(photo, check):
43
  # Selecting the specific inferencer
@@ -64,27 +65,15 @@ def poses(photo, check):
64
 
65
  return out_files
66
 
67
- # # specify detection model by alias
68
- # # the available aliases include 'human', 'hand', 'face', 'animal',
69
- # # as well as any additional aliases defined in mmdet
70
- # inferencer = MMPoseInferencer(
71
- # # suppose the pose estimator is trained on custom dataset
72
- # pose2d='custom_human_pose_estimator.py',
73
- # pose2d_weights='custom_human_pose_estimator.pth',
74
- # det_model='human'
75
- # )
76
-
77
-
78
  def run():
79
  #https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
80
- available_methods = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"]
81
- check_web = gr.CheckboxGroup(choices = available_methods, label="Methods", type="value", info="Select the model(s) you want")
82
- check_file = gr.CheckboxGroup(choices = available_methods, label="Methods", type="value", info="Select the model(s) you want")
83
 
84
  webcam = gr.Interface(
85
  fn=poses,
86
  inputs= [gr.Video(source="webcam", height=412), check_web],
87
- outputs = [gr.PlayableVideo()]*len(available_methods), #file_types=['.mp4'] #gr.Video(),
88
  title = 'Pose estimation',
89
  description = 'Pose estimation on video',
90
  allow_flagging=False
@@ -93,7 +82,7 @@ def run():
93
  file = gr.Interface(
94
  poses,
95
  inputs = [gr.Video(source="upload", height=412), check_file],
96
- outputs = [gr.PlayableVideo()]*len(available_methods),
97
  allow_flagging=False
98
  )
99
 
@@ -107,3 +96,12 @@ def run():
107
 
108
  if __name__ == "__main__":
109
  run()
 
 
 
 
 
 
 
 
 
 
2
  import mmpose
3
  from mmpose.apis import MMPoseInferencer
4
 
5
+ # Ultralytics
6
+ #from ultralytics import YOLO
7
+
8
  # Gradio
9
  import gradio as gr
10
 
 
26
  # Defining inferencer models to lookup in function
27
  inferencers = {"Estimate human 2d poses":human, "Estimate human 2d hand poses":hand, "Estimate human 3d poses":human3d}
28
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  print("[INFO]: Downloaded models!")
31
 
32
def tracking(video, model, boxes=True, device="cuda"):
    """Run a pre-loaded tracking/detection model over a video.

    Args:
        video: Path (or array) accepted by the model's inference call.
        model: A callable inference model — presumably an Ultralytics YOLO
            instance (see the commented-out ``from ultralytics import YOLO``
            at the top of the file) — invoked as
            ``model(video, device=..., boxes=...)``.
        boxes: Whether the model should render bounding boxes.
        device: Inference device. Defaults to "cuda" (the previously
            hard-coded value, kept for backward compatibility); pass "cpu"
            on machines without a GPU instead of crashing.

    Returns:
        Whatever the model's inference call returns (annotated frame(s)).
    """
    # NOTE(review): nothing is actually loaded here — the caller supplies
    # an already-constructed model; the log line below mirrors the original.
    print("[INFO] Loading model...")

    # Perform tracking with the model.
    # https://docs.ultralytics.com/modes/predict/
    print("[INFO] Starting tracking!")
    annotated_frame = model(video, device=device, boxes=boxes)

    return annotated_frame
42
 
43
  def poses(photo, check):
44
  # Selecting the specific inferencer
 
65
 
66
  return out_files
67
 
 
 
 
 
 
 
 
 
 
 
 
68
  def run():
69
  #https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
70
+ check_web = gr.CheckboxGroup(choices = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"], label="Methods", type="value", info="Select the model(s) you want")
71
+ check_file = gr.CheckboxGroup(choices = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"], label="Methods", type="value", info="Select the model(s) you want")
 
72
 
73
  webcam = gr.Interface(
74
  fn=poses,
75
  inputs= [gr.Video(source="webcam", height=412), check_web],
76
+ outputs = [gr.PlayableVideo()]*len(["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"]), #file_types=['.mp4'] #gr.Video(),
77
  title = 'Pose estimation',
78
  description = 'Pose estimation on video',
79
  allow_flagging=False
 
82
  file = gr.Interface(
83
  poses,
84
  inputs = [gr.Video(source="upload", height=412), check_file],
85
+ outputs = [gr.PlayableVideo()]*len(["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"]),
86
  allow_flagging=False
87
  )
88
 
 
96
 
97
  if __name__ == "__main__":
98
  run()
99
+
100
+ # https://github.com/open-mmlab/mmpose/tree/dev-1.x/configs/body_3d_keypoint/pose_lift
101
+ # motionbert_ft_h36m-d80af323_20230531.pth
102
+ # simple3Dbaseline_h36m-f0ad73a4_20210419.pth
103
+ # videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth
104
+ # videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth
105
+ # videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth
106
+ # videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth
107
+ # https://github.com/open-mmlab/mmpose/blob/main/mmpose/apis/inferencers/pose3d_inferencer.py