check webcam
main.py
CHANGED
@@ -26,6 +26,8 @@ track_model = YOLO('yolov8n.pt') # Load an official Detect model
 
 # ultraltics
 
+# [INFO] VIDEO INPUT: /tmp/gradio/927601b660ec45919366ce37df1ed004a1fcffab/sample_flip.webm
+
 # Defining inferencer models to lookup in function
 inferencers = {"Estimate human 2d poses":human, "Estimate human 2d hand poses":hand, "Estimate human 3d poses":human3d, "Detect and track":track_model}
 
@@ -94,7 +96,7 @@ def poses(inferencer, video, vis_out_dir, kpt_thr):
 
     return out_file
 
-def infer(video, check, kpt_thr):
+def infer(video, check, kpt_thr, webcam=False):
     print("[INFO] VIDEO INPUT: ", video)
 
     # Selecting the specific inferencer
@@ -108,11 +110,14 @@ def infer(video, check, kpt_thr):
         if i == "Detect and track":
             #continue
             trackfile = show_tracking(video, vis_out_dir, inferencer)
-
+
         else:
             out_file = poses(inferencer, video, vis_out_dir, kpt_thr)
-
-
+
+        if webcam==True:
+            out_files.extend(video)
+        else:
+            out_files.extend(out_file)
 
     print(out_files)
 
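A side note on the new out_files branch above: list.extend() iterates whatever it is given, so extending with a single path string adds one list entry per character, while append() keeps the path as one entry. The standalone sketch below is plain Python with a made-up path, not code from main.py, and only illustrates that difference; if the intent is one output path per selected method, appending out_file (or trackfile) is likely what is wanted.

# Standalone sketch: extend vs. append with a single path string.
out_files = []
video = "/tmp/sample_flip.webm"        # hypothetical path standing in for the webcam recording
out_files.extend(video)                # iterates the string: one entry per character
print(len(out_files))                  # 21

out_files = []
out_files.append(video)                # the whole path stays one entry
out_files.extend(["a.mp4", "b.mp4"])   # extend fits when adding several paths at once
print(out_files)                       # ['/tmp/sample_flip.webm', 'a.mp4', 'b.mp4']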
@@ -123,16 +128,20 @@ def run():
     check_web = gr.CheckboxGroup(choices = ["Detect and track", "Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"], label="Methods", type="value", info="Select the model(s) you want")
     check_file = gr.CheckboxGroup(choices = ["Detect and track", "Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"], label="Methods", type="value", info="Select the model(s) you want")
 
+    description = """
+    \n\nHere you can upload videos or record one with your webcam and track objects or detect bodyposes in 2d and 3d.
+    """
+
     # Insert slider with kpt_thr
     web_kpthr = gr.Slider(0, 1, value=0.3)
     file_kpthr = gr.Slider(0, 1, value=0.3)
 
     webcam = gr.Interface(
         fn=infer,
-        inputs= [gr.Video(source="webcam", height=512), check_web, web_kpthr],
+        inputs= [gr.Video(source="webcam", height=512), check_web, web_kpthr, True], # /tmp/gradio/927601b660ec45919366ce37df1ed004a1fcffab/sample_flip.webm
         outputs = [gr.Video(format='mp4', height=512, label="Detect and track", show_label=True), gr.PlayableVideo(height=512, label = "Estimate human 2d poses", show_label=True), gr.PlayableVideo(height=512, label = "Estimate human 2d hand poses", show_label=True), gr.PlayableVideo(height=512, label = "Estimate human 3d poses", show_label=True)],
         title = 'Tracking and pose estimation',
-        description =
+        description = description,
         allow_flagging=False
     )
 
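A caveat on the inputs list above: in gr.Interface, each entry of inputs is a component that feeds one argument of fn, so the bare True is unlikely to reach the new webcam parameter the way a component value would. A minimal alternative, assuming the flag should simply be constant for the webcam interface, is to bind it with functools.partial and keep only real components in inputs. The sketch below reuses the components and description defined above; webcam_iface is a hypothetical name.

# Sketch: fix webcam=True when registering infer, instead of placing a bool in inputs.
from functools import partial
import gradio as gr

webcam_iface = gr.Interface(
    fn=partial(infer, webcam=True),  # video, check and kpt_thr still come from the components below
    inputs=[gr.Video(source="webcam", height=512), check_web, web_kpthr],
    outputs=[gr.Video(format='mp4', height=512, label="Detect and track", show_label=True)],
    title='Tracking and pose estimation',
    description=description,
)

An extra hidden input component that always returns True would be another route, but binding the value at registration keeps the webcam interface shaped like the upload one.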
@@ -141,7 +150,7 @@ def run():
         inputs = [gr.Video(source="upload", height=512), check_file, file_kpthr],
         outputs = [gr.Video(format='mp4', height=512, label="Detect and track", show_label=True), gr.PlayableVideo(height=512, label = "Estimate human 2d poses", show_label=True), gr.PlayableVideo(height=512, label = "Estimate human 2d hand poses", show_label=True), gr.PlayableVideo(height=512, label = "Estimate human 3d poses", show_label=True)],
         title = 'Tracking and pose estimation',
-        description =
+        description = description,
         allow_flagging=False
     )
 
@@ -154,9 +163,6 @@ def run():
 
 
 if __name__ == "__main__":
-    description = """
-    \n\nHere you can upload videos or record one with your webcam and track objects or detect bodyposes in 2d and 3d.
-    """
     run()
 
 # https://github.com/open-mmlab/mmpose/tree/dev-1.x/configs/body_3d_keypoint/pose_lift