xmrt committed
Commit d8c9e2f
1 Parent(s): e281df9

3d pers looking weird

Files changed (1):
  1. main.py +13 -12
main.py CHANGED
@@ -120,6 +120,7 @@ def pose3d(video):
         thickness=4,
         radius = 5,
         return_vis=True,
+        kpt_thr=0.3,
         rebase_keypoint_height=True,
         device=device)
 
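A note on the `kpt_thr=0.3` argument added above: in pose-estimation toolkits such as MMPose, a keypoint score threshold of this kind typically controls which predicted joints are rendered, so joints below the threshold are hidden rather than drawn at unreliable positions, which is presumably the fix for the weird-looking 3D person. A minimal sketch of the idea; the `filter_keypoints` helper and the sample arrays are illustrative, not code from this repository:

import numpy as np

def filter_keypoints(keypoints, scores, kpt_thr=0.3):
    """Keep only the joints whose confidence is at least kpt_thr.

    keypoints: (N, 3) array of (x, y, z) joint positions (hypothetical layout).
    scores:    (N,) array of per-joint confidence values in [0, 1].
    """
    mask = scores >= kpt_thr
    return keypoints[mask], scores[mask]

# Example: the noisy third joint falls below the 0.3 threshold and is dropped.
kpts = np.array([[0.10, 0.25, 0.90],
                 [0.42, 0.55, 1.10],
                 [5.00, -3.00, 0.02]])
conf = np.array([0.95, 0.80, 0.10])
kept, _ = filter_keypoints(kpts, conf, kpt_thr=0.3)
print(kept)  # only the two confident joints remain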
 
@@ -223,17 +224,6 @@ def run_UI():
         webcam_output4 = gr.Video(height=512, label = "Detection and tracking", show_label=True, format="mp4")
 
         with gr.Tab("General information"):
-            gr.Markdown("You can load the keypoints in python in the following way: ")
-            gr.Code(
-                value="""def hello_world():
-    return "Hello, world!"
-
-print(hello_world())""",
-                language="python",
-                interactive=True,
-                show_label=False,
-            )
-
             gr.Markdown("""
             \n # Information about the models
 
@@ -254,7 +244,18 @@ def run_UI():
             \n The tracking method in the Ultralight's YOLOv8 model is used for object tracking in videos. It takes a video file or a camera stream as input and returns the tracked objects in each frame. The method uses the COCO dataset classes for object detection and tracking.
 
             \n The COCO dataset contains 80 classes of objects such as person, car, bicycle, etc. See https://docs.ultralytics.com/datasets/detect/coco/ for all available classes. The tracking method uses the COCO classes to detect and track the objects in the video frames. The tracked objects are represented as bounding boxes with labels indicating the class of the object.""")
-
+            gr.Markdown("You can load the keypoints in python in the following way: ")
+            gr.Code(
+                value="""def hello_world():
+    return "Hello, world!"
+
+print(hello_world())""",
+                language="python",
+                interactive=True,
+                show_label=False,
+            )
+
+
     # From file
     submit_pose_file.click(fn=pose2d,
                            inputs= [video_input, file_kpthr],
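One observation on the block moved in the last hunk: the `gr.Code` snippet is introduced as showing how to load the keypoints, yet its `value` is still a `hello_world()` placeholder. The diff does not show how the app serializes keypoints, so the following is only a sketch under the assumption that they end up in a JSON file; the file name and the per-frame structure are hypothetical:

import json

# Assumption: one entry per frame, each holding per-person lists of
# (x, y, score) triples. "keypoints.json" is a placeholder name; this
# diff does not reveal the real serialization format.
with open("keypoints.json") as f:
    frames = json.load(f)

for frame_idx, people in enumerate(frames):
    for person_idx, keypoints in enumerate(people):
        print(frame_idx, person_idx, keypoints)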
 
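For the tracking description kept as context above: in the Ultralytics package, video tracking is exposed through the model's `track` method, which accepts a video file or camera stream and returns per-frame detections with COCO class labels and track IDs, matching the behavior the tab describes. A minimal usage sketch; the weight file and video path are placeholders, not assets from this repository:

from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # placeholder weights; any YOLOv8 detection model works

# stream=True yields one result object per frame instead of buffering the video.
for result in model.track(source="input.mp4", stream=True):
    for box in result.boxes:
        cls_name = model.names[int(box.cls)]  # COCO class label, e.g. "person"
        track_id = int(box.id) if box.id is not None else -1  # id may be unset early on
        print(track_id, cls_name, box.xyxy.tolist())  # bounding-box corners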