vchiang001 committed
Commit c20ef4d
1 Parent(s): dda9d4e

changed pose size

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -1,6 +1,6 @@
-# Copied megadetector section from https://huggingface.co/spaces/hlydecker/MegaDetector_v5
-# Copied from https://huggingface.co/spaces/sofmi/MegaDetector_DLClive/blob/main/app.py
-# Copied from https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels/blob/main/app.py
+# Built from https://huggingface.co/spaces/hlydecker/MegaDetector_v5
+# Built from https://huggingface.co/spaces/sofmi/MegaDetector_DLClive/blob/main/app.py
+# Built from https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels/blob/main/app.py
 
 
 import gradio as gr
@@ -60,7 +60,7 @@ gr_pose_font_input = gr.inputs.Dropdown(choices=['amiko', 'nature', 'painter', '
 gr_slider_font_size = gr.inputs.Slider(5,30,1,8,
                                        label='Set pose font size')
 
-gr_slider_pose_size = gr.inputs.Slider(1,20,1,2,
+gr_slider_pose_size = gr.inputs.Slider(0.5,5,0.2,2,
                                        label='Set pose size')
 gr_mega_model_input = gr.inputs.Dropdown(choices=['md_v5a','md_v5b'],
                                          default='md_v5a', # default option
@@ -345,8 +345,8 @@ outputs = [gr_gallery_output, #gr_json_output,
 gr_title = "MegaDetector v5 + DLClive by Sofi, Nesli, Nirel, Vic"
 gr_description = "Detect and estimate the pose of animals in camera trap images, using MegaDetector v5a + DeepLabCut-live. \
 Builds up on work from <a href='https://huggingface.co/spaces/hlydecker/MegaDetector_v5'>hlydecker/MegaDetector_v5</a> \
-<a href='https://huggingface.co/spaces/sofmi/MegaDetector_DLClive</a> \
-<a href='https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels</a>"
+<a href='https://huggingface.co/spaces/sofmi/MegaDetector_DLClive'>sofmi/MegaDetector_DLClive</a> \
+<a href='https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels'>Neslihan/megadetector_dlcmodels</a>"
 # article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
 # examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
 
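Note on the "changed pose size" commit message: in the legacy gr.inputs.Slider API the first four positional arguments are (minimum, maximum, step, default), so this commit narrows the pose-marker size range from integer steps over 1-20 to steps of 0.2 over 0.5-5, keeping the default at 2. The snippet below is a minimal sketch that spells those positionals out as keywords; the variable names old_pose_size and new_pose_size are illustrative and it assumes a Gradio version that still ships gr.inputs, as app.py does. It is not part of the commit.

import gradio as gr

# Sketch only (not part of app.py): the pose-size slider before and after this
# commit, with the legacy gr.inputs.Slider arguments written as keywords,
# assuming the positional order (minimum, maximum, step, default).

# Before: integer marker sizes from 1 to 20, default 2
old_pose_size = gr.inputs.Slider(minimum=1, maximum=20, step=1, default=2,
                                 label='Set pose size')

# After: finer-grained marker sizes from 0.5 to 5 in steps of 0.2, default still 2
new_pose_size = gr.inputs.Slider(minimum=0.5, maximum=5, step=0.2, default=2,
                                 label='Set pose size')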