Spaces:
Build error
Build error
Commit
•
746600b
1
Parent(s):
a7d6809
added json, gallery, dataframe
Browse files
app.py
CHANGED
@@ -251,9 +251,10 @@ MD_model = torch.hub.load('ultralytics/yolov5', 'custom', "megadet_model/md_v5b.
|
|
251 |
|
252 |
|
253 |
####################################################
|
254 |
-
# Create user interface and launch
|
255 |
gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
|
256 |
-
|
|
|
257 |
gr_dlc_model_input = gr.inputs.Dropdown(choices=['full_cat','full_dog', 'monkey_face', 'full_human', 'full_monkey'], # choices
|
258 |
default='full_cat', # default option
|
259 |
type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
|
@@ -274,12 +275,29 @@ gr_pose_font_input = gr.inputs.Dropdown(choices=['amiko', 'nature', 'painter', '
|
|
274 |
label='Select pose font')
|
275 |
gr_slider_font_size = gr.inputs.Slider(5,30,1,8,
|
276 |
label='Set pose font size')
|
277 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
278 |
|
279 |
#image = gr.inputs.Image(type="pil", label="Input Image")
|
280 |
#chosen_model = gr.inputs.Dropdown(choices = models, value = "model_weights/md_v5a.0.0.pt",type = "value", label="Model Weight")
|
281 |
#size = 640
|
282 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
283 |
gr_title = "MegaDetector v5 + DLClive"
|
284 |
gr_description = "Detect and estimate the pose of animals in camera trap images, using MegaDetector v5a + DeepLabCut-live. \
|
285 |
Builds up on work from <a href='https://huggingface.co/spaces/hlydecker/MegaDetector_v5'>hlydecker/MegaDetector_v5</a>"
|
@@ -287,16 +305,8 @@ gr_description = "Detect and estimate the pose of animals in camera trap images,
|
|
287 |
# examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
|
288 |
|
289 |
demo = gr.Interface(predict_pipeline,
|
290 |
-
inputs=
|
291 |
-
|
292 |
-
gr_dlc_only_checkbox,
|
293 |
-
gr_slider_conf_bboxes,
|
294 |
-
gr_slider_conf_keypoints,
|
295 |
-
gr_keypt_color,
|
296 |
-
gr_pose_font_input,
|
297 |
-
gr_slider_font_size,
|
298 |
-
],
|
299 |
-
outputs=gr_image_output,
|
300 |
title=gr_title,
|
301 |
description=gr_description,
|
302 |
theme="huggingface",
|
|
|
251 |
|
252 |
|
253 |
####################################################
|
254 |
+
# Create user interface and launch: all inputs
|
255 |
gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
|
256 |
+
|
257 |
+
|
258 |
gr_dlc_model_input = gr.inputs.Dropdown(choices=['full_cat','full_dog', 'monkey_face', 'full_human', 'full_monkey'], # choices
|
259 |
default='full_cat', # default option
|
260 |
type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
|
|
|
275 |
label='Select pose font')
|
276 |
gr_slider_font_size = gr.inputs.Slider(5,30,1,8,
|
277 |
label='Set pose font size')
|
278 |
+
|
279 |
+
inputs = [gr_image_input,
|
280 |
+
gr_dlc_model_input,
|
281 |
+
gr_dlc_only_checkbox,
|
282 |
+
gr_slider_conf_bboxes,
|
283 |
+
gr_slider_conf_keypoints,
|
284 |
+
gr_keypt_color,
|
285 |
+
gr_pose_font_input,
|
286 |
+
gr_slider_font_size,
|
287 |
+
]
|
288 |
|
289 |
#image = gr.inputs.Image(type="pil", label="Input Image")
|
290 |
#chosen_model = gr.inputs.Dropdown(choices = models, value = "model_weights/md_v5a.0.0.pt",type = "value", label="Model Weight")
|
291 |
#size = 640
|
292 |
|
293 |
+
####################################################
|
294 |
+
# Create user interface and launch: all outputs
|
295 |
+
gr_gallery_output = gr.outputs.Gallery(type="pil", label="Output Gallery")
|
296 |
+
gr_json_output = gr.outputs.JSON(label='megadetector json')
|
297 |
+
gr_df_output = gr.outputs.Dataframe(label='pose coordinates')
|
298 |
+
|
299 |
+
outputs = [gr_gallery_output, gr_json_output, gr_df_output]
|
300 |
+
|
301 |
gr_title = "MegaDetector v5 + DLClive"
|
302 |
gr_description = "Detect and estimate the pose of animals in camera trap images, using MegaDetector v5a + DeepLabCut-live. \
|
303 |
Builds up on work from <a href='https://huggingface.co/spaces/hlydecker/MegaDetector_v5'>hlydecker/MegaDetector_v5</a>"
|
|
|
305 |
# examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
|
306 |
|
307 |
demo = gr.Interface(predict_pipeline,
|
308 |
+
inputs=inputs,
|
309 |
+
outputs=outputs,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
310 |
title=gr_title,
|
311 |
description=gr_description,
|
312 |
theme="huggingface",
|