import gradio as gr
##############################
def gradio_inputs_for_MD_DLC(md_models_list, # list(MD_models_dict.keys())
dlc_models_list, # list(DLC_models_dict.keys())
):
# Input image
gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
# Models
    gr_mega_model_input = gr.inputs.Dropdown(choices=md_models_list,
                                             default='md_v5a', # default option
                                             type='value', # return the string of the selected choice (rather than its index)
                                             label='Select MegaDetector model')
    gr_dlc_model_input = gr.inputs.Dropdown(choices=dlc_models_list,
                                            default='full_cat', # default option
                                            type='value', # return the string of the selected choice (rather than its index)
                                            label='Select DeepLabCut model')
# Other inputs
gr_dlc_only_checkbox = gr.inputs.Checkbox(False,
label='Run DLClive only, directly on input image?')
gr_str_labels_checkbox = gr.inputs.Checkbox(True,
label='Show bodypart labels?')
    gr_slider_conf_bboxes = gr.inputs.Slider(0, 1, 0.02, 0.8, # min, max, step, default
                                             label='Set confidence threshold for animal detections')
    gr_slider_conf_keypoints = gr.inputs.Slider(0, 1, 0.05, 0, # min, max, step, default
                                                label='Set confidence threshold for keypoints')
# Data viz
    gr_keypt_color = gr.ColorPicker(label="Choose color for keypoint label")
gr_labels_font_style = gr.inputs.Dropdown(choices=['amiko', 'nature', 'painter', 'animals', 'zen'],
default='amiko',
type='value',
label='Select keypoint label font')
    gr_slider_font_size = gr.inputs.Slider(5, 30, 1, 8, # min, max, step, default
                                           label='Set font size')
    gr_slider_marker_size = gr.inputs.Slider(1, 20, 1, 5, # min, max, step, default
                                             label='Set marker size')
    # list of inputs -- the order must match each example row in gradio_description_and_examples()
return [gr_image_input,
gr_mega_model_input,
gr_dlc_model_input,
gr_dlc_only_checkbox,
gr_str_labels_checkbox,
gr_slider_conf_bboxes,
gr_slider_conf_keypoints,
gr_labels_font_style,
gr_slider_font_size,
gr_keypt_color,
gr_slider_marker_size]
####################################################
def gradio_outputs_for_MD_DLC():
# User interface: outputs
gr_image_output = gr.outputs.Image(type="pil", label="Output Image")
gr_file_download = gr.File(label="Download JSON file")
return [gr_image_output,
gr_file_download]
##############################################
# User interface: description
def gradio_description_and_examples():
title = "MegaDetector v5 + DeepLabCut-Live!"
description = "Contributed by Sofia Minano, Neslihan Wittek, Nirel Kadzo, VicShaoChih Chiang, Sabrina Benas -- DLC AI Residents 2022..\
This App detects and estimate the pose of animals in camera trap images using <a href='https://github.com/microsoft/CameraTraps'>MegaDetector v5a</a> + <a href='https://github.com/DeepLabCut/DeepLabCut-live'>DeepLabCut-live</a>. \
We host models from the <a href='http://www.mackenziemathislab.org/dlc-modelzoo'>DeepLabCut ModelZoo Project</a>\, and two <a href='https://github.com/microsoft/CameraTraps/blob/main/megadetector.md'>MegaDetector Models</a>. Please carefully check their licensing information if you use this project. The App additionally builds upon on work from <a href='https://huggingface.co/spaces/hlydecker/MegaDetector_v5'>hlydecker/MegaDetector_v5</a> \
<a href='https://huggingface.co/spaces/sofmi/MegaDetector_DLClive'>sofmi/MegaDetector_DLClive</a> \
<a href='https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels'>Neslihan/megadetector_dlcmodels</a>\."
# article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
    examples = [['examples/monkey_full.jpg', 'md_v5a', 'full_macaque', False, True, 0.5, 0.3, 'amiko', 9, 'blue', 3],
                ['examples/dog.jpeg', 'md_v5a', 'full_dog', False, True, 0.5, 0.0, 'amiko', 9, 'yellow', 3],
                ['examples/cat.jpg', 'md_v5a', 'full_cat', False, True, 0.5, 0.05, 'amiko', 9, 'purple', 3]]
    return [title, description, examples]
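
####################################################
# Example wiring (a minimal sketch, not part of this module): an app script
# would combine the three helpers above into a gr.Interface. Here
# `predict_pipeline`, `MD_models_dict` and `DLC_models_dict` are hypothetical
# names assumed to be supplied by the calling script, not defined in this file.
#
# inputs = gradio_inputs_for_MD_DLC(list(MD_models_dict.keys()),
#                                   list(DLC_models_dict.keys()))
# outputs = gradio_outputs_for_MD_DLC()
# title, description, examples = gradio_description_and_examples()
# gr.Interface(fn=predict_pipeline,  # hypothetical prediction callback
#              inputs=inputs,
#              outputs=outputs,
#              title=title,
#              description=description,
#              examples=examples).launch()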