import gradio as gr


##############################
def gradio_inputs_for_MD_DLC(md_models_list,   # list(MD_models_dict.keys())
                             dlc_models_list,  # list(DLC_models_dict.keys())
                             ):
    # Input image
    gr_image_input = gr.inputs.Image(type="pil", label="Input Image")

    # Models
    gr_mega_model_input = gr.inputs.Dropdown(choices=md_models_list,
                                             default='md_v5a',  # default option
                                             type='value',  # 'value' returns the string of the selected choice, 'index' returns its index
                                             label='Select MegaDetector model')
    gr_dlc_model_input = gr.inputs.Dropdown(choices=dlc_models_list,
                                            default='full_cat',  # default option
                                            type='value',
                                            label='Select DeepLabCut model')

    # Other inputs
    gr_dlc_only_checkbox = gr.inputs.Checkbox(False,
                                              label='Run DLClive only, directly on input image?')
    gr_str_labels_checkbox = gr.inputs.Checkbox(True,
                                                label='Show bodypart labels?')
    # Slider positional args are (minimum, maximum, step, default)
    gr_slider_conf_bboxes = gr.inputs.Slider(0, 1, 0.02, 0.8,
                                             label='Set confidence threshold for animal detections')
    gr_slider_conf_keypoints = gr.inputs.Slider(0, 1, 0.05, 0,
                                                label='Set confidence threshold for keypoints')

    # Data viz
    gr_keypt_color = gr.ColorPicker(value="#ff0000",
                                    label="Choose color for keypoint label")
    gr_labels_font_style = gr.inputs.Dropdown(choices=['amiko', 'animals', 'nature', 'painter', 'zen'],
                                              default='amiko',
                                              type='value',
                                              label='Select keypoint label font')
    gr_slider_font_size = gr.inputs.Slider(5, 30, 1, 8,
                                           label='Set font size')
    gr_slider_marker_size = gr.inputs.Slider(1, 20, 1, 5,
                                             label='Set marker size')

    # List of inputs (this order defines the order of arguments
    # passed to the gr.Interface prediction function)
    return [gr_image_input,
            gr_mega_model_input,
            gr_dlc_model_input,
            gr_dlc_only_checkbox,
            gr_str_labels_checkbox,
            gr_slider_conf_bboxes,
            gr_slider_conf_keypoints,
            gr_labels_font_style,
            gr_slider_font_size,
            gr_keypt_color,
            gr_slider_marker_size]


####################################################
def gradio_outputs_for_MD_DLC():
    # User interface: outputs
    gr_image_output = gr.outputs.Image(type="pil", label="Output Image")
    gr_file_download = gr.File(label="Download JSON file")
    return [gr_image_output,
            gr_file_download]
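
####################################################
# Stub prediction callback (a hypothetical sketch; the original app
# defines the real pipeline elsewhere). gr.Interface calls the callback
# with one positional argument per input component, in the exact order
# returned by gradio_inputs_for_MD_DLC, and expects one return value
# per output component from gradio_outputs_for_MD_DLC.
def predict_pipeline(img, mega_model_name, dlc_model_name,
                     dlc_only, show_bodypart_labels,
                     bbox_conf_thresh, kpt_conf_thresh,
                     font_style, font_size,
                     keypt_color, marker_size):
    # Placeholder: the real pipeline would run MegaDetector on `img`,
    # keep detections above `bbox_conf_thresh`, run DLC-live on the
    # crops, draw keypoints above `kpt_conf_thresh` in `keypt_color`,
    # and return (annotated PIL image, path to a JSON results file).
    return img, None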

##############################################
# User interface: description
def gradio_description_and_examples():
    title = "MegaDetector v5 + DeepLabCut!"
    description = ("Contributed by Sofia Minano, Neslihan Wittek, Nirel Kadzo, "
                   "VicShaoChih Chiang, Sabrina Benas -- DLC AI Residents 2022. "
                   "This app detects animals in camera trap images and estimates "
                   "their pose using MegaDetector v5a + DeepLabCut-live. "
                   "We host models from the DeepLabCut ModelZoo Project and two "
                   "MegaDetector models. Please carefully check their licensing "
                   "information if you use this project. The app additionally "
                   "builds upon work from hlydecker/MegaDetector_v5, "
                   "sofmi/MegaDetector_DLClive and Neslihan/megadetector_dlcmodels.")

    # article = ("This app makes predictions using a YOLOv5x6 model that was "
    #            "trained to detect animals, humans, and vehicles in camera trap "
    #            "images; find out more about the project on GitHub. This app was "
    #            "built by Henry Lydecker but really depends on code and models "
    #            "developed by Ecologize and Microsoft AI for Earth. Find out more "
    #            "about the YOLO model from the original creator, Joseph Redmon. "
    #            "YOLOv5 is a family of compound-scaled object detection models "
    #            "trained on the COCO dataset and developed by Ultralytics, and "
    #            "includes simple functionality for Test Time Augmentation (TTA), "
    #            "model ensembling, hyperparameter evolution, and export to ONNX, "
    #            "CoreML and TFLite. Source code | PyTorch Hub")
" examples = [['examples/monkey_full.jpg', 'md_v5a','full_macaque', False, True, 0.5, 0.3, 'amiko', 9, 'blue', 3]] #['examples/dog.jpeg', 'md_v5a', 'full_dog', False, True, 0.5, 0.00, 'amiko',9, 'yellow', 3], #['examples/cat.jpg', 'md_v5a', 'full_cat', False, True, 0.5, 0.05, 'amiko', 9, 'purple', 3] return [title,description,examples]