Commit c4bc253 · sfmig committed · 1 parent: 4687e19

added string labels, reviewing kpts

Files changed:
- app.py (+78 -37)
- requirements.txt (+2 -1)
app.py CHANGED

@@ -1,51 +1,68 @@
 import gradio as gr
+
 import torch
 import torchvision
-import
+from dlclive import DLCLive, Processor
+
 from PIL import Image
-import
-import pdb
+from PIL import ImageFont
+from PIL import ImageDraw
 
+import numpy as np
+import math
+# import json
+import os
+import yaml
 
-import
+import pdb
 
 #########################################
 
 def draw_keypoints_on_image(image,
                             keypoints,
+                            map_label_id_to_str,
                             color='red',
                             radius=2,
-                            use_normalized_coordinates=True):
+                            use_normalized_coordinates=True,
+                            ):
+    """Draws keypoints on an image.
+    Modified from:
+    https://www.programcreek.com/python/?code=fjchange%2Fobject_centric_VAD%2Fobject_centric_VAD-master%2Fobject_detection%2Futils%2Fvisualization_utils.py
+    Args:
       image: a PIL.Image object.
       keypoints: a numpy array with shape [num_keypoints, 2].
       color: color to draw the keypoints with. Default is red.
       radius: keypoint radius. Default value is 2.
       use_normalized_coordinates: if True (default), treat keypoint values as
+        relative to the image. Otherwise treat them as absolute.
+    """
-    [old lines 30-48 (previous function body) removed]
+    # get a drawing context
+    draw = ImageDraw.Draw(image)
+    # font = ImageFont.truetype("sans-serif.ttf", 16)
+
+    im_width, im_height = image.size
+    keypoints_x = [k[1] for k in keypoints]
+    keypoints_y = [k[0] for k in keypoints]
+
+    # adjust keypoints coords if required
+    if use_normalized_coordinates:
+        keypoints_x = tuple([im_width * x for x in keypoints_x])
+        keypoints_y = tuple([im_height * y for y in keypoints_y])
+
+    # draw ellipses around keypoints and add string labels
+    font = ImageFont.truetype("Amiko-Regular.ttf", 8)  # ImageFont.truetype(<font-file>, <font-size>)
+    for i, (keypoint_x, keypoint_y) in enumerate(zip(keypoints_x, keypoints_y)):
+        draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
+                      (keypoint_x + radius, keypoint_y + radius)],
+                     outline=color, fill=color)
+
+        # add string labels around keypoints
+        # draw.text((x, y), "Sample Text", (r, g, b))
+        draw.text((keypoint_x + radius, keypoint_y + radius),
+                  map_label_id_to_str[i],
+                  (255, 0, 0),  # rgb
+                  font=font)
+
 ############################################
 
 # Predict detections with MegaDetector v5a model
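For reference, a minimal usage sketch of the updated function, assuming it is in scope and that Amiko-Regular.ttf sits in the working directory (as it does in this Space). Note the row order: the function reads k[1] as x and k[0] as y, so keypoint rows are (y, x); the labels here are illustrative, not the model's:

    import numpy as np
    from PIL import Image

    # blank test canvas; keypoint rows are (y, x) in absolute pixels,
    # since use_normalized_coordinates=False below
    img = Image.new('RGB', (200, 100), color='white')
    kpts = np.array([[30.0, 50.0],    # keypoint 0 at x=50, y=30
                     [60.0, 120.0]])  # keypoint 1 at x=120, y=60
    map_label_id_to_str = {0: 'nose', 1: 'tail_base'}  # illustrative labels

    draw_keypoints_on_image(img, kpts, map_label_id_to_str,
                            color='red', radius=2,
                            use_normalized_coordinates=False)
    img.save('kpts_test.png')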
@@ -63,7 +80,6 @@ def predict_md(im, size=640):
 def crop_animal_detections(yolo_results,
                            likelihood_th):
     ## crop if animal and return list of crops
-
     list_labels_as_str = yolo_results.names  # ['animal', 'person', 'vehicle']
     list_np_animal_crops = []
 
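The body of crop_animal_detections is outside this hunk. A hedged sketch of the core filtering-and-cropping step it performs, assuming the standard YOLOv5 hub results API (results.xyxy, results.names); the np_image parameter and function name here are illustrative, not the Space's actual signature:

    import numpy as np

    def crop_animal_detections_sketch(yolo_results, np_image, likelihood_th):
        # keep 'animal' boxes above the confidence threshold and crop them out;
        # yolo_results.xyxy[0] rows are [xmin, ymin, xmax, ymax, conf, cls]
        list_labels_as_str = yolo_results.names  # e.g. ['animal', 'person', 'vehicle']
        list_np_animal_crops = []
        for *box, conf, cls in yolo_results.xyxy[0].tolist():
            if list_labels_as_str[int(cls)] == 'animal' and conf >= likelihood_th:
                xmin, ymin, xmax, ymax = (int(round(v)) for v in box)
                list_np_animal_crops.append(np_image[ymin:ymax, xmin:xmax, :])
        return list_np_animal_crops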
@@ -124,8 +140,15 @@ def predict_dlc(list_np_crops,
     for crop in list_np_crops:
         # scale crop here?
         keypts_xyp = dlc_live.get_pose(crop)  # third column is llk!
+        # ATT! coord syst for keypoints is bottom left corner? change here to top left for PIL coord system
+        pdb.set_trace()
+        keypts_xyp = np.column_stack((crop.shape[1] - keypts_xyp[:, 0],
+                                      crop.shape[0] - keypts_xyp[:, 1],
+                                      keypts_xyp[:, 2]))
+        pdb.set_trace()
         # set kpts below threshold to nan
         keypts_xyp[keypts_xyp[:, -1] < kpts_likelihood_th, :] = np.nan
+        # add kpts of this crop to list
         list_kpts_per_crop.append(keypts_xyp)
 
     return list_kpts_per_crop
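The coordinate flip above is still under review (hence the pdb.set_trace() calls); it assumes DLC returns keypoints with a bottom-left origin while PIL uses top-left. The likelihood masking step, by contrast, is straightforward. A minimal sketch of it, assuming keypts_xyp has shape [num_keypoints, 3] with columns (x, y, likelihood):

    import numpy as np

    # two keypoints, one confident and one not
    keypts_xyp = np.array([[10.0, 20.0, 0.9],
                           [30.0, 40.0, 0.2]])
    kpts_likelihood_th = 0.5

    # rows whose likelihood falls below the threshold become NaN,
    # so downstream drawing code can skip them
    keypts_xyp[keypts_xyp[:, -1] < kpts_likelihood_th, :] = np.nan
    print(keypts_xyp)  # second row is [nan, nan, nan]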
@@ -138,21 +161,35 @@ def predict_pipeline(img_input,
                      bbox_likelihood_th,
                      kpts_likelihood_th):
 
+    ############################################################
+    ## Get DLC model and labels as strings
     if model_input_str == 'full_cat':
         path_to_DLCmodel = "DLC_models/DLC_Cat_resnet_50_iteration-0_shuffle-0"
+        pose_cfg_path = os.path.join(path_to_DLCmodel, 'pose_cfg.yaml')
     elif model_input_str == 'full_dog':
         path_to_DLCmodel = "DLC_models/DLC_Dog_resnet_50_iteration-0_shuffle-0"
+        pose_cfg_path = os.path.join(path_to_DLCmodel, 'pose_cfg.yaml')
 
+    # read pose cfg as dict
+    with open(pose_cfg_path, "r") as stream:
+        pose_cfg_dict = yaml.safe_load(stream)
+    map_label_id_to_str = dict([(k, v) for k, v in zip([el[0] for el in pose_cfg_dict['all_joints']],  # pose_cfg_dict['all_joints'] is a list of one-element lists
+                                                       pose_cfg_dict['all_joints_names'])])
+
+    ############################################################
     # ### Run Megadetector
     md_results = predict_md(img_input)  # Image.fromarray(results.imgs[0])
 
-
+    ################################################################
+    # Obtain animal crops for bboxes with confidence above th
     list_crops = crop_animal_detections(md_results,
                                         bbox_likelihood_th)
 
+    ##############################################################
     # Run DLC
-    # TODO: add llk threshold for kpts too?
     dlc_proc = Processor()
+
+    # if required: ignore MD crops and run DLC on full image [mostly for testing]
     if flag_dlc_only:
         # compute kpts on input img
         list_kpts_per_crop = predict_dlc([np.asarray(img_input)],  # list_crops
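The label map added above pairs each joint id from all_joints (a list of one-element lists in DLC's pose_cfg.yaml) with its name from all_joints_names. A minimal sketch with an assumed two-joint config; the YAML content here is illustrative, not the actual cat/dog model config:

    import yaml

    # illustrative pose_cfg.yaml fragment; the real configs ship with the DLC models
    pose_cfg_yaml = """
    all_joints: [[0], [1]]
    all_joints_names: [nose, tail_base]
    """
    pose_cfg_dict = yaml.safe_load(pose_cfg_yaml)

    # pair each joint id (unwrapped from its one-element list) with its name
    map_label_id_to_str = {el[0]: name
                           for el, name in zip(pose_cfg_dict['all_joints'],
                                               pose_cfg_dict['all_joints_names'])}
    print(map_label_id_to_str)  # {0: 'nose', 1: 'tail_base'}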
@@ -162,6 +199,7 @@ def predict_pipeline(img_input,
         # draw kpts on input img
         draw_keypoints_on_image(img_input,
                                 list_kpts_per_crop[0],  # a numpy array with shape [num_keypoints, 2].
+                                map_label_id_to_str,
                                 color='red',
                                 radius=2,
                                 use_normalized_coordinates=False)
@@ -183,9 +221,11 @@ def predict_pipeline(img_input,
         img_crop = Image.fromarray(np_crop)
         draw_keypoints_on_image(img_crop,
                                 kpts_crop,  # a numpy array with shape [num_keypoints, 2].
+                                map_label_id_to_str,
                                 color='red',
                                 radius=2,
-                                use_normalized_coordinates=False)
+                                use_normalized_coordinates=False,  # if True, then I should use md_results.xyxyn
+                                )
 
         ## Paste crop in original image
         # https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.paste
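The annotated crop is then pasted back into the original frame at its bounding-box offset (see the Pillow paste docs linked above). A minimal sketch, where xmin/ymin are illustrative stand-ins for the top-left corner of the MegaDetector bbox:

    from PIL import Image

    # stand-in full frame and annotated crop
    img_input = Image.new('RGB', (640, 480), color='gray')
    img_crop = Image.new('RGB', (100, 80), color='red')
    xmin, ymin = 200, 150  # illustrative bbox top-left corner

    # paste() places the crop with its top-left corner at the given offset
    img_input.paste(img_crop, box=(xmin, ymin))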
@@ -216,13 +256,14 @@ gr_dlc_only_checkbox = gr.inputs.Checkbox(False,
 gr_slider_conf_bboxes = gr.inputs.Slider(0,1,.05,0.8,
                                          label='Set confidence threshold for animal detections')
 gr_slider_conf_keypoints = gr.inputs.Slider(0,1,.05,0,
-
+                                            label='Set confidence threshold for keypoints')
 #image = gr.inputs.Image(type="pil", label="Input Image")
 #chosen_model = gr.inputs.Dropdown(choices = models, value = "model_weights/md_v5a.0.0.pt",type = "value", label="Model Weight")
 #size = 640
 
 gr_title = "MegaDetector v5 + DLClive"
-gr_description = "Detect and estimate the pose of animals in camera trap images, using MegaDetector v5a + DeepLabCut-live"
+gr_description = "Detect and estimate the pose of animals in camera trap images, using MegaDetector v5a + DeepLabCut-live. \
+    Builds up on work from <a href='https://huggingface.co/spaces/hlydecker/MegaDetector_v5'>hlydecker/MegaDetector_v5</a>"
 # article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
 # examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
 
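These inputs are presumably wired into a gr.Interface alongside an image input and a model dropdown; that wiring is outside this diff. A hedged sketch of what it might look like, using the same deprecated gr.inputs API the Space uses; gr_dropdown_model, the argument order, and the output spec are assumptions:

    import gradio as gr

    # hedged sketch only: gr_dropdown_model is a hypothetical name for the
    # DLC model selector, and the outputs spec is assumed, not shown in this diff
    demo = gr.Interface(fn=predict_pipeline,
                        inputs=[gr.inputs.Image(type="pil", label="Input Image"),
                                gr_dropdown_model,
                                gr_dlc_only_checkbox,
                                gr_slider_conf_bboxes,
                                gr_slider_conf_keypoints],
                        outputs=gr.outputs.Image(type="pil"),
                        title=gr_title,
                        description=gr_description)
    demo.launch()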
requirements.txt CHANGED

@@ -4,4 +4,5 @@ torchvision
 numpy
 opencv-python
 seaborn
-deeplabcut-live
+deeplabcut-live
+pyyaml
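pyyaml is added because app.py now imports yaml and reads each DLC model's pose_cfg.yaml with yaml.safe_load to recover the keypoint label names.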