sabrinabenas committed on
Commit
f52e6b1
•
1 Parent(s): 59b822b

add bb text

Browse files
Files changed (1) hide show
  1. app.py +15 -11
app.py CHANGED
@@ -123,6 +123,7 @@ def crop_animal_detections(img_in,
123
 
124
  ## Extract animal crops
125
  #print(yolo_results)
 
126
  list_labels_as_str = [i for i in yolo_results.names.values()] # ['animal', 'person', 'vehicle']
127
  list_np_animal_crops = []
128
 
@@ -160,20 +161,22 @@ def crop_animal_detections(img_in,
160
 
161
  return list_np_animal_crops
162
 
163
- def draw_rectangle_text(img,results,font_style='amiko',font_size=8,):
164
  bbxyxy = results.xyxy[0].tolist()[0]
165
  w, h = bbxyxy[2], bbxyxy[3]
166
  shape = [(bbxyxy[0], bbxyxy[1]), (w , h)]
167
  imgR = ImageDraw.Draw(img)
168
- imgR.rectangle(shape, outline ="red",width=5)
169
 
170
  confidence = bbxyxy[4]
171
- print(confidence)
172
  font = ImageFont.truetype(FONTS[font_style], font_size)
173
- imgR.text((keypoint_x + marker_size, keypoint_y + marker_size),#(0.5*im_width, 0.5*im_height), #-------
174
- map_label_id_to_str[i],
175
- ImageColor.getcolor(keypt_color, "RGB"), # rgb
176
- font=font)
 
 
177
 
178
  return imgR
179
 
@@ -304,7 +307,8 @@ def predict_pipeline(img_input,
304
  img_background.paste(img_crop,
305
  box = tuple([int(t) for t in md_results.xyxy[0][ic,:2]]))
306
 
307
- draw_rectangle_text(img_background, md_results)
 
308
  return img_background
309
 
310
  #############################################
@@ -377,9 +381,9 @@ gr_description = "Contributed by Sofia Minano, Neslihan Wittek, Nirel Kadzo, Vic
377
 
378
  # article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
379
 
380
- examples = [['example/monkey_full.jpg', 'md_v5a','full_macaque', False, True, 0.5, 0.3, 'amiko', 5, 'blue', 3],
381
- ['example/dog.jpeg', 'md_v5a', 'full_dog', False, True, 0.5, 0.05, 'amiko', 5, 'yellow', 3],
382
- ['example/cat.jpg', 'md_v5a', 'full_cat', False, True, 0.5, 0.05, 'amiko', 5, 'purple', 3]]
383
 
384
  ################################################
385
  # %% Define and launch gradio interface
 
123
 
124
  ## Extract animal crops
125
  #print(yolo_results)
126
+ #pdb.set_trace()
127
  list_labels_as_str = [i for i in yolo_results.names.values()] # ['animal', 'person', 'vehicle']
128
  list_np_animal_crops = []
129
 
 
161
 
162
  return list_np_animal_crops
163
 
164
+ def draw_rectangle_text(img,results,font_style='amiko',font_size=8, keypt_color="white",):
165
  bbxyxy = results.xyxy[0].tolist()[0]
166
  w, h = bbxyxy[2], bbxyxy[3]
167
  shape = [(bbxyxy[0], bbxyxy[1]), (w , h)]
168
  imgR = ImageDraw.Draw(img)
169
+ imgR.rectangle(shape, outline ="red",width=5) ##bb for animal
170
 
171
  confidence = bbxyxy[4]
172
+ string_bb = 'animal ' + str(round(confidence, 2))
173
  font = ImageFont.truetype(FONTS[font_style], font_size)
174
+
175
+ text_size = font.getsize(string_bb) # (h,w)
176
+ position = (bbxyxy[0],bbxyxy[1] - text_size[1] -2 )
177
+ left, top, right, bottom = imgR.textbbox(position, string_bb, font=font)
178
+ imgR.rectangle((left, top-5, right+5, bottom+5), fill="red")
179
+ imgR.text((bbxyxy[0] + 3 ,bbxyxy[1] - text_size[1] -2 ), string_bb, font=font, fill="black")
180
 
181
  return imgR
182
 
 
307
  img_background.paste(img_crop,
308
  box = tuple([int(t) for t in md_results.xyxy[0][ic,:2]]))
309
 
310
+ draw_rectangle_text(img_background, md_results ,font_style=font_style,font_size=font_size, keypt_color=keypt_color)
311
+ #draw_rectangle_text(img,results,font_style='amiko',font_size=8, keypt_color="#ff0000",):
312
  return img_background
313
 
314
  #############################################
 
381
 
382
  # article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
383
 
384
+ examples = [['example/monkey_full.jpg', 'md_v5a','full_macaque', False, True, 0.5, 0.3, 'amiko', 9, 'blue', 3],
385
+ ['example/dog.jpeg', 'md_v5a', 'full_dog', False, True, 0.5, 0.05, 'amiko',9, 'yellow', 3],
386
+ ['example/cat.jpg', 'md_v5a', 'full_cat', False, True, 0.5, 0.05, 'amiko', 9, 'purple', 3]]
387
 
388
  ################################################
389
  # %% Define and launch gradio interface