coutant committed on
Commit
e9d9a23
1 Parent(s): ed4ad05

fix Detections class attribute imgs versus ims

Files changed (1)
  1. app.py +1 -6
app.py CHANGED
@@ -1,7 +1,6 @@
 from typing import List
 import PIL.Image
 import torch
-import torchvision
 import gradio as gr
 
 article = "<p style='text-align: center'><a href='https://github.com/scoutant/yolo-person-gradio' target='_blank' class='footer'>Github Repo</a></p>"
@@ -15,14 +14,10 @@ def inference(img:PIL.Image.Image, threshold:float=0.6):
         return None,0
     images:List[PIL.Image.Image] = [ img ] # inference operates on a list of images
     model.conf = threshold
-    # detections:torchvision.Detections = model(images, size=640)
     detections = model(images, size=640)
-    print( "detections type:" , type(detections))
-    print( "attributes:" , dir(detections))
     predictions:torch.Tensor = detections.pred[0] # the predictions for our single image
-    result_image=detections.imgs[0]
+    result_image = detections.ims[0] if hasattr(detections, "ims") else detections.imgs[0] # either model.common.Detections or torchvision.Detections
     detections.render() # bounding boxes and labels added into image
-    # return detections.imgs[0], predictions.size(dim=0) # image and number of detections
     return result_image, predictions.size(dim=0) # image and number of detections
 
 gr.Interface(
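
For reference, later YOLOv5 releases renamed the Detections attribute imgs to ims, and the hasattr fallback added here keeps the app working with either. Below is a minimal sketch of the resulting inference path, assuming the model is loaded via torch.hub from ultralytics/yolov5 (that loading line is an assumption and does not appear in this diff):

from typing import List
import PIL.Image
import torch

# Assumption: typical YOLOv5 hub loading for a Gradio demo; not part of this commit.
model = torch.hub.load("ultralytics/yolov5", "yolov5s")

def inference(img: PIL.Image.Image, threshold: float = 0.6):
    if img is None:
        return None, 0
    images: List[PIL.Image.Image] = [img]  # inference operates on a list of images
    model.conf = threshold
    detections = model(images, size=640)
    predictions: torch.Tensor = detections.pred[0]  # predictions for our single image
    # newer YOLOv5 exposes Detections.ims, older releases Detections.imgs
    result_image = detections.ims[0] if hasattr(detections, "ims") else detections.imgs[0]
    detections.render()  # draws bounding boxes and labels onto the stored image in place
    return result_image, predictions.size(dim=0)  # annotated image and number of detections

With the hasattr check, the Space no longer depends on which YOLOv5 revision torch.hub happens to have cached.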