File size: 1,723 Bytes
de1fa38
 
 
 
 
a20be42
de1fa38
 
 
 
 
a20be42
de1fa38
 
 
 
4e5c213
de1fa38
e9d9a23
de1fa38
ed4ad05
de1fa38
 
2676ea4
ed4ad05
 
a20be42
 
de1fa38
 
 
4916e5f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
from typing import List
import PIL.Image
import torch
import gradio as gr

# Footer HTML shown under the Gradio interface (link to the project repo).
article = "<p style='text-align: center'><a href='https://github.com/scoutant/yolo-person-gradio' target='_blank' class='footer'>Github Repo</a></p>"

# Load YOLOv5-large from the PyTorch Hub (downloads the weights on first run; needs network access).
model = torch.hub.load('ultralytics/yolov5', 'yolov5l')
model.classes = [ 0 ] # only considering class 'person' and not the 79 other classes...
model.conf = 0.6 # only considering detection above the threshold.

def inference(img: PIL.Image.Image, threshold: float = 0.6):
    """Detect persons in an image with the module-level YOLOv5 model.

    Args:
        img: input image, or None when the Gradio input is cleared.
        threshold: confidence threshold applied to the detections.

    Returns:
        Tuple of (annotated image, number of persons detected);
        (None, 0) when no image is provided.
    """
    if img is None:
        return None, 0
    images: List[PIL.Image.Image] = [img]  # inference operates on a list of images
    # NOTE(review): mutates shared model state — concurrent requests with different
    # thresholds may race; acceptable for a single-user demo.
    model.conf = threshold
    detections = model(images, size=640)
    predictions: torch.Tensor = detections.pred[0]  # predictions for our single image
    detections.render()  # draw bounding boxes and labels into the detections' images
    # Fetch the image AFTER render(): render() may reassign the list entry with the
    # annotated array, so grabbing it earlier could yield the un-annotated image.
    # Attribute is 'ims' in newer yolov5 Detections, 'imgs' in older releases.
    result_image = detections.ims[0] if hasattr(detections, "ims") else detections.imgs[0]
    return result_image, predictions.size(dim=0)  # image and number of detections

# Build and launch the Gradio UI; launch(debug=True) blocks here serving the app.
# Note: the slider default (0.7) intentionally overrides the function default (0.6).
gr.Interface(
    fn = inference,
    inputs = [ gr.Image(type="pil", label="Input"), gr.Slider(minimum=0.5, maximum=0.9, step=0.05, value=0.7, label="Confidence threshold") ],
    outputs = [ gr.Image(type="pil", label="Output"), gr.Label(label="nb of persons detected for given confidence threshold") ],
    title="Person detection with YOLO v5",
    # fixed user-facing typo: "twik" -> "tweak"
    description="Person detection, you can tweak the corresponding confidence threshold. Good results even when face not visible.",
    article=article,
    examples=[['data/businessmen-612.jpg'], ['data/businessmen-back.jpg']],
    allow_flagging="never"
).launch(debug=True, enable_queue=True)