File size: 1,436 Bytes
7068d76
a8105ab
7068d76
 
a8105ab
c899ba0
7068d76
a8105ab
7068d76
 
 
 
 
 
 
 
 
 
a2a4533
7068d76
1227bfb
00709c7
b7e7f09
1227bfb
7068d76
f7dafa5
 
00709c7
7068d76
 
 
f7dafa5
 
7068d76
 
00709c7
 
f7dafa5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from PIL import Image

# Hugging Face Hub location of the custom-trained YOLOv5 weights.
REPO_ID = "rgp/yolov5-street-view-detection"
FILENAME = "best.pt"

# Download the weight file; huggingface_hub caches it locally after the first run.
yolov5_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)

# Load a custom YOLOv5 model via torch.hub. force_reload=True re-fetches the
# ultralytics/yolov5 repo code on every start instead of using the hub cache.
model = torch.hub.load('ultralytics/yolov5', 'custom', path=yolov5_weights, force_reload=True)

def object_detection(im, size=640):
    """Run YOLOv5 inference on one image and return the annotated result.

    Args:
        im: Input image as delivered by the Gradio Image component
            (PIL image or numpy array — either is accepted by the model).
        size: Inference resolution in pixels; defaults to 640.

    Returns:
        PIL.Image.Image with detection boxes and labels drawn on it.
    """
    # Fix: `size` was previously accepted but never used — forward it so the
    # parameter actually controls the inference resolution.
    results = model(im, size=size)
    results.render()  # draws boxes/labels in place; annotated frames land in results.ims
    return Image.fromarray(results.ims[0])

title = "Pedestrians and Transportations detection on the streets"
description = """This model is a small demo based on an analysis of 680 images.
"""
# Enlarge the image widgets so 640px detections are visible without scrolling.
css = ".output-image, .input-image, .image-preview {height: 640px !important}"

# Fix: `gr.inputs.Image` / `gr.outputs.Image` were deprecated in Gradio 3.x and
# removed in 4.x, along with the `shape=`, `source=`, and `optional=` kwargs.
# The unified `gr.Image` component replaces both; resizing to the inference
# resolution is handled by YOLOv5 itself via the `size` argument.
input = gr.Image(image_mode="RGB", label="Imagem")
output = gr.Image(type="pil", label="Output Image")
examples = [["sample_images/IMG_0125.jpeg"], ["sample_images/IMG_0129.jpg"], ["sample_images/IMG_0157.jpg"], ["sample_images/IMG_0158.jpeg"], ["sample_images/IMG_012.jpg"]]

# Assemble the Gradio UI and start the web server (blocking call).
# debug=True prints tracebacks from the inference function to the console.
gr.Interface(
    fn=object_detection,
    inputs=input,
    outputs=output,
    title=title,
    description=description,
    examples=examples,
    css=css
).launch(debug=True)