rgp committed
Commit
4767313
1 Parent(s): ebae4cb

Create app.py

Files changed (1):
  app.py +38 -0
app.py ADDED
@@ -0,0 +1,38 @@
+ import torch
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+
+ REPO_ID = "rgp/yolov5-street-view-detection-grayscale"
+ FILENAME = "best.pt"
+
+ yolov5_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
+
+ model = torch.hub.load('ultralytics/yolov5', 'custom', path=yolov5_weights, force_reload=True)  # load custom weights via the YOLOv5 hub repo
+
+ def object_detection(im, size=640):
+     results = model(im, size=size)  # run inference at the requested size
+     # results.print()  # print results to the console
+     # results.show()   # display results
+     # results.save()   # save as results1.jpg, results2.jpg, etc.
+     results.render()  # draw boxes and labels onto results.ims
+     return Image.fromarray(results.ims[0])
+
+ title = "Pedestrian and transportation detection on the streets - grayscale"
+ description = """This model is a small demo based on an analysis of 680 grayscale images.
+ """
+ css = ".output-image, .input-image, .image-preview {height: 640px !important}"
+
+ input = gr.inputs.Image(shape=(640, 640), image_mode="RGB", source="upload", label="Image", optional=False)
+ output = gr.outputs.Image(type="pil", label="Output Image")
+ examples = [["sample_images/image-2.jpeg"], ["sample_images/image-1.jpg"], ["sample_images/image-3.jpg"], ["sample_images/image-4.jpg"], ["sample_images/image-5.jpg"]]
+
+ gr.Interface(
+     fn=object_detection,
+     inputs=input,
+     outputs=output,
+     title=title,
+     description=description,
+     examples=examples,
+     css=css
+ ).launch(debug=True)
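
Note: gr.inputs and gr.outputs were deprecated in Gradio 3 and removed in Gradio 4, so app.py as committed only runs against an older Gradio release. A minimal sketch of the same interface using the current component API is shown below; it assumes Gradio >= 4 and reuses object_detection, title, description, examples, and css exactly as defined in app.py above.

import gradio as gr

# Sketch only (assumes Gradio >= 4): same interface as app.py, using gr.Image
# components instead of the removed gr.inputs.Image / gr.outputs.Image classes.
demo = gr.Interface(
    fn=object_detection,  # inference function from app.py above
    inputs=gr.Image(type="pil", image_mode="RGB", label="Image"),
    outputs=gr.Image(type="pil", label="Output Image"),
    title=title,
    description=description,
    examples=examples,
    css=css,
)
demo.launch(debug=True)

With type="pil", Gradio hands object_detection a PIL image rather than a NumPy array; YOLOv5 hub models accept either, so the function body does not need to change.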