import torch
import gradio as gr
from PIL import Image

yolov7_custom_weights = "best.pt"  # fine-tuned checkpoint, expected next to this script
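# If the weights were hosted on the Hugging Face Hub instead of shipped with the
# app, they could be fetched at startup. A minimal sketch; the repo id below is a
# placeholder, not taken from this script:
#   from huggingface_hub import hf_hub_download
#   yolov7_custom_weights = hf_hub_download(repo_id="<user>/<weights-repo>", filename="best.pt")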

# Load the custom checkpoint through the YOLOv7 fork's hubconf; the 'custom' entry
# point wraps it with AutoShape, so the model accepts PIL images directly.
model = torch.hub.load('Owaiskhan9654/yolov7-1:main', 'custom',
                       path_or_model=yolov7_custom_weights, force_reload=True)

def object_detection(image: Image.Image):
    results = model(image)  # run inference on the uploaded image
    results.render()        # draw bounding boxes onto results.imgs in place
    # Tally detections per class name: {class_name: count}
    count_dict = results.pandas().xyxy[0]['name'].value_counts().to_dict()
    if count_dict:
        return Image.fromarray(results.imgs[0]), str(count_dict)
    return Image.fromarray(results.imgs[0]), 'No objects found! Try another image.'
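
# Hypothetical local sanity check (not part of the app's flow; assumes one of the
# example images listed below is present next to this script):
#   annotated, counts = object_detection(Image.open("image0.jpg"))
#   print(counts)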

inputs = gr.Image(type="pil", image_mode="RGB", label="Upload Image")

outputs = gr.Image(type="pil", label="Output Image")
outputs_cls = gr.Label(label="Detected Category Counts")

examples1 = [["image0.jpg"], ["image1.jpg"], ["image2.jpg"], ["image3.jpg"],
             ["image4.jpg"], ["image5.jpg"], ["image6.jpg"], ["image7.jpg"]]

Top_Title = "Yolov7 🚀 Visual Pollution Detection"
css = (
    ".output-image, .input-image {height: 50rem !important; width: 100% !important;} "
    ".image-preview {height: auto !important;}"
)

gr.Interface(
    fn=object_detection,
    inputs=inputs,
    outputs=[outputs, outputs_cls],
    title=Top_Title,
    css=css,
    examples=examples1,
    cache_examples=False,
    allow_flagging='never',
).launch(debug=True)
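# debug=True keeps the process in the foreground and prints full tracebacks to the
# console, which helps when running the script directly during development.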