File size: 1,418 Bytes
9183f24
9bd7097
b7cad3f
9183f24
b7cad3f
 
 
 
 
 
 
 
 
 
 
9183f24
9bd7097
 
b7cad3f
 
9bd7097
 
b7cad3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9bd7097
b7cad3f
 
9bd7097
4a19609
 
9bd7097
9183f24
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import gradio as gr
import torch
import cv2

# Load the custom YOLOv5 model from its ONNX export via the torch.hub loader.
# NOTE(review): this clones/fetches the 'ultralytics/yolov5' repo on first run,
# so module import requires network access and the local file 'model/best.onnx'.
model = torch.hub.load('ultralytics/yolov5', 'custom', 'model/best.onnx')


# Drawing color (RGB triplet) for each model class index.
# Only class indices 0-5 are mapped; a prediction outside this range
# would not have a color here.
CLASS_COLORS = {
    0: [148, 0, 211],  # class 1 (violet)
    1: [255, 0, 0],    # class 2 (red)
    2: [255, 127, 0],  # class 3 (orange)
    3: [255, 255, 0],  # class 4 (yellow)
    4: [0, 255, 0],    # class 5 (green)
    5: [0, 0, 255],    # class 6 (blue)
}


def object_detection(image):
    """Run YOLOv5 inference on an image and draw labelled bounding boxes.

    Args:
        image: H x W x 3 uint8 array as supplied by the Gradio Image input.

    Returns:
        The same image with one rectangle and a "name (score)" caption drawn
        per detection, in the channel order the caller expects.
    """
    # NOTE(review): Gradio delivers RGB images, so COLOR_BGR2RGB here actually
    # swaps the frame into BGR before inference (both conversion codes perform
    # the identical channel swap). Kept as-is to preserve the existing
    # pipeline behaviour — confirm which channel order the ONNX export expects.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    results = model(image)

    # Each row of results.xyxy[0] is [x1, y1, x2, y2, confidence, class];
    # unpack once instead of three separate .tolist() passes over the tensor.
    for x1, y1, x2, y2, score, label in results.xyxy[0].tolist():
        label = int(label)
        label_name = results.names[label]
        # BUG FIX: CLASS_COLORS[label] raised KeyError for any class index the
        # model emits that is not in the map; fall back to white instead.
        color = CLASS_COLORS.get(label, [255, 255, 255])

        top_left = (int(x1), int(y1))
        bottom_right = (int(x2), int(y2))
        cv2.rectangle(image, top_left, bottom_right, color, 2)
        text = f"{label_name} ({score:.2f})"
        # Caption sits 10px above the box's top-left corner.
        cv2.putText(image, text, (top_left[0], top_left[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # Swap channels back before handing the frame to Gradio.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    return image


# Fixed 640x640 canvases on both ends to match the model's export resolution.
inputs = gr.components.Image(shape=(640, 640))
# BUG FIX: the output component was mislabelled 'Input Image'.
outputs = gr.components.Image(label='Output Image', shape=(640, 640))

# Wire the detector into a simple image-in / image-out demo with
# three bundled example photos.
iface = gr.Interface(fn=object_detection, inputs=inputs, outputs=outputs,
                     examples=['examples/india.jpeg', 'examples/new-york.jpeg', 'examples/pedestrian-bikes.jpeg'])

# Blocks until the local Gradio server is stopped.
iface.launch()