import gradio as gr
import torch
import cv2

# Load the custom-trained YOLOv5 detector (exported to ONNX) through the Ultralytics hub wrapper
model = torch.hub.load('ultralytics/yolov5', 'custom', 'model/best.onnx')
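# Optional tuning sketch (illustrative values, not from the original app): the hub
# wrapper exposes its post-processing thresholds as attributes on the model object.
# model.conf = 0.25  # minimum confidence to keep a detection
# model.iou = 0.45   # IoU threshold used by non-maximum suppression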
# RGB color used to draw the boxes and captions of each class
CLASS_COLORS = {
    0: [148, 0, 211],  # class 1 (violet)
    1: [255, 0, 0],    # class 2 (red)
    2: [255, 127, 0],  # class 3 (orange)
    3: [255, 255, 0],  # class 4 (yellow)
    4: [0, 255, 0],    # class 5 (green)
    5: [0, 0, 255],    # class 6 (blue)
}
def object_detection(image):
    # Gradio hands the image over as an RGB numpy array, which is the channel order
    # the YOLOv5 hub wrapper expects, so it can be passed to the model directly
    results = model(image)

    # results.xyxy[0] is an (N, 6) tensor: x1, y1, x2, y2, confidence, class index
    detections = results.xyxy[0]
    bboxes = detections[:, :4].tolist()
    scores = detections[:, 4].tolist()
    labels = detections[:, 5].long().tolist()

    # Draw a colored box and a "<class> (<confidence>)" caption for every detection
    for bbox, label, score in zip(bboxes, labels, scores):
        label_name = results.names[label]
        color = CLASS_COLORS[label]
        cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
        text = f"{label_name} ({score:.2f})"
        cv2.putText(image, text, (int(bbox[0]), int(bbox[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return image
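# Hedged standalone check (not part of the app; the output path is hypothetical):
# run the detector on one of the bundled example images without the Gradio UI.
# sample = cv2.cvtColor(cv2.imread('examples/india.jpeg'), cv2.COLOR_BGR2RGB)  # cv2 reads BGR
# annotated = object_detection(sample)
# cv2.imwrite('detections.jpeg', cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))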
# 640x640 image components for the web UI; the output shows the annotated copy of the input
inputs = gr.components.Image(shape=(640, 640), label='Input Image')
outputs = gr.components.Image(shape=(640, 640), label='Output Image')

iface = gr.Interface(fn=object_detection, inputs=inputs, outputs=outputs,
                     examples=['examples/india.jpeg', 'examples/new-york.jpeg', 'examples/pedestrian-bikes.jpeg'])
iface.launch()