import gradio as gr
from ultralytics import YOLO
import cv2
import numpy as np
import supervision as sv
import os
from pathlib import Path
from supervision.draw.color import Color
# Create title, description and article strings
title = "Excavator Detector"
description = "A YOLOv8(m) object detection computer vision model to detect images of excavators."
excavator_path = Path('.')
example_path = excavator_path / "examples"
# Get example filepaths in a list of lists (the shape Gradio's `examples=`
# expects). Sort for a deterministic ordering and tolerate a missing
# examples/ directory instead of crashing at import time.
example_list = (
    [[str(example_path / name)] for name in sorted(os.listdir(example_path))]
    if example_path.is_dir()
    else []
)
def predict(img, conf_thres=0.35, iou_thres=0.5):  # default : conf(0.25), iou(0.7)
    """Run excavator detection on an image and return an annotated copy.

    Args:
        img: Path to the input image file (Gradio passes a filepath string).
        conf_thres: Confidence threshold for keeping detections
            (stricter than the YOLO default of 0.25).
        iou_thres: IoU threshold for non-max suppression
            (stricter than the YOLO default of 0.7).

    Returns:
        Tuple of (annotated RGB image as a numpy array, inference speed
        info from the Ultralytics results object, shown as text in the UI).
    """
    # Load the weights once and cache them on the function object so the
    # model is not re-read from disk on every request.
    if not hasattr(predict, "_model"):
        predict._model = YOLO('./weights/excavator_only_best_v8m.pt')
    model = predict._model
    classNames = ['excavator']
    results = model.predict(source=img, conf=conf_thres, iou=iou_thres)[0]
    detections = sv.Detections.from_yolov8(results)
    box_annotator = sv.BoxAnnotator()
    box_annotator.text_scale = 1.3
    box_annotator.text_color = Color.white()
    box_annotator.text_thickness = 2
    # One "<class> <confidence>" label per detected box.
    labels = [
        f"{classNames[class_id]} {confidence:.2f}"
        for class_id, confidence
        in zip(detections.class_id, detections.confidence)
    ]
    # Re-read the image and convert to RGB (OpenCV loads BGR) so the
    # annotated output displays with correct colors in Gradio.
    image = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB)
    annotated_image = box_annotator.annotate(image, detections=detections, labels=labels)
    return annotated_image, results.speed
# Build the Gradio interface: a filepath image input mapped through
# `predict` to an annotated image plus a timing readout.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="filepath"),
    outputs=[
        gr.Image(type="numpy"),
        gr.Text(label="Prediction time (ms)"),
    ],
    examples=example_list,
    title=title,
    description=description,
)
# Launch the demo! Pass share=True here to generate a publicly
# shareable URL; debug=False suppresses local error printing.
demo.launch(debug=False)