import gradio as gr
from ultralyticsplus import YOLO, render_result
def yolov8_inference(
    image,
    model_path,
    image_size,
    conf_threshold,
    iou_threshold,
):
""" | |
YOLOv8 inference function | |
Args: | |
image: Input image | |
model_path: Path to the model | |
image_size: Image size | |
conf_threshold: Confidence threshold | |
iou_threshold: IOU threshold | |
Returns: | |
Rendered image | |
""" | |
    model = YOLO(f'kadirnar/{model_path}-v8.0')
    # set model parameters
    model.overrides['conf'] = conf_threshold  # NMS confidence threshold
    model.overrides['iou'] = iou_threshold  # NMS IoU threshold
    model.overrides['agnostic_nms'] = False  # class-agnostic NMS
    model.overrides['max_det'] = 1000  # maximum number of detections per image
    results = model.predict(image, imgsz=image_size)
    # render_result draws the predictions onto the image and returns a PIL image
    render = render_result(model=model, image=image, result=results[0])
    return render
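
# Hedged usage sketch: calling yolov8_inference directly, outside the Gradio UI.
# It assumes the example image 'demo_01.jpg' exists locally and that the
# 'kadirnar/yolov8m-v8.0' checkpoint can be downloaded from the Hugging Face Hub.
#   rendered = yolov8_inference('demo_01.jpg', 'yolov8m', 640, 0.25, 0.45)
#   rendered.save('output.jpg')  # render_result returns a PIL image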
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown(["yolov8n", "yolov8m", "yolov8l", "yolov8x"],
                value="yolov8m", label="Model"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=320, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IoU Threshold"),
]
outputs = gr.Image(type="pil", label="Output Image")  # yolov8_inference returns a PIL image
title = "State-of-the-Art YOLO Models for Object Detection"
examples = [
    ['demo_01.jpg', 'yolov8n', 640, 0.25, 0.45],
    ['demo_02.jpg', 'yolov8l', 640, 0.25, 0.45],
    ['demo_03.jpg', 'yolov8x', 1280, 0.25, 0.45],
]
demo_app = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
)
demo_app.launch(debug=True)
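
# Note (assumption): running this Space locally requires its dependencies to be
# installed, roughly equivalent to a requirements.txt containing:
#   gradio
#   ultralyticsplus
# ultralyticsplus pulls in ultralytics/torch and sahi; exact version pins depend
# on the Space's own requirements file.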