Spaces: Runtime error
Update app.py
app.py CHANGED
```diff
@@ -2,12 +2,8 @@ import gradio as gr
 import torch
 from sahi.prediction import ObjectPrediction
 from sahi.utils.cv import visualize_object_predictions, read_image
-from
+from ultralyticsplus import YOLO
 
-# Images
-# torch.hub.download_url_to_file('https://raw.githubusercontent.com/kadirnar/dethub/main/data/images/highway.jpg', 'highway.jpg')
-# torch.hub.download_url_to_file('https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg', 'highway1.jpg')
-# torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
 
 def yolov8_inference(
     image: gr.inputs.Image = None,
```
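The import hunk swaps in `ultralyticsplus` (the removed import is truncated to a bare `from` in this view, so its original target is not recoverable here) and drops the commented-out `torch.hub` sample-image downloads. For orientation, a minimal sketch of how that package is typically driven, assuming its early-2023 interface; the weight file and image path are placeholders, not the Space's actual assets:

```python
from ultralyticsplus import YOLO, render_result

# Sketch only: ultralyticsplus.YOLO resolves a local or downloadable .pt
# the same way plain ultralytics does (placeholder weight name).
model = YOLO('yolov8m.pt')

results = model.predict('highway.jpg')  # placeholder image path
render = render_result(model=model, image='highway.jpg', result=results[0])
render.show()
```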
```diff
@@ -28,8 +24,11 @@ def yolov8_inference(
         Rendered image
     """
     model = YOLO(f'{model_path}.pt')
-    model
-    model.
+    # set model parameters
+    model.overrides['conf'] = conf_threshold  # NMS confidence threshold
+    model.overrides['iou'] = iou_threshold  # NMS IoU threshold
+    model.overrides['agnostic_nms'] = False  # NMS class-agnostic
+    model.overrides['max_det'] = 1000  # maximum number of detections per image
     results = model.predict(image, imgsz=image_size, return_outputs=True)
     object_prediction_list = []
     for _, image_results in enumerate(results):
```
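Two truncated removed lines (`model`, `model.`) give way to explicit `model.overrides[...]` assignments, which is how ultralyticsplus exposes NMS settings. The middle of `yolov8_inference` falls outside the hunk context, but the sahi imports strongly suggest the loop wraps each detection as an `ObjectPrediction` and draws them with `visualize_object_predictions`. A plausible sketch only, assuming `return_outputs=True` yielded per-image dicts with a `'det'` array of `[x1, y1, x2, y2, score, class_id]` rows; that layout and the helper name are assumptions:

```python
from sahi.prediction import ObjectPrediction
from sahi.utils.cv import visualize_object_predictions, read_image

def render_detections(image_path, image_results, class_names):
    """Hypothetical helper mirroring what the elided loop likely does."""
    object_prediction_list = []
    for x1, y1, x2, y2, score, class_id in image_results['det']:  # assumed layout
        object_prediction_list.append(
            ObjectPrediction(
                bbox=[int(x1), int(y1), int(x2), int(y2)],
                category_id=int(class_id),
                category_name=class_names[int(class_id)],
                score=float(score),
            )
        )
    image = read_image(image_path)
    # visualize_object_predictions returns a dict; its 'image' entry holds
    # the rendered numpy array.
    rendered = visualize_object_predictions(
        image=image, object_prediction_list=object_prediction_list
    )
    return rendered['image']
```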
```diff
@@ -62,7 +61,7 @@ def yolov8_inference(
 inputs = [
     gr.inputs.Image(type="filepath", label="Input Image"),
     gr.inputs.Dropdown(["yolov8n", "yolov8m", "yolov8l", "yolov8x"],
-        default="
+        default="yolov8m", label="Model"),
     gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
```
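The dropdown gains its default and label, completing the input list; `gr.Interface` feeds these components positionally into `yolov8_inference`'s parameters. Note that `gr.inputs.*` and `default=` are legacy Gradio 2-era names; on Gradio 3 and later the same UI would use top-level components with `value=`. A sketch of the equivalent, not the Space's code:

```python
import gradio as gr

# Same five inputs on Gradio 3.x+, where gr.inputs.* is gone
# and `default=` became `value=`.
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown(["yolov8n", "yolov8m", "yolov8l", "yolov8x"],
                value="yolov8m", label="Model"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
]
```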
```diff
@@ -71,14 +70,14 @@ inputs = [
 outputs = gr.outputs.Image(type="filepath", label="Output Image")
 title = "State-of-the-Art YOLO Models for Object detection"
 
-# examples = [['
+# examples = [['demo_01.jpg', 'yolov8n', 640, 0.25, 0.45], ['demo_02.jpg', 'yolov8l', 640, 0.25, 0.45], ['demo_03.jpg', 'yolov8x', 1280, 0.25, 0.45]]
 demo_app = gr.Interface(
     fn=yolov8_inference,
     inputs=inputs,
     outputs=outputs,
     title=title,
-
-
+    examples=examples,
+    cache_examples=True,
     theme='huggingface',
 )
 demo_app.launch(debug=True, enable_queue=True)
```
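This last hunk is the likely source of the Space's "Runtime error" badge: the commit passes `examples=examples` and `cache_examples=True` to `gr.Interface`, but the line that defines `examples` stays commented out, so importing app.py raises `NameError: name 'examples' is not defined`. A minimal fix is to actually define the list; the file names come from the commented-out line, and those demo images must exist in the repo because `cache_examples=True` runs inference on them at startup:

```python
# Define the examples before referencing them in gr.Interface.
examples = [
    ['demo_01.jpg', 'yolov8n', 640, 0.25, 0.45],
    ['demo_02.jpg', 'yolov8l', 640, 0.25, 0.45],
    ['demo_03.jpg', 'yolov8x', 1280, 0.25, 0.45],
]

demo_app = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True)
```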