|
import gradio as gr |
|
import torch |
|
from ultralyticsplus import YOLO, render_result |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def yoloV8_func(Video: gr.Video = None,
                Video_size: gr.Slider = 640,
                conf_threshold: gr.Slider = 0.4,
                iou_threshold: gr.Slider = 0.50):
    """Run YOLOv8 object detection on the given video and return the rendered result.

    Args:
        Video (gr.Video, optional): Input video to detect objects on. Defaults to None.
        Video_size (int, optional): Inference image size (``imgsz``) for the model.
            Defaults to 640.
        conf_threshold (float, optional): Confidence threshold for detections.
            Defaults to 0.4 (the UI slider overrides this with its own value).
        iou_threshold (float, optional): IoU threshold used for non-max suppression.
            Defaults to 0.50.

    Returns:
        The rendered detection output produced by ``ultralyticsplus.render_result``.
    """
    # NOTE(review): the model is reloaded on every call. That is acceptable for a
    # demo but wasteful; consider loading it once at module level for production.
    model_path = "best.pt"
    model = YOLO(model_path)

    results = model.predict(Video,
                            conf=conf_threshold,
                            iou=iou_threshold,
                            imgsz=Video_size)

    # Log the detections of the first result for debugging purposes.
    box = results[0].boxes
    print("Object type:", box.cls)
    print("Coordinates:", box.xyxy)
    print("Probability:", box.conf)

    # BUG FIX: render_result expects the source media via the `image` keyword;
    # the original passed an unsupported `Video=` keyword, which raises TypeError.
    render = render_result(model=model, image=Video, result=results[0])
    return render
|
|
|
|
|
# --- Gradio UI wiring --------------------------------------------------------

# Input widgets, in the same order as the handler's positional parameters.
inputs = [
    gr.Video(label="Input Video"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32,
              label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05,
              label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05,
              label="IOU Threshold"),
]

# Single output widget displaying the rendered detections.
outputs = gr.Video(label="Output Video")

title = "👨💻Made By Team 8848(Aerothon6.0)👨💻: Airplane Video Damage Detection with different advanced IOT integrated features."

# Assemble the interface and start the server with a public share link
# and debug logging enabled.
yolo_app = gr.Interface(fn=yoloV8_func, inputs=inputs, outputs=outputs, title=title)

yolo_app.launch(share=True, debug=True)