#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import required libraries
from ultralytics import YOLO
import gradio as gr
import cv2
from src.items import classNames
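# classNames is assumed to be the 80-entry COCO class-name list indexed by
# integer class id; a minimal stand-in if src/items.py is not to hand
# (truncated here, the real list has 80 entries):
#
#     classNames = ['person', 'bicycle', 'car', 'motorcycle', ...]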
# In[ ]:
# detection function
def yolo_detect(feed, vid):
    # `feed` is the webcam input from the UI (unused here); inference runs
    # on the uploaded clip `vid`
    # Load a pretrained YOLOv8n model
    model = YOLO('yolov8n.pt')
    # Run inference on the source; stream=True yields results one frame at a time
    results = model(vid, stream=True, verbose=False)
    frames = []
    # plot annotations
    for frame in results:
        boxes = frame.boxes
        single = frame.orig_img
        for box in boxes:
            # bounding box corners
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)  # convert to int values
            # draw the bounding box on the frame
            cv2.rectangle(single, (x1, y1), (x2, y2), (255, 0, 255), 3)
            # class label at the top-left corner of the box
            cv2.putText(single, classNames[int(box.cls[0])], (x1, y1),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1)
        frames.append(single)
    # write the annotated frames out as an XVID-encoded AVI at 25 fps
    h, w, c = frames[0].shape  # frames[0], not frames[1]: safe for single-frame clips
    out_file = "output.avi"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    writer = cv2.VideoWriter(out_file, fourcc, 25.0, (w, h))
    for f in frames:
        writer.write(f)
    writer.release()
    return out_file
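# A quick local sanity check that bypasses the UI (a sketch; 'sample.mp4' is
# a placeholder path, not a file shipped with this repo):
#
#     out = yolo_detect(None, 'sample.mp4')
#     print(out)  # -> 'output.avi', written to the working directory
#
# Note: the label list could also be read from the model itself via
# model.names (Ultralytics exposes an id -> name mapping), which would
# remove the dependency on src.items.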
# In[ ]:
demo = gr.Interface(fn=yolo_detect,
                    inputs=[gr.PlayableVideo(source='webcam'), gr.Video(autoplay=True)],
                    outputs=[gr.PlayableVideo(autoplay=True, format='avi')],
                    cache_examples=True, allow_flagging='never')
demo.queue()
demo.launch(inline=False, debug=True, show_api=False, quiet=True)
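# When running outside Hugging Face Spaces, a temporary public link can be
# requested with demo.launch(share=True) (a standard Gradio option; shown
# here only as a hint).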