import cv2
import torch
import numpy as np
import gradio as gr

# Load a pretrained YOLOv5-large model from the Ultralytics hub.
model = torch.hub.load('ultralytics/yolov5', 'yolov5l', pretrained=True)

# Example file lists (only used if Gradio examples are enabled below).
path = [['image_0.jpg'], ['image_1.jpg']]
video_path = [['video_test.mp4']]

# Polygon outlining the parking area, in resized (1020x600) frame coordinates.
area = [(48, 430), (18, 515), (407, 485), (750, 425), (690, 370)]
total_space = 12


def show_preds_video():
    cap = cv2.VideoCapture('Video_1.mp4')
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        count += 1
        # Process every second frame to reduce load.
        if count % 2 != 0:
            continue

        frame = cv2.resize(frame, (1020, 600))
        frame_copy = frame.copy()
        vehicle_cnt = 0

        results = model(frame)
        for index, row in results.pandas().xyxy[0].iterrows():
            x1 = int(row['xmin'])
            y1 = int(row['ymin'])
            x2 = int(row['xmax'])
            y2 = int(row['ymax'])
            d = row['name']
            # Centre of the detection box.
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2
            # Count only cars and trucks whose box centre lies inside the
            # parking polygon. (The original `('car' or 'truck') in d` check
            # only ever tested for 'car'.)
            if d in ('car', 'truck'):
                inside = cv2.pointPolygonTest(np.array(area, np.int32), (cx, cy), False)
                if inside > 0:
                    cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    cv2.putText(frame_copy, str(d), (x1, y1), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 2)
                    vehicle_cnt += 1

        free_space = total_space - vehicle_cnt
        cv2.putText(frame_copy, "Free space: " + str(free_space), (50, 50), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        cv2.putText(frame_copy, "vehicles: " + str(vehicle_cnt), (50, 85), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0, 255, 0), 2)

        # Gradio streams frames from this generator; convert BGR -> RGB for display.
        yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)


inputs_video = [
    # gr.components.Video(type="filepath", label="Input Video"),
]
outputs_video = [
    gr.components.Image(type="numpy", label="Output Image"),
]

interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Parking space counter",
    description="Click Generate!",
    # examples=video_path,
    cache_examples=False,
)

gr.TabbedInterface(
    [interface_video],
    tab_names=['Video inference']
).queue().launch()
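
# ---------------------------------------------------------------------------
# Optional sanity check (a minimal sketch, separate from the app above; run it
# on its own before launching). The test point (400, 460) is an assumed
# in-lot coordinate for the resized 1020x600 frame, not something from the
# original script.
# ---------------------------------------------------------------------------
# import cv2
# import numpy as np
#
# area = [(48, 430), (18, 515), (407, 485), (750, 425), (690, 370)]
# # With measureDist=False, pointPolygonTest returns +1 for a point inside
# # the polygon, -1 outside, and 0 on the edge.
# assert cv2.pointPolygonTest(np.array(area, np.int32), (400, 460), False) > 0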