shriarul5273 committed
Commit c83c77e
1 Parent(s): 3959175
Files changed (2)
  1. app.py +13 -13
  2. data/time.csv +0 -2
app.py CHANGED
@@ -5,8 +5,7 @@ from utils.general import (check_img_size, cv2,
 from utils.plots import Annotator, colors
 import numpy as np
 import gradio as gr
-import pandas as pd
-
+import time
 data = 'data/coco128.yaml'


@@ -74,8 +73,9 @@ def detect(im,model,device,iou_threshold=0.45,confidence_threshold=0.25):
     img = img.unsqueeze(0)

     # Inference
+    start = time.time()
     pred = model(img, augment=False)
-
+    fps_inference = 1/(time.time()-start)
     # NMS
     pred = non_max_suppression(pred, confidence_threshold, iou_threshold, None, False, max_det=10)

@@ -95,7 +95,7 @@ def detect(im,model,device,iou_threshold=0.45,confidence_threshold=0.25):
             print(xyxy,label)
             annotator.box_label(xyxy, label, color=colors(c, True))

-    return imgs
+    return imgs,fps_inference


 def inference(img,model_link,iou_threshold,confidence_threshold):
@@ -116,16 +116,17 @@ def inference2(video,model_link,iou_threshold,confidence_threshold):
     fps = frames.get(cv2.CAP_PROP_FPS)
     image_size = (int(frames.get(cv2.CAP_PROP_FRAME_WIDTH)),int(frames.get(cv2.CAP_PROP_FRAME_HEIGHT)))
     finalVideo = cv2.VideoWriter('output.mp4',cv2.VideoWriter_fourcc(*'VP90'), fps, image_size)
-    p = 1
+    fps_video = []
     while frames.isOpened():
         ret,frame = frames.read()
         if not ret:
             break
-        frame = detect(frame,model,device,iou_threshold,confidence_threshold)
+        frame,fps = detect(frame,model,device,iou_threshold,confidence_threshold)
+        fps_video.append(fps)
         finalVideo.write(frame)
     frames.release()
     finalVideo.release()
-    return 'output.mp4'
+    return 'output.mp4',np.mean(fps_video)



@@ -134,18 +135,16 @@ examples_images = ['data/images/bus.jpg',
 examples_videos = ['data/video/input_0.mp4',
                    'data/video/input_1.mp4']

-models = ['yolov5n','yolov5s','yolov5m','yolov5l','yolov5x']
+models = ['yolov5s','yolov5n','yolov5m','yolov5l','yolov5x']

 with gr.Blocks() as demo:
-    csv = pd.read_csv('data/time.csv')
-    csv['id'] = csv['id'] + 1
-    csv.to_csv('data/time.csv',index=False)
     gr.Markdown("## YOLOv5 Inference")
     with gr.Tab("Image"):
         gr.Markdown("## YOLOv5 Inference on Image")
         with gr.Row():
             image_input = gr.Image(type='pil', label="Input Image", source="upload")
             image_output = gr.Image(type='pil', label="Output Image", source="upload")
+            fps_image = gr.Number(value=0,label='FPS')
         image_drop = gr.Dropdown(choices=models,value=models[0])
         image_iou_threshold = gr.Slider(label="IOU Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.45)
         image_conf_threshold = gr.Slider(label="Confidence Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.25)
@@ -156,6 +155,7 @@ with gr.Blocks() as demo:
         with gr.Row():
             video_input = gr.Video(type='pil', label="Input Image", source="upload")
             video_output = gr.Video(type="pil", label="Output Image",format="mp4")
+            fps_video = gr.Number(value=0,label='FPS')
         video_drop = gr.Dropdown(choices=models,value=models[0])
         video_iou_threshold = gr.Slider(label="IOU Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.45)
         video_conf_threshold = gr.Slider(label="Confidence Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.25)
@@ -168,9 +168,9 @@ with gr.Blocks() as demo:

     text_button.click(inference, inputs=[image_input,image_drop,
                                          image_iou_threshold,image_conf_threshold],
-                      outputs=image_output)
+                      outputs=[image_output,fps_image])
     video_button.click(inference2, inputs=[video_input,video_drop,
                                            video_iou_threshold,video_conf_threshold],
-                       outputs=video_output)
+                       outputs=[video_output,fps_video])

 demo.launch()
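Taken together, the app.py changes time each model forward pass, convert the latency to an instantaneous FPS value, and return it alongside the annotated image so Gradio can display it in the new gr.Number fields (for video, the per-frame values are averaged with np.mean). A minimal standalone sketch of that timing pattern follows; run_model and frames are placeholder names for illustration, not identifiers from the commit:

    import time
    import numpy as np

    def timed_forward(run_model, img):
        # Time one forward pass and convert the latency to frames per second,
        # mirroring the start/1/(time.time()-start) pattern added to detect().
        start = time.time()
        pred = run_model(img)
        fps = 1.0 / (time.time() - start)
        return pred, fps

    def mean_fps(run_model, frames):
        # Average the per-frame FPS over a clip, as inference2() does with np.mean.
        return float(np.mean([timed_forward(run_model, f)[1] for f in frames]))

Note that the measured value covers only the model forward pass, not pre- or post-processing such as NMS and annotation.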
data/time.csv DELETED
@@ -1,2 +0,0 @@
-id
-2