saravanamax committed
Commit 29ccf3f · verified · 1 parent: b354bdf

Create detection.py

Files changed (1): detection.py +397 -0
detection.py ADDED
@@ -0,0 +1,397 @@
+ # Import libraries
+ import numpy as np
+ import pandas as pd
+ import streamlit as st
+
+ import cv2
+ import skimage
+ from PIL import Image, ImageColor
+ from ultralytics import YOLO
+ from sklearn.metrics import mean_squared_error
+
+ import os
+ import json
+ import yaml
+ import time
+
+ def get_labels_dics():
+     # Get tactical map keypoints positions dictionary
+     json_path = "pitch map labels position.json"
+     with open(json_path, 'r') as f:
+         keypoints_map_pos = json.load(f)
+
+     # Get football field keypoints numerical-to-alphabetical mapping
+     yaml_path = "config pitch dataset.yaml"
+     with open(yaml_path, 'r') as file:
+         classes_names_dic = yaml.safe_load(file)
+     classes_names_dic = classes_names_dic['names']
+
+     # Get players detection model numerical-to-alphabetical labels mapping
+     yaml_path = "config players dataset.yaml"
+     with open(yaml_path, 'r') as file:
+         labels_dic = yaml.safe_load(file)
+     labels_dic = labels_dic['names']
+     return keypoints_map_pos, classes_names_dic, labels_dic
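+
+ # NOTE: "pitch map labels position.json" is assumed to map each keypoint label
+ # to its (x, y) pixel position on the tactical map image (it is indexed below
+ # with the detected label names), and both YAML files are assumed to follow the
+ # Ultralytics dataset format, where 'names' maps numerical class ids to names.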
+
+ def create_colors_info(team1_name, team1_p_color, team1_gk_color, team2_name, team2_p_color, team2_gk_color):
+     team1_p_color_rgb = ImageColor.getcolor(team1_p_color, "RGB")
+     team1_gk_color_rgb = ImageColor.getcolor(team1_gk_color, "RGB")
+     team2_p_color_rgb = ImageColor.getcolor(team2_p_color, "RGB")
+     team2_gk_color_rgb = ImageColor.getcolor(team2_gk_color, "RGB")
+
+     colors_dic = {
+         team1_name: [team1_p_color_rgb, team1_gk_color_rgb],
+         team2_name: [team2_p_color_rgb, team2_gk_color_rgb]
+     }
+     colors_list = colors_dic[team1_name] + colors_dic[team2_name]  # Define color list to be used for detected player team prediction
+     color_list_lab = [skimage.color.rgb2lab([i/255 for i in c]) for c in colors_list]  # Convert color_list to L*a*b* space
+     return colors_dic, color_list_lab
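+
+ # Example call (hypothetical team names and colors):
+ #   colors_dic, color_list_lab = create_colors_info(
+ #       "Team A", "#FF0000", "#FFFFFF", "Team B", "#0000FF", "#FFFF00")
+ # color_list_lab then holds four L*a*b* colors ordered as
+ # [team1 player, team1 GK, team2 player, team2 GK]; the team prediction
+ # vote in detect() relies on this ordering.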
+
+ def generate_file_name():
+     # Find the first unused 'detect_{idx}.mp4' file name in the outputs folder
+     list_video_files = os.listdir('./outputs/')
+     idx = 0
+     while True:
+         idx += 1
+         output_file_name = f'detect_{idx}'
+         if output_file_name + '.mp4' not in list_video_files:
+             break
+     return output_file_name
+
+ def detect(cap, stframe, output_file_name, save_output, model_players, model_keypoints,
+            hyper_params, ball_track_hyperparams, plot_hyperparams, num_pal_colors, colors_dic, color_list_lab):
+
+     # Unpack plot options: show keypoints, show color palettes, show ball track, show players
+     show_k, show_pal, show_b, show_p = plot_hyperparams
+
+     # Unpack detection hyperparameters: players confidence, keypoints confidence, keypoints displacement tolerance
+     p_conf, k_conf, k_d_tol = hyper_params
+
+     # Unpack ball tracking hyperparameters
+     nbr_frames_no_ball_thresh, ball_track_dist_thresh, max_track_length = ball_track_hyperparams
+
+     nbr_team_colors = len(list(colors_dic.values())[0])
+
+     if (output_file_name is not None) and (len(output_file_name) == 0):
+         output_file_name = generate_file_name()
+
+     # Read tactical map image
+     tac_map = cv2.imread('tactical map.jpg')
+     tac_height, tac_width = tac_map.shape[:2]  # NumPy image shape is (height, width, channels)
+
+     # Create output video writer
+     if save_output:
+         width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + tac_width
+         height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + tac_height
+         output = cv2.VideoWriter(f'./outputs/{output_file_name}.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30.0, (width, height))
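+         # NOTE: final_img is resized to (width, height) right before writing,
+         # so these dimensions only have to match the writer, not the exact
+         # size of the concatenated image; the fixed 30.0 fps is an assumption
+         # about the input video's frame rate.
+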
+     # Create progress bar
+     tot_nbr_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     st_prog_bar = st.progress(0, text='Detection starting.')
+
+     keypoints_map_pos, classes_names_dic, labels_dic = get_labels_dics()
+
+     # Set variable to record the time when we processed last frame
+     prev_frame_time = 0
+     # Set variable to record the time at which we processed current frame
+     new_frame_time = 0
+
+     # Store the ball track history
+     ball_track_history = {'src': [],
+                           'dst': []}
+
+     nbr_frames_no_ball = 0
+
+     # Keypoint tracking state from the previous frame; initialized empty to
+     # avoid a NameError on frames processed before the first successful
+     # homography estimation
+     detected_labels_prev = []
+     detected_labels_src_pts_prev = np.array([])
+
+     # Loop over input video frames
+     for frame_nbr in range(1, tot_nbr_frames + 1):
+
+         # Update progress bar
+         percent_complete = int(frame_nbr / tot_nbr_frames * 100)
+         st_prog_bar.progress(percent_complete, text=f"Detection in progress ({percent_complete}%)")
+
+         # Read a frame from the video
+         success, frame = cap.read()
+
+         # Reset tactical map image for each new frame
+         tac_map_copy = tac_map.copy()
+
+         # Drop the ball track once the ball has been missing for too many frames
+         if nbr_frames_no_ball > nbr_frames_no_ball_thresh:
+             ball_track_history['dst'] = []
+             ball_track_history['src'] = []
+
+         if success:
+
+             #################### Part 1 ####################
+             # Object Detection & Coordinate Transformation #
+             ################################################
+
+             # Run YOLOv8 players inference on the frame
+             results_players = model_players(frame, conf=p_conf)
+             # Run YOLOv8 field keypoints inference on the frame
+             results_keypoints = model_keypoints(frame, conf=k_conf)
+
+             ## Extract detections information
+             bboxes_p = results_players[0].boxes.xyxy.cpu().numpy()         # Detected players, referees and ball (x,y,x,y) bounding boxes
+             bboxes_p_c = results_players[0].boxes.xywh.cpu().numpy()       # Detected players, referees and ball (x,y,w,h) bounding boxes
+             labels_p = list(results_players[0].boxes.cls.cpu().numpy())    # Detected players, referees and ball labels list
+             confs_p = list(results_players[0].boxes.conf.cpu().numpy())    # Detected players, referees and ball confidence levels
+
+             bboxes_k = results_keypoints[0].boxes.xyxy.cpu().numpy()       # Detected field keypoints (x,y,x,y) bounding boxes
+             bboxes_k_c = results_keypoints[0].boxes.xywh.cpu().numpy()     # Detected field keypoints (x,y,w,h) bounding boxes
+             labels_k = list(results_keypoints[0].boxes.cls.cpu().numpy())  # Detected field keypoints labels list
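+
+             # NOTE: in the Ultralytics results API, boxes.xyxy / boxes.xywh /
+             # boxes.cls / boxes.conf are tensors; .cpu().numpy() moves them to
+             # host memory so they can be consumed by NumPy below.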
+
+             # Convert detected numerical labels to alphabetical labels
+             detected_labels = [classes_names_dic[i] for i in labels_k]
+
+             # Extract detected field keypoints coordinates on the current frame
+             detected_labels_src_pts = np.array([list(np.round(bboxes_k_c[i][:2]).astype(int)) for i in range(bboxes_k_c.shape[0])])
+
+             # Get the detected field keypoints coordinates on the tactical map
+             detected_labels_dst_pts = np.array([keypoints_map_pos[i] for i in detected_labels])
+
+             ## Calculate homography transformation matrix when at least 4 keypoints are detected
+             if len(detected_labels) > 3:
+                 # Always calculate the homography matrix on the first frame;
+                 # afterwards, recalculate it only when the keypoints have moved
+                 if frame_nbr > 1:
+                     # Determine common detected field keypoints between previous and current frames
+                     common_labels = set(detected_labels_prev) & set(detected_labels)
+                     # When at least 4 common keypoints are detected, check whether they are displaced on average beyond the tolerance level
+                     if len(common_labels) > 3:
+                         common_label_idx_prev = [detected_labels_prev.index(i) for i in common_labels]   # Indexes of common detected keypoints in previous frame
+                         common_label_idx_curr = [detected_labels.index(i) for i in common_labels]        # Indexes of common detected keypoints in current frame
+                         coor_common_label_prev = detected_labels_src_pts_prev[common_label_idx_prev]     # Coordinates of common detected keypoints in previous frame
+                         coor_common_label_curr = detected_labels_src_pts[common_label_idx_curr]          # Coordinates of common detected keypoints in current frame
+                         coor_error = mean_squared_error(coor_common_label_prev, coor_common_label_curr)  # Error between previous and current common keypoints coordinates
+                         update_homography = coor_error > k_d_tol                                         # Update homography only if the error surpasses the tolerance level
+                     else:
+                         update_homography = True
+                 else:
+                     update_homography = True
+
+                 if update_homography:
+                     homog, mask = cv2.findHomography(detected_labels_src_pts,  # Calculate homography matrix
+                                                      detected_labels_dst_pts)
+             if 'homog' in locals():
+                 detected_labels_prev = detected_labels.copy()                  # Save current detected keypoint labels for next frame
+                 detected_labels_src_pts_prev = detected_labels_src_pts.copy()  # Save current detected keypoint coordinates for next frame
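+
+                 # homog is a 3x3 matrix H mapping homogeneous frame coordinates
+                 # to tactical map coordinates:
+                 #   s * [x', y', 1]^T = H @ [x, y, 1]^T
+                 # cv2.findHomography estimates H from the >= 4 point
+                 # correspondences collected above.
+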
+                 bboxes_p_c_0 = bboxes_p_c[[i == 0 for i in labels_p], :]    # Bounding boxes (x,y,w,h) of detected players (label 0)
+                 bboxes_p_c_2 = bboxes_p_c[[i == 2 for i in labels_p], :]    # Bounding boxes (x,y,w,h) of detected ball(s) (label 2)
+
+                 # Get coordinates of detected players on frame (x_center, y_center + h/2)
+                 detected_ppos_src_pts = bboxes_p_c_0[:, :2] + np.array([[0]*bboxes_p_c_0.shape[0], bboxes_p_c_0[:, 3]/2]).transpose()
+                 # Get coordinates of the first detected ball (x_center, y_center)
+                 detected_ball_src_pos = bboxes_p_c_2[0, :2] if bboxes_p_c_2.shape[0] > 0 else None
+
+                 if detected_ball_src_pos is None:
+                     nbr_frames_no_ball += 1
+                 else:
+                     nbr_frames_no_ball = 0
+
+                 # Transform players coordinates from the frame plane to the tactical map plane using the calculated homography matrix
+                 pred_dst_pts = []                                            # Initialize players tactical map coordinates list
+                 for pt in detected_ppos_src_pts:                             # Loop over players frame coordinates
+                     pt = np.append(np.array(pt), np.array([1]), axis=0)      # Convert to homogeneous coordinates
+                     dest_point = np.matmul(homog, np.transpose(pt))          # Apply homography transformation
+                     dest_point = dest_point / dest_point[2]                  # Revert to 2D coordinates
+                     pred_dst_pts.append(list(np.transpose(dest_point)[:2]))  # Update players tactical map coordinates list
+                 pred_dst_pts = np.array(pred_dst_pts)
+
+                 # Transform ball coordinates from the frame plane to the tactical map plane using the calculated homography matrix
+                 if detected_ball_src_pos is not None:
+                     pt = np.append(np.array(detected_ball_src_pos), np.array([1]), axis=0)
+                     dest_point = np.matmul(homog, np.transpose(pt))
+                     dest_point = dest_point / dest_point[2]
+                     detected_ball_dst_pos = np.transpose(dest_point)
+
+                     # Track ball history
+                     if show_b:
+                         if len(ball_track_history['src']) > 0:
+                             # Extend the track only if the ball moved less than the distance threshold; otherwise restart it
+                             if np.linalg.norm(detected_ball_src_pos - ball_track_history['src'][-1]) < ball_track_dist_thresh:
+                                 ball_track_history['src'].append((int(detected_ball_src_pos[0]), int(detected_ball_src_pos[1])))
+                                 ball_track_history['dst'].append((int(detected_ball_dst_pos[0]), int(detected_ball_dst_pos[1])))
+                             else:
+                                 ball_track_history['src'] = [(int(detected_ball_src_pos[0]), int(detected_ball_src_pos[1]))]
+                                 ball_track_history['dst'] = [(int(detected_ball_dst_pos[0]), int(detected_ball_dst_pos[1]))]
+                         else:
+                             ball_track_history['src'].append((int(detected_ball_src_pos[0]), int(detected_ball_src_pos[1])))
+                             ball_track_history['dst'].append((int(detected_ball_dst_pos[0]), int(detected_ball_dst_pos[1])))
+
+                 if len(ball_track_history['src']) > max_track_length:  # Keep the track at a fixed maximum length
+                     ball_track_history['src'].pop(0)
+                     ball_track_history['dst'].pop(0)
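+
+                 # NOTE: a collections.deque(maxlen=max_track_length) per key
+                 # would be an equivalent, self-trimming alternative to plain
+                 # lists with pop(0) for this history.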
+
+             ######### Part 2 ##########
+             # Players Team Prediction #
+             ###########################
+
+             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Convert frame to RGB
+             obj_palette_list = []                               # Initialize players color palette list
+             palette_interval = (0, num_pal_colors)              # Interval of dominant colors to extract from the palette (1st to num_pal_colors-th)
+
+             ## Loop over detected players (label 0) and extract dominant colors palette based on defined interval
+             for i, j in enumerate(list(results_players[0].boxes.cls.cpu().numpy())):
+                 if int(j) == 0:
+                     bbox = results_players[0].boxes.xyxy.cpu().numpy()[i, :]                   # Get bbox info (x,y,x,y)
+                     obj_img = frame_rgb[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]  # Crop bbox out of the frame
+                     obj_img_w, obj_img_h = obj_img.shape[1], obj_img.shape[0]
+                     # Crop a central, roughly torso-level patch of the player, where the jersey color dominates
+                     center_filter_x1 = np.max([(obj_img_w//2) - (obj_img_w//5), 1])
+                     center_filter_x2 = (obj_img_w//2) + (obj_img_w//5)
+                     center_filter_y1 = np.max([(obj_img_h//3) - (obj_img_h//5), 1])
+                     center_filter_y2 = (obj_img_h//3) + (obj_img_h//5)
+                     center_filter = obj_img[center_filter_y1:center_filter_y2,
+                                             center_filter_x1:center_filter_x2]
+                     obj_pil_img = Image.fromarray(np.uint8(center_filter))           # Convert to Pillow image
+                     reduced = obj_pil_img.convert("P", palette=Image.Palette.WEB)    # Convert to web palette (216 colors)
+                     palette = reduced.getpalette()                                   # Get palette as [r,g,b,r,g,b,...]
+                     palette = [palette[3*n:3*n+3] for n in range(256)]               # Group 3 by 3 = [[r,g,b],[r,g,b],...]
+                     color_count = [(n, palette[m]) for n, m in reduced.getcolors()]  # List of palette colors with their frequency
+                     RGB_df = pd.DataFrame(color_count, columns=['cnt', 'RGB']).sort_values(  # Keep the defined palette interval, most frequent colors first
+                         by='cnt', ascending=False).iloc[
+                             palette_interval[0]:palette_interval[1], :]
+                     palette = list(RGB_df.RGB)                                       # Convert palette to list (for faster processing)
+
+                     # Update detected players color palette list
+                     obj_palette_list.append(palette)
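+
+             # NOTE: reduced.getcolors() returns (count, palette_index) pairs, so
+             # color_count pairs each web-palette color with its pixel frequency
+             # inside the cropped patch; only the num_pal_colors most frequent
+             # colors are kept per player.
+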
+             ## Calculate distances between each color of every detected player color palette and the predefined teams colors
+             players_distance_features = []
+             # Loop over detected players extracted color palettes
+             for palette in obj_palette_list:
+                 palette_distance = []
+                 palette_lab = [skimage.color.rgb2lab([i/255 for i in color]) for color in palette]  # Convert colors to L*a*b* space
+                 # Loop over colors in palette
+                 for color in palette_lab:
+                     distance_list = []
+                     # Loop over predefined list of teams colors
+                     for c in color_list_lab:
+                         #distance = np.linalg.norm([i/255 - j/255 for i,j in zip(color,c)])
+                         distance = skimage.color.deltaE_cie76(color, c)  # Euclidean distance in L*a*b* color space (CIE76)
+                         distance_list.append(distance)                   # Update distance list for current color
+                     palette_distance.append(distance_list)               # Update distance list for current palette
+                 players_distance_features.append(palette_distance)       # Update distance features list
+             ## Predict detected players teams based on distance features
+             players_teams_list = []
+             # Loop over players distance features
+             for distance_feats in players_distance_features:
+                 vote_list = []
+                 # Loop over distances for each color
+                 for dist_list in distance_feats:
+                     team_idx = dist_list.index(min(dist_list)) // nbr_team_colors  # Assign team index for current color based on min distance
+                     vote_list.append(team_idx)                                     # Update voting list with current color team prediction
+                 players_teams_list.append(max(vote_list, key=vote_list.count))     # Predict current player team by vote counting
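+
+             # Since color_list_lab is ordered [team1 player, team1 GK,
+             # team2 player, team2 GK] and nbr_team_colors == 2, integer division
+             # of the closest color's index by nbr_team_colors yields 0 for team 1
+             # and 1 for team 2; the majority vote over a player's palette colors
+             # then decides the team.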
+
+             #################### Part 3 #####################
+             # Updated Frame & Tactical Map With Annotations #
+             #################################################
+
+             ball_color_bgr = (0, 0, 255)  # Color (BGR) for ball annotation on tactical map
+             j = 0                         # Initialize counter of detected players
+             palette_box_size = 10         # Set color box size in pixels (for display)
+             annotated_frame = frame       # Create annotated frame
+
+             # Loop over all objects detected by the players detection model
+             for i in range(bboxes_p.shape[0]):
+                 conf = confs_p[i]         # Get confidence of current detected object
+                 if labels_p[i] == 0:      # Display annotation for detected players (label 0)
+
+                     # Display extracted color palette for each detected player
+                     if show_pal:
+                         palette = obj_palette_list[j]  # Get color palette of the detected player
+                         for k, c in enumerate(palette):
+                             c_bgr = c[::-1]            # Convert color to BGR
+                             annotated_frame = cv2.rectangle(annotated_frame, (int(bboxes_p[i, 2]) + 3,  # Add color palette annotation on frame
+                                                                               int(bboxes_p[i, 1]) + k*palette_box_size),
+                                                             (int(bboxes_p[i, 2]) + palette_box_size,
+                                                              int(bboxes_p[i, 1]) + palette_box_size*(k+1)),
+                                                             c_bgr, -1)
+
+                     team_name = list(colors_dic.keys())[players_teams_list[j]]  # Get detected player team prediction
+                     color_rgb = colors_dic[team_name][0]                        # Get detected player team color
+                     color_bgr = color_rgb[::-1]                                 # Convert color to BGR
+                     if show_p:
+                         annotated_frame = cv2.rectangle(annotated_frame, (int(bboxes_p[i, 0]), int(bboxes_p[i, 1])),  # Add bbox annotations with team colors
+                                                         (int(bboxes_p[i, 2]), int(bboxes_p[i, 3])), color_bgr, 1)
+
+                         annotated_frame = cv2.putText(annotated_frame, team_name + f" {conf:.2f}",  # Add team name annotations
+                                                       (int(bboxes_p[i, 0]), int(bboxes_p[i, 1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
+                                                       color_bgr, 2)
+
+                     # Add color-coded player position annotation on the tactical map if a homography matrix is available
+                     if 'homog' in locals():
+                         tac_map_copy = cv2.circle(tac_map_copy, (int(pred_dst_pts[j][0]), int(pred_dst_pts[j][1])),
+                                                   radius=5, color=color_bgr, thickness=-1)
+                         tac_map_copy = cv2.circle(tac_map_copy, (int(pred_dst_pts[j][0]), int(pred_dst_pts[j][1])),
+                                                   radius=5, color=(0, 0, 0), thickness=1)
+
+                     j += 1  # Update players counter
+                 else:       # Display annotation for other detections (labels 1, 2)
+                     annotated_frame = cv2.rectangle(annotated_frame, (int(bboxes_p[i, 0]), int(bboxes_p[i, 1])),  # Add white colored bbox annotations
+                                                     (int(bboxes_p[i, 2]), int(bboxes_p[i, 3])), (255, 255, 255), 1)
+                     annotated_frame = cv2.putText(annotated_frame, labels_dic[labels_p[i]] + f" {conf:.2f}",  # Add white colored label text annotations
+                                                   (int(bboxes_p[i, 0]), int(bboxes_p[i, 1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
+                                                   (255, 255, 255), 2)
+
+             # Add ball position annotation on the tactical map if detected
+             # ('homog' is checked first so the ball variables are guaranteed to be defined)
+             if 'homog' in locals() and detected_ball_src_pos is not None:
+                 tac_map_copy = cv2.circle(tac_map_copy, (int(detected_ball_dst_pos[0]),
+                                                          int(detected_ball_dst_pos[1])), radius=5,
+                                           color=ball_color_bgr, thickness=3)
+             if show_k:
+                 for i in range(bboxes_k.shape[0]):
+                     annotated_frame = cv2.rectangle(annotated_frame, (int(bboxes_k[i, 0]), int(bboxes_k[i, 1])),  # Add black bbox annotations for detected field keypoints
+                                                     (int(bboxes_k[i, 2]), int(bboxes_k[i, 3])), (0, 0, 0), 1)
+             # Plot the ball track on the tactical map
+             if len(ball_track_history['src']) > 0:
+                 points = np.hstack(ball_track_history['dst']).astype(np.int32).reshape((-1, 1, 2))
+                 tac_map_copy = cv2.polylines(tac_map_copy, [points], isClosed=False, color=(0, 0, 100), thickness=2)
+
+             # Combine annotated frame and tactical map in one image with colored border separation
+             border_color = [255, 255, 255]  # Set border color (BGR)
+             annotated_frame = cv2.copyMakeBorder(annotated_frame, 40, 10, 10, 10,  # Add borders to annotated frame
+                                                  cv2.BORDER_CONSTANT, value=border_color)
+             tac_map_copy = cv2.copyMakeBorder(tac_map_copy, 70, 50, 10, 10, cv2.BORDER_CONSTANT,  # Add borders to tactical map
+                                               value=border_color)
+             tac_map_copy = cv2.resize(tac_map_copy, (tac_map_copy.shape[1], annotated_frame.shape[0]))  # Resize tactical map to the frame height
+             final_img = cv2.hconcat((annotated_frame, tac_map_copy))  # Concatenate both images
+             ## Add info annotation
+             cv2.putText(final_img, "Tactical Map", (1370, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 0), 2)
+
+             new_frame_time = time.time()                # Get time after finishing current frame processing
+             fps = 1/(new_frame_time - prev_frame_time)  # Calculate FPS as 1/(frame processing duration)
+             prev_frame_time = new_frame_time            # Save current time to be used for next frame
+             cv2.putText(final_img, "FPS: " + str(int(fps)), (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 0), 2)
+
+             # Display the annotated frame
+             stframe.image(final_img, channels="BGR")
+             #cv2.imshow("YOLOv8 Inference", frame)
+             if save_output:
+                 output.write(cv2.resize(final_img, (width, height)))
+
+     # Remove progress bar and return
+     st_prog_bar.empty()
+     return True
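+
+ # Example wiring (hypothetical paths and parameter values), e.g. from a
+ # Streamlit app that imports this module:
+ #   cap = cv2.VideoCapture("input_video.mp4")
+ #   stframe = st.empty()
+ #   colors_dic, color_list_lab = create_colors_info(
+ #       "Team A", "#FF0000", "#FFFFFF", "Team B", "#0000FF", "#FFFF00")
+ #   detect(cap, stframe, "", save_output=True,
+ #          model_players=YOLO("models/players.pt"),
+ #          model_keypoints=YOLO("models/keypoints.pt"),
+ #          hyper_params=(0.6, 0.7, 7), ball_track_hyperparams=(30, 100, 35),
+ #          plot_hyperparams=(True, False, True, True), num_pal_colors=3,
+ #          colors_dic=colors_dic, color_list_lab=color_list_lab)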