unnati026 committed
Commit a30dae1
1 Parent(s): 6848e60

Update app.py

Files changed (1): app.py +54 -8
app.py CHANGED
@@ -3,6 +3,7 @@ import cv2
 import tempfile
 from ultralytics import YOLO
 import numpy as np
+import time
 
 alerting_classes = {
     0: 'People',
@@ -35,22 +36,32 @@ if video_file is not None:
     # Create red-tinted overlay
     red_tinted_overlay = np.tile(red_tint, (1, 1, 1))
 
-    stframe = st.empty()
-
     stop_button = st.button("Stop Inference")
+    processing_interrupted = False
+
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    progress_bar_processing_slot = st.empty()
+
+    # Collect frames in a list
+    frames = []
 
-    while cap.isOpened() and not stop_button:
+    while cap.isOpened() and not processing_interrupted:
         alert_flag = False
         alert_reason = []
 
         success, frame = cap.read()
 
-        # if frame is read correctly ret is True
+        # if the frame is read correctly ret is True
         if not success:
-            st.warning("Can't receive frame (stream end?). Exiting ...")
+            # st.warning("Can't receive frame (stream end?). Exiting ...")
            break
 
         if success:
+            # Check if the stop button is clicked
+            if stop_button:
+                processing_interrupted = True
+                break
+
             # Perform YOLO object detection
             results = model1(frame, conf=0.35, verbose=False, classes=list(alerting_classes.keys()))
 
@@ -66,15 +77,50 @@
                 alert_flag = True
                 alert_reason.append((0, class_counts[0]))
 
-            # Draw bounding boxes and alerts if necessary
+            text = 'ALERT!'
+            font = cv2.FONT_HERSHEY_DUPLEX
+            font_scale = 0.75
+            thickness = 2
+
+            size = cv2.getTextSize(text, font, font_scale, thickness)
+            x = 0
+            y = int((2 + size[0][1]))
+
             img = results[0].plot()
             if alert_flag:
+                # Resize the red-tinted overlay to match the image size
                 red_tinted_overlay = cv2.resize(red_tinted_overlay, (img.shape[1], img.shape[0]))
                 img = cv2.addWeighted(img, 0.7, red_tinted_overlay, 0.3, 0)
+                cv2.putText(img, text, (x, y), font, font_scale, (0, 0, 0), thickness)
 
-            stframe.image(img, channels="BGR", caption="YOLOv8 Inference")
+                y += int(size[0][1]) + 10  # Move to the next line
+
+                for cls, count in alert_reason:
+                    alert_text = f'{count} {alerting_classes[cls]}'
+                    cv2.putText(img, alert_text, (x, y), font, font_scale, (0, 0, 0), thickness)
+                    y += int(size[0][1]) + 10  # Move to the next line
+
+            # Append the frame to the list
+            frames.append(img)
 
             del results
+
+            # Update processing progress bar
+            current_frame_processing = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
+            progress = current_frame_processing / total_frames
+            progress_bar_processing_slot.progress(progress)
+            progress_bar_processing_slot.text(f"Processing... {int(progress * 100)}%")
+
+    # Release resources
     cap.release()
-    cv2.destroyAllWindows()
     tfile.close()
+
+    # Display frames one by one as a video with 24 FPS
+    if processing_interrupted:
+        st.text("User interrupted processing.")
+    else:
+        progress_bar_processing_slot.text("Done!")
+        video_placeholder = st.image([], channels="BGR", caption="YOLOv8 Inference")
+        progress_bar_display = st.progress(0)
+        fps_delay = 1 / 24  # Delay to achieve 24 FPS
+
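
The visible hunk cuts off at this point. Taken together, the change moves the app from showing each frame as it is processed (the old stframe.image call) to a two-phase flow: first process every frame, collecting the annotated images in frames while driving progress_bar_processing_slot, then play the collected frames back at roughly 24 FPS. A minimal sketch of how that playback loop might continue, reusing the names the commit introduces (frames, video_placeholder, progress_bar_display, fps_delay) and the time import added at the top; this is an assumption, since the rest of the hunk is not shown here:

        # Assumed continuation (not part of the visible diff): play back the
        # collected frames, updating the image placeholder and a display
        # progress bar, sleeping between frames to approximate 24 FPS.
        for i, frame in enumerate(frames):
            video_placeholder.image(frame, channels="BGR", caption="YOLOv8 Inference")
            progress_bar_display.progress((i + 1) / len(frames))
            time.sleep(fps_delay)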