unnati026 committed
Commit 2f10eff
1 Parent(s): ffc50d9

Update app.py

Files changed (1)
  1. app.py +54 -72
app.py CHANGED
@@ -19,14 +19,7 @@ red_tint = np.array([[[0, 0, 255]]], dtype=np.uint8)
 
 model1 = YOLO('yolov8n.pt')
 
-# Set the theme to light mode
-# st.set_theme("light")
-
-# Set page config
-st.set_page_config(page_title="Object Detection App", page_icon="🚗")
-
 st.title("Object Detection and Recognition")
-
 st.write("""
 This web app performs object detection and recognition on a video using YOLOv8.
 It detects various objects, such as people, cars, trucks, backpacks, suspicious handheld devices, handbags, and suitcases.
@@ -35,6 +28,15 @@ The processed video is displayed with alerts highlighted, and you can stop the i
 
 video_file = st.file_uploader("Choose a video file", type=["mp4"])
 
+video_placeholder = st.image([])
+results = None
+
+centered_text = """
+<div style="text-align: center;">
+Built with ❤️ by Unnati
+</div>
+"""
+
 if video_file is not None:
     # Create temporary file for uploaded video
     tfile = tempfile.NamedTemporaryFile(delete=False)
@@ -49,21 +51,15 @@ if video_file is not None:
     red_tinted_overlay = np.tile(red_tint, (1, 1, 1))
 
     stop_button = st.button("Stop Inference")
-    processing_interrupted = False
-
-    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-    progress_bar_processing_slot = st.empty()
 
     # Collect frames in a list
     frames = []
+    frame_counter = 0  # Counter to track frame number
 
-    # Set the desired size as a percentage of the original size
-    target_size_percentage = 0.65
-
-    while cap.isOpened() and not processing_interrupted:
-        alert_flag = False
-        alert_reason = []
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    progress_bar_processing = st.progress(0)
 
+    while cap.isOpened() and not stop_button:
         success, frame = cap.read()
 
         # if the frame is read correctly ret is True
@@ -71,18 +67,9 @@
             # st.warning("Can't receive frame (stream end?). Exiting ...")
             break
 
-        # Calculate the target size
-        target_width = int(frame.shape[1] * target_size_percentage)
-        target_height = int(frame.shape[0] * target_size_percentage)
-
-        # Resize the frame
-        frame = cv2.resize(frame, (target_width, target_height))
-
-        if success:
-            # Check if the stop button is clicked
-            if stop_button:
-                processing_interrupted = True
-                break
+        if frame_counter % 4 == 0:  # Perform inference on every 4th frame
+            alert_flag = False
+            alert_reason = []
 
             # Perform YOLO object detection
             results = model1(frame, conf=0.35, verbose=False, classes=list(alerting_classes.keys()))
@@ -99,59 +86,54 @@ if video_file is not None:
                 alert_flag = True
                 alert_reason.append((0, class_counts[0]))
 
-        text = 'ALERT!'
-        font = cv2.FONT_HERSHEY_DUPLEX
-        font_scale = 0.75
-        thickness = 2
+            text = 'ALERT!'
+            font = cv2.FONT_HERSHEY_DUPLEX
+            font_scale = 0.75
+            thickness = 2
+
+            size = cv2.getTextSize(text, font, font_scale, thickness)
+            x = 0
+            y = int((2 + size[0][1]))
 
-        size = cv2.getTextSize(text, font, font_scale, thickness)
-        x = 0
-        y = int((2 + size[0][1]))
+            img = results[0].plot()
+            if alert_flag:
+                # Resize the red-tinted overlay to match the image size
+                red_tinted_overlay = cv2.resize(red_tinted_overlay, (img.shape[1], img.shape[0]))
+                img = cv2.addWeighted(img, 0.7, red_tinted_overlay, 0.3, 0)
+                cv2.putText(img, text, (x, y), font, font_scale, (0, 0, 0), thickness)
 
-        img = results[0].plot()
-        if alert_flag:
-            # Resize the red-tinted overlay to match the image size
-            red_tinted_overlay = cv2.resize(red_tinted_overlay, (img.shape[1], img.shape[0]))
-            img = cv2.addWeighted(img, 0.7, red_tinted_overlay, 0.3, 0)
-            cv2.putText(img, text, (x, y), font, font_scale, (0, 0, 0), thickness)
+                y += int(size[0][1]) + 10  # Move to the next line
 
+                for cls, count in alert_reason:
+                    alert_text = f'{count} {alerting_classes[cls]}'
+                    cv2.putText(img, alert_text, (x, y), font, font_scale, (0, 0, 0), thickness)
                    y += int(size[0][1]) + 10  # Move to the next line
 
-        for cls, count in alert_reason:
-            alert_text = f'{count} {alerting_classes[cls]}'
-            cv2.putText(img, alert_text, (x, y), font, font_scale, (0, 0, 0), thickness)
-            y += int(size[0][1]) + 10  # Move to the next line
+            # Append the frame to the list
+            frames.append(img)
 
-        # Append the frame to the list
-        frames.append(img)
+            # Update processing progress bar
+            current_frame_processing = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
+            progress_bar_processing.progress(current_frame_processing / total_frames)
 
-        del results
+        frame_counter += 1  # Increment frame counter
 
-        # Update processing progress bar
-        current_frame_processing = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
-        progress = current_frame_processing / total_frames
-        progress_bar_processing_slot.progress(progress)
-        progress_bar_processing_slot.text(f"Processing... {int(progress * 100)}%")
+    # Get the fps from the video capture object
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    frame_delay = 1 / fps if fps > 0 else 1 / 24  # Use 24 fps as a fallback if fps is not available
 
     # Release resources
+    del results
     cap.release()
     tfile.close()
 
-    # Display frames one by one as a video with 24 FPS
-    if processing_interrupted:
-        st.text("User interrupted processing.")
-    else:
-        progress_bar_processing_slot.text("Done!")
-        video_placeholder = st.image([])
-        progress_bar_display = st.progress(0)
-        fps_delay = 1 / 24  # Delay to achieve 24 FPS
-
-        for i, frame in enumerate(frames):
-            video_placeholder.image(frame, channels="BGR", caption="YOLOv8 Inference")
-            # Update display progress bar
-            progress_bar_display.progress((i + 1) / len(frames))
-            # Introduce a delay to achieve 24 FPS
-            time.sleep(fps_delay)
-
-        # Display completion message
-        progress_bar_processing_slot.text("Video Playback Finished!")
+    # Display frames one by one as a video with progress bar
+    progress_bar_display = st.progress(0)
+    for i, frame in enumerate(frames):
+        video_placeholder.image(frame, channels="BGR", caption="YOLOv8 Inference")
+        # Update display progress bar
+        progress_bar_display.progress((i + 1) / len(frames))
+        time.sleep(frame_delay)
+
+st.markdown("<hr>", unsafe_allow_html=True)
+st.markdown(centered_text, unsafe_allow_html=True)
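
For reference, the core pattern this commit moves to can be seen in isolation below: run YOLO inference only on every 4th frame, keep the annotated frames, and pace playback using the source video's FPS with 24 FPS as a fallback. This is a minimal sketch, not the app itself: the yolov8n.pt weights, the stride of 4, the conf=0.35 threshold, and the FPS fallback come from the diff, while the 'sample.mp4' path and the OpenCV window (standing in for the app's Streamlit widgets) are assumptions for illustration.

# Sketch only: infer on every 4th frame, then replay the kept frames at the
# source frame rate (24 FPS fallback). 'sample.mp4' is a placeholder path.
import time

import cv2
from ultralytics import YOLO

model = YOLO('yolov8n.pt')              # same weights as app.py
cap = cv2.VideoCapture('sample.mp4')    # placeholder; the app reads an uploaded temp file

frames = []
frame_counter = 0
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    if frame_counter % 4 == 0:          # inference stride used by the commit
        results = model(frame, conf=0.35, verbose=False)
        frames.append(results[0].plot())  # annotated frame with boxes drawn
    frame_counter += 1

fps = cap.get(cv2.CAP_PROP_FPS)
frame_delay = 1 / fps if fps > 0 else 1 / 24   # fall back to 24 FPS
cap.release()

for frame in frames:
    cv2.imshow('YOLOv8 Inference', frame)      # the app shows these via st.image instead
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    time.sleep(frame_delay)
cv2.destroyAllWindows()

Because only every 4th frame is kept, pacing playback at one source-frame interval per kept frame runs roughly four times faster than real time; the loop introduced by this commit behaves the same way.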