Spaces:
Runtime error
Runtime error
Mahimai Raja J
committed on
Commit
•
f15cb69
1
Parent(s):
d1133a5
Source code >>
Browse files- app.py +135 -0
- utils/modules.py +257 -0
app.py
ADDED
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from PIL import Image
|
3 |
+
from utils.modules import detect, detectVideo, getDataframe
|
4 |
+
from utils.modules import getFlag, setFlag, resetFlag
|
5 |
+
from utils.modules import initial_setup
|
6 |
+
|
7 |
+
|
8 |
+
@st.cache_data
def convert_df(df):
    """Serialize the counts dataframe to UTF-8-encoded CSV bytes.

    Cached by Streamlit so repeated reruns reuse the encoded result.
    """
    csv_text = df.to_csv()
    return csv_text.encode('utf-8')
|
15 |
+
|
16 |
+
def processImage():
    """
    UI flow for analysing a single uploaded image.

    Saves the upload under data/, runs person detection on it, then shows
    the people count, the annotated result image and a download button.
    """
    # threhold = st.slider('Choose a threshold value', 0.0, 1.0, 0.40)
    image_file = st.file_uploader("Upload An Image", type=['png', 'jpeg', 'jpg'])
    if image_file is not None:
        # Persist the upload to disk so the detector can read it by path.
        file_type = (image_file.type).split('/')[1]
        input_file_name = f"data/Input.{file_type}"
        with open(input_file_name, mode="wb") as f:
            f.write(image_file.getbuffer())
        # detect() writes the annotated image to data/result.jpg and
        # returns the number of people found.
        count = detect(input_file_name, )
        img_ = Image.open("data/result.jpg")
        st.subheader(f"People Count = {count}")
        st.image(img_)
        with open("data/result.jpg", "rb") as file:
            st.download_button(
                label="Download image",
                data=file,
                file_name="Processed.jpg",
                # "image/jpg" is not a registered MIME type; JPEG is "image/jpeg".
                mime="image/jpeg"
            )
|
41 |
+
|
42 |
+
def processVideo():
    """
    UI flow for analysing an uploaded video.

    Saves the upload under data/, runs the detector once (guarded by the
    on-disk Flag signal so reruns don't re-process), then shows the
    processed video, the visit-trend chart/table and download buttons.
    """
    # threhold = st.slider('Choose a threshold value', 0.0, 1.0, 0.40)
    uploaded_video = st.file_uploader("Upload a Video", type = ['mp4','mpeg','mov'])
    if uploaded_video is not None :
        file_type = (uploaded_video.type).split('/')[1]
        vid = f"data/Input.{file_type}"
        # Persist the upload to disk so the detector can read it by path.
        # (The original duplicated the None check and leaked two open file
        # handles on the input video whose bytes were never used.)
        with open(vid, mode='wb') as f:
            f.write(uploaded_video.read())

        # Flag == 1 means this upload has not been processed yet.
        first_process = int(getFlag())
        if first_process == 1:
            with st.spinner('Processing the video ⌛️'):
                detectVideo(vid, )
            setFlag()
            first_process = int(getFlag())
        with open('data/output.mp4', 'rb') as st_video:
            video_bytes = st_video.read()
        st.video(video_bytes)
        df = getDataframe()
        st.markdown("<h3 style='text-align: center;'>People Visit Trend 📊</h3>", unsafe_allow_html=True)
        col1, col2 = st.columns([3, 1])

        col1.line_chart(data=df, x='Time', y='Count')
        col2.dataframe(data=df, )
        row1, row2, _ = st.columns([3, 3, 5])
        with open("data/output.mp4", "rb") as file:
            row1.download_button(
                label="Download video",
                data=file,
                file_name="Processed.mp4",
                mime="video/mp4"
            )
        csv = convert_df(df)

        row2.download_button(
            label="Download data as CSV",
            data=csv,
            file_name='data/density.csv',
            mime='text/csv',
        )
|
90 |
+
|
91 |
+
|
92 |
+
def main():
    """
    Top-level Streamlit UI: page configuration, header, and dispatch to
    either the image or the video analysis flow.
    """
    st.set_page_config(
        page_title ="Track-X",
        page_icon = "🧊",
        menu_items={
            'About': "# iKurious People Track-X"
        }
    )
    st.markdown("<h1 style='text-align: center;'>People <span style='color: #9eeade;'>Track-X</span></h1>", unsafe_allow_html=True)
    st.subheader("Artificial Intelligent System")
    option = st.selectbox(
        'What Type of File do you want to work with?',
        ('Images', 'Videos'))
    if option == "Images":
        st.title('Image Analysis')
        processImage()
    else:
        st.title('Video Analysis')
        # Reset turns the Flag back on so a new video gets processed.
        st.button("Reset", on_click=resetFlag)
        processVideo()

    with st.expander("About People Track-X"):
        st.markdown( '<p style="font-size: 30px;"><strong>Welcome to the People \
            <span style="color: #9eeade;">Track-X</span> App!</strong></p>', unsafe_allow_html= True)
        st.markdown('<p style = "font-size : 20px; color : white;">This application was \
            built to analyse the <strong>People Density</strong> \
            on a particular place.</p>', unsafe_allow_html=True)
|
122 |
+
|
123 |
+
if __name__ == '__main__':
    __author__ = 'Mahimai Raja J'
    __version__ = "1.0.0"
    # Prepare the data folder / signal files, then launch the UI.
    initial_setup()
    main()

# 📌 NOTE :
# Do not modify the credits unless you have
# legal permission from the authorizing authority .

# Thank you for helping to maintain the integrity of the
# open source community by promoting fair and ethical
# use of open source software 💎.
|
utils/modules.py
ADDED
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import cv2, os
|
3 |
+
import argparse, subprocess
|
4 |
+
import supervision as sv
|
5 |
+
import numpy as np
|
6 |
+
import time, csv, json
|
7 |
+
import datetime, requests
|
8 |
+
|
9 |
+
|
10 |
+
def initial_setup() -> None:
    """
    Create the data folder, the density CSV and the signal JSON file
    (a tiny on-disk flag store), then initialize the module globals.

    Signal file schema: {"Flag": 1, "initiate": 1} — Flag gates video
    processing, initiate marks that first-time setup is pending.
    """
    if not os.path.exists('data'):
        os.makedirs('data')
    if not os.path.exists("data/density.csv"):
        # Create an empty CSV; detectVideo writes the header row later.
        with open('data/density.csv', 'x'):
            pass
    if not os.path.exists('data/signal.json'):
        # Write the default signal state in one step (the original created
        # the file with 'x', closed it, then reopened it with 'w').
        data = {"Flag" : 1, 'initiate' : 1}
        with open("data/signal.json", "w") as outfile:
            json.dump(data, outfile)
    # NOTE(review): reconstructed placement — main() appears to run on
    # every call so the module globals are always initialized; confirm.
    main()
|
28 |
+
|
29 |
+
|
30 |
+
|
31 |
+
def parse_arguments() -> argparse.Namespace:
    """Build and run the command-line parser for the crowd detector.

    Exposes a single option, --webcam-resolution WIDTH HEIGHT,
    defaulting to 1280x720.
    """
    parser = argparse.ArgumentParser(description='Crowd detection')
    parser.add_argument(
        '--webcam-resolution',
        default=[1280, 720],
        nargs=2,
        type=int,
    )
    return parser.parse_args()
|
44 |
+
|
45 |
+
|
46 |
+
def main():
    """
    Initialize the module-level globals shared by the detection helpers
    (argument namespace, YOLO model, frame/time bookkeeping).
    """
    from ultralytics import YOLO
    global args, model, frame_count, startSeconds, firstFrame, \
        videoFPS, videoHeight, videoWidth, fps_set
    args = parse_arguments()
    model = YOLO(model='mahimairaja/people-track-x-model')
    # Per-video bookkeeping, reset each time main() runs.
    frame_count = 0
    # Wall-clock-in-video anchor used for CSV timestamps.
    startSeconds = datetime.datetime.strptime('00:00:00', '%H:%M:%S')
    firstFrame = True
    videoFPS = 0
    videoHeight = 0
    videoWidth = 0
    fps_set = set()
|
62 |
+
|
63 |
+
|
64 |
+
|
65 |
+
def process_frame(frame : np.ndarray, _) -> np.ndarray:
    """
    Detect people in one video frame, draw bounding boxes, the full-frame
    zone and an FPS overlay, and periodically append the running count to
    the density CSV.

    Relies on the module globals set up by main() and detectVideo()
    (args, model, videoFPS/videoWidth/videoHeight, counters).
    """
    # Zone spanning the whole frame, so every detection is counted.
    ZONE_SIDES = np.array([
        [0,0],
        [videoWidth, 0],
        [videoWidth, videoHeight],
        [0,videoHeight]
    ])

    zone = sv.PolygonZone(polygon=ZONE_SIDES, frame_resolution_wh=tuple(args.webcam_resolution))

    start_time = time.time()

    # Reuse the model loaded once in main() — the original re-imported
    # ultralytics and reloaded the YOLO weights on *every* frame, which
    # dominated per-frame latency for no behavioral benefit.
    results = model(frame, imgsz=1280)[0]
    detections = sv.Detections.from_yolov8(results)
    detections = detections[detections.class_id == 0]  # keep persons only
    zone.trigger(detections=detections)

    box_annotator = sv.BoxAnnotator(thickness=2, text_thickness=1, text_scale=0.5, text_padding = 2)
    zone_annotator = sv.PolygonZoneAnnotator(zone=zone, color=sv.Color.white())

    labels = [
        f"{model.model.names[class_id]} {confidence :0.2f}"
        for _, confidence, class_id,_
        in detections
    ]

    frame = box_annotator.annotate(scene=frame, detections=detections, labels=labels)
    frame = zone_annotator.annotate(scene=frame)

    end_time = time.time()
    fps = 1 / (end_time - start_time)
    global fps_set
    fps_set.add(fps)
    cv2.putText(frame, "FPS: " + str(int(fps)), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    global frame_count
    global startSeconds
    global firstFrame
    frame_count = frame_count + 1

    if firstFrame :
        writeCSV(startSeconds.strftime('%M:%S'), len(labels))
        firstFrame = False
    # Log a fresh count every `my_time` frames (roughly a couple of
    # seconds of video, scaled by the best observed processing FPS).
    my_time = videoFPS * int(max(fps_set))
    if frame_count == my_time:
        startSeconds += datetime.timedelta(seconds=2)
        writeCSV(startSeconds.strftime('%M:%S'), len(labels))
        frame_count = 0
    return frame
|
120 |
+
|
121 |
+
def writeCSV(startSeconds, count):
    """Append one (time, count) row to the density CSV."""
    row = [startSeconds, count]
    with open('data/density.csv', mode='a', newline='') as csvfile:
        csv.writer(csvfile).writerow(row)
|
129 |
+
|
130 |
+
def detect(imgPath, ) -> int:
    """
    Detect people in the image at imgPath.

    Writes the annotated image to data/result.jpg and returns the number
    of people found.
    """
    from ultralytics import YOLO
    args = parse_arguments()
    model = YOLO(model='mahimairaja/people-track-x-model',)
    frame = cv2.imread(imgPath)

    box_annotator = sv.BoxAnnotator(
        thickness = 1,
        text_thickness = 1,
        text_scale = 0.5,
        text_padding = 2
    )

    # A zone covering the whole image, so every detection is inside it.
    # (Unused frame_width/frame_height and channels locals removed.)
    height, width = frame.shape[:2]
    IMG_SIDES = np.array([
        [0,0],
        [width, 0],
        [width, height],
        [0,height]
    ])
    zone = sv.PolygonZone(polygon=IMG_SIDES, frame_resolution_wh=tuple(args.webcam_resolution))
    zone_annotator = sv.PolygonZoneAnnotator(zone=zone, color=sv.Color.white())

    result = model(frame)[0]
    detection = sv.Detections.from_yolov8(result)

    labels = [
        f"{model.model.names[class_id]} {confidence :0.2f}"
        for _, confidence, class_id,_
        in detection
    ]

    print(f"The count of people in the image is {len(labels)}")

    frame = box_annotator.annotate(scene = frame, detections = detection, labels = labels)

    zone.trigger(detections=detection)
    frame = zone_annotator.annotate(scene=frame)
    cv2.imwrite('data/result.jpg', frame)
    return len(labels)
|
176 |
+
|
177 |
+
def detectVideo(videoPath, ) :
    """
    Run person detection over every frame of the video at videoPath,
    writing the annotated output to data/output.mp4.

    Resets the density CSV to just its header before processing and
    publishes the video's fps/size through module globals for
    process_frame().
    """
    video_info = sv.VideoInfo.from_video_path(videoPath)
    global videoFPS, videoWidth, videoHeight
    videoFPS = video_info.fps
    videoWidth = video_info.width
    videoHeight = video_info.height

    # Start the CSV over with only the header row.
    with open('data/density.csv', 'w', newline='') as csvfile:
        csv.writer(csvfile).writerow(['Time', 'Count'])

    sv.process_video(source_path=videoPath, target_path="data/output.mp4", callback=process_frame)
|
193 |
+
|
194 |
+
def getDataframe():
    """Load the density CSV into a pandas DataFrame and return it."""
    return pd.read_csv('data/density.csv')
|
200 |
+
|
201 |
+
def checkStart() :
    """
    Return the 'initiate' flag from the signal file (1 while first-time
    setup has not been completed yet).
    """
    # Context manager instead of open/close so the handle is released
    # even if json.load raises.
    with open('data/signal.json', 'r') as f:
        data = json.load(f)
    return data['initiate']
|
210 |
+
|
211 |
+
def doneSetup():
    """
    Turn the 'initiate' flag off in the signal file to record that the
    initial setup has completed.
    """
    # Context managers instead of manual open/close pairs; the redundant
    # outfile.close() inside the with block is dropped.
    with open('data/signal.json', 'r') as f:
        data = json.load(f)
    data['initiate'] = 0
    with open("data/signal.json", "w") as outfile:
        json.dump(data, outfile)
|
223 |
+
|
224 |
+
def getFlag():
    """
    Return the 'Flag' value from the signal file
    (1 = video still needs processing).
    """
    # Context manager instead of open/close so the handle is released
    # even if json.load raises.
    with open('data/signal.json', 'r') as f:
        data = json.load(f)
    return data['Flag']
|
232 |
+
|
233 |
+
def setFlag():
    """
    Set 'Flag' to 0 in the signal file so the current video is not
    re-processed on the next Streamlit rerun.
    """
    # Context managers instead of manual open/close pairs; the redundant
    # outfile.close() inside the with block is dropped.
    with open('data/signal.json', 'r') as f:
        data = json.load(f)
    data['Flag'] = 0
    with open("data/signal.json", "w") as outfile:
        json.dump(data, outfile)
|
245 |
+
|
246 |
+
def resetFlag():
    """
    Set 'Flag' back to 1 in the signal file to allow processing the next
    uploaded video (wired to the UI's Reset button).
    """
    # Context managers instead of manual open/close pairs; the redundant
    # outfile.close() inside the with block is dropped.
    with open('data/signal.json', 'r') as f:
        data = json.load(f)
    data['Flag'] = 1
    with open("data/signal.json", "w") as outfile:
        json.dump(data, outfile)
|