Ibtehaj10 committed
Commit 817b830
Parent: 5af1d47

Upload 2 files

Files changed (2)
  1. Login.py +694 -0
  2. screenshot/Login.py +694 -0
Login.py ADDED
@@ -0,0 +1,694 @@
+ import cv2
+ import datetime
+ import imutils
+ import numpy as np
+ from centroidtracker import CentroidTracker
+ import pandas as pd
+ import torch
+ import streamlit as st
+ import mediapipe as mp
+ import cv2 as cv
+ import numpy as np
+ import tempfile
+ import time
+ from PIL import Image
+ import pandas as pd
+ import torch
+ import base64
+ import streamlit.components.v1 as components
+ import csv
+ import pickle
+ from pathlib import Path
+ import streamlit_authenticator as stauth
+ import os
+ import csv
+ from streamlit_option_menu import option_menu
+ # x-x-x-x-x-x-x-x-x-x-x-x-x-x LOGIN FORM x-x-x-x-x-x-x-x-x
+
+
+ import streamlit as st
+ import pandas as pd
+ import hashlib
+ import sqlite3
+ #
+
+ import pickle
+ from pathlib import Path
+ import streamlit_authenticator as stauth
+ import pyautogui
+
+ # print("Done !!!")
+
+ data = ["student Count",'Date','Id','Mobile','Watch']
+ with open('final.csv', 'w') as file:
+     writer = csv.writer(file)
+     writer.writerow(data)
+
+
+ # # l1 = []
+ # # l2 = []
+ # # if st.button('signup'):
+
+
+ # # usernames = st.text_input('Username')
+ # # pwd = st.text_input('Password')
+ # # l1.append(usernames)
+ # # l2.append(pwd)
+
+ # # names = ["dmin", "ser"]
+ # # if st.button("signupsss"):
+ # # username =l1
+
+ # # password =l2
+
+ # # hashed_passwords =stauth.Hasher(password).generate()
+
+ # # file_path = Path(__file__).parent / "hashed_pw.pkl"
+
+ # # with file_path.open("wb") as file:
+ # # pickle.dump(hashed_passwords, file)
+
+
+ # # elif st.button('Logins'):
+ # names = ['dmin', 'ser']
+
+ # username = []
+
+ # file_path = Path(__file__).parent / 'hashed_pw.pkl'
+
+ # with file_path.open('rb') as file:
+ # hashed_passwords = pickle.load(file)
+
+ # authenticator = stauth.Authenticate(names,username,hashed_passwords,'Cheating Detection','abcdefg',cookie_expiry_days=180)
+
+ # name,authentication_status,username= authenticator.login('Login','main')
+
+
+ # if authentication_status == False:
+ # st.error('Username/Password is incorrect')
+
+ # if authentication_status == None:
+ # st.error('Please enter a username and password')
+
+ @st.experimental_memo
+ def get_img_as_base64(file):
+     with open(file, "rb") as f:
+         data = f.read()
+     return base64.b64encode(data).decode()
+
+
+ #img = get_img_as_base64("/home/anas/PersonTracking/WebUI/attendence.jpg")
+
+ page_bg_img = f"""
+ <style>
+ [data-testid="stAppViewContainer"] > .main {{
+ background-image: url("https://www.xmple.com/wallpaper/blue-gradient-black-linear-1920x1080-c2-87cefa-000000-a-180-f-14.svg");
+ background-size: 180%;
+ background-position: top left;
+ background-repeat: no-repeat;
+ background-attachment: local;
+ }}
+
+ [data-testid="stHeader"] {{
+ background: rgba(0,0,0,0);
+ }}
+ [data-testid="stToolbar"] {{
+ right: 2rem;
+ }}
+ </style>
+ """
+
+ st.markdown(page_bg_img, unsafe_allow_html=True)
+ files = pd.read_csv('LoginStatus.csv')
+
+
+ idS = list(files['Id'])
+ Pwd = list(files['Password'].astype(str))
+
+ # print(type(Pwd))
+ ids = st.sidebar.text_input('Enter a username')
+ Pswd = st.sidebar.text_input('Enter a password',type="password",key="password")
+
+ # print('list : ',type(Pwd))
+
+
+
+ if (ids in idS) and(str(Pswd) in Pwd):
+
+     # st.empty()
+     date_time = time.strftime("%b %d %Y %-I:%M %p")
+     date = date_time.split()
+     dates = date[0:3]
+     times = date[3:5]
+     # x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-xAPPLICACTION -x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x
+
+     def non_max_suppression_fast(boxes, overlapThresh):
+         try:
+             if len(boxes) == 0:
+                 return []
+
+             if boxes.dtype.kind == "i":
+                 boxes = boxes.astype("float")
+
+             pick = []
+
+             x1 = boxes[:, 0]
+             y1 = boxes[:, 1]
+             x2 = boxes[:, 2]
+             y2 = boxes[:, 3]
+
+             area = (x2 - x1 + 1) * (y2 - y1 + 1)
+             idxs = np.argsort(y2)
+
+             while len(idxs) > 0:
+                 last = len(idxs) - 1
+                 i = idxs[last]
+                 pick.append(i)
+
+                 xx1 = np.maximum(x1[i], x1[idxs[:last]])
+                 yy1 = np.maximum(y1[i], y1[idxs[:last]])
+                 xx2 = np.minimum(x2[i], x2[idxs[:last]])
+                 yy2 = np.minimum(y2[i], y2[idxs[:last]])
+
+                 w = np.maximum(0, xx2 - xx1 + 1)
+                 h = np.maximum(0, yy2 - yy1 + 1)
+
+                 overlap = (w * h) / area[idxs[:last]]
+
+                 idxs = np.delete(idxs, np.concatenate(([last],
+                                                        np.where(overlap > overlapThresh)[0])))
+
+             return boxes[pick].astype("int")
+         except Exception as e:
+             print("Exception occurred in non_max_suppression : {}".format(e))
+
+
+     protopath = "MobileNetSSD_deploy.prototxt"
+     modelpath = "MobileNetSSD_deploy.caffemodel"
+     detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
+     # Only enable it if you are using OpenVino environment
+     # detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
+     # detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
+
+
+     CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
+                "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
+                "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
+                "sofa", "train", "tvmonitor"]
+
+     tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
+
+     st.markdown(
+         """
+         <style>
+         [data-testid="stSidebar"][aria-expanded="true"] > div:first-child{
+             width: 350px
+         }
+         [data-testid="stSidebar"][aria-expanded="false"] > div:first-child{
+             width: 350px
+             margin-left: -350px
+         }
+         </style>
+         """,
+         unsafe_allow_html=True,
+     )
+     hide_streamlit_style = """
+             <style>
+             #MainMenu {visibility: hidden;}
+             footer {visibility: hidden;}
+             </style>
+             """
+     st.markdown(hide_streamlit_style, unsafe_allow_html=True)
+
+
+     # Resize Images to fit Container
+     @st.cache()
+     # Get Image Dimensions
+     def image_resize(image, width=None, height=None, inter=cv.INTER_AREA):
+         dim = None
+         (h,w) = image.shape[:2]
+
+         if width is None and height is None:
+             return image
+
+         if width is None:
+             # only a target height was given, so scale by height
+             r = height/float(h)
+             dim = (int(w*r),height)
+
+         else:
+             r = width/float(w)
+             dim = width, int(h*r)
+
+         # Resize image
+         resized = cv.resize(image,dim,interpolation=inter)
+
+         return resized
+
+     # About Page
+     # authenticator.logout('Logout')
+     EXAMPLE_NO = 3
+
+
+     def streamlit_menu(example=1):
+         if example == 1:
+             # 1. as sidebar menu
+             with st.sidebar:
+                 selected = option_menu(
+                     menu_title="Main Menu", # required
+                     options=["Home", "Projects", "Contact"], # required
+                     icons=["house", "book", "envelope"], # optional
+                     menu_icon="cast", # optional
+                     default_index=0, # optional
+                 )
+             return selected
+
+         if example == 2:
+             # 2. horizontal menu w/o custom style
+             selected = option_menu(
+                 menu_title=None, # required
+                 options=["Home", "Projects", "Contact"], # required
+                 icons=["house", "book", "envelope"], # optional
+                 menu_icon="cast", # optional
+                 default_index=0, # optional
+                 orientation="horizontal",
+             )
+             return selected
+
+         if example == 3:
+             # 2. horizontal menu with custom style
+             selected = option_menu(
+                 menu_title=None, # required
+                 options=["Home", "Projects", "Contact"], # required
+                 icons=["house", "book", "envelope"], # optional
+                 menu_icon="cast", # optional
+                 default_index=0, # optional
+                 orientation="horizontal",
+                 styles={
+                     "container": {"padding": "0!important", "background-color": "#eaeaea"},
+                     "icon": {"color": "#080602", "font-size": "18px"},
+                     "nav-link": {
+                         "font-size": "18px",
+                         "text-align": "left",
+                         "color": "#000000",
+                         "margin": "0px",
+                         "--hover-color": "#E1A031",
+                     },
+                     "nav-link-selected": {"background-color": "#ffffff"},
+                 },
+             )
+             return selected
+
+
+     selected = streamlit_menu(example=EXAMPLE_NO)
+
+     if selected == "Home":
+         st.title(f"You have selected {selected}")
+     # if selected == "Projects":
+     # st.title(f"You have selected {selected}")
+     if selected == "Contact":
+         st.title(f"You have selected {selected}")
+     # app_mode = st.sidebar.selectbox(
+     # 'App Mode',
+     # ['Application']
+     # )
+     if selected == 'Projects':
+         # 2. horizontal menu with custom style
+         # selected = option_menu(
+         # menu_title=None, # required
+         # options=["Home", "Projects", "Contact"], # required
+         # icons=["house", "book", "envelope"], # optional
+         # menu_icon="cast", # optional
+         # default_index=0, # optional
+         # orientation="horizontal",
+         # styles={
+         # "container": {"padding": "0!important", "background-color": "#fafafa"},
+         # "icon": {"color": "orange", "font-size": "25px"},
+         # "nav-link": {
+         # "font-size": "25px",
+         # "text-align": "left",
+         # "margin": "0px",
+         # "--hover-color": "#eee",
+         # },
+         # "nav-link-selected": {"background-color": "blue"},
+         # },
+         # )
+         # if app_mode == 'About':
+         # st.title('About Product And Team')
+         # st.markdown('''
+         # Imran Bhai Project
+         # ''')
+         # st.markdown(
+         # """
+         # <style>
+         # [data-testid="stSidebar"][aria-expanded="true"] > div:first-child{
+         # width: 350px
+         # }
+         # [data-testid="stSidebar"][aria-expanded="false"] > div:first-child{
+         # width: 350px
+         # margin-left: -350px
+         # }
+         # </style>
+         # """,
+         # unsafe_allow_html=True,
+         # )
+
+
+
+
+         # elif app_mode == 'Application':
+
+         st.set_option('deprecation.showfileUploaderEncoding', False)
+
+         use_webcam = "pass"
+         # record = st.sidebar.checkbox("Record Video")
+
+         # if record:
+         # st.checkbox('Recording', True)
+
+         # drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1)
+
+         # st.sidebar.markdown('---')
+
+         # ## Add Sidebar and Window style
+         # st.markdown(
+         # """
+         # <style>
+         # [data-testid="stSidebar"][aria-expanded="true"] > div:first-child{
+         # width: 350px
+         # }
+         # [data-testid="stSidebar"][aria-expanded="false"] > div:first-child{
+         # width: 350px
+         # margin-left: -350px
+         # }
+         # </style>
+         # """,
+         # unsafe_allow_html=True,
+         # )
+
+         # max_faces = st.sidebar.number_input('Maximum Number of Faces', value=5, min_value=1)
+         # st.sidebar.markdown('---')
+         # detection_confidence = st.sidebar.slider('Min Detection Confidence', min_value=0.0,max_value=1.0,value=0.5)
+         # tracking_confidence = st.sidebar.slider('Min Tracking Confidence', min_value=0.0,max_value=1.0,value=0.5)
+         # st.sidebar.markdown('---')
+
+         ## Get Video
+         stframe = st.empty()
+         video_file_buffer = st.file_uploader("Upload a Video", type=['mp4', 'mov', 'avi', 'asf', 'm4v'])
+         temp_file = tempfile.NamedTemporaryFile(delete=False)
+
+
+         if not video_file_buffer:
+             if use_webcam:
+                 video = cv.VideoCapture(0)
+             else:
+                 try:
+                     video = cv.VideoCapture(1)
+                     temp_file.name = video
+                 except:
+                     pass
+         else:
+             temp_file.write(video_file_buffer.read())
+             video = cv.VideoCapture(temp_file.name)
+
+         width = int(video.get(cv.CAP_PROP_FRAME_WIDTH))
+         height = int(video.get(cv.CAP_PROP_FRAME_HEIGHT))
+         fps_input = int(video.get(cv.CAP_PROP_FPS))
+
+         ## Recording
+         codec = cv.VideoWriter_fourcc('a','v','c','1')
+         out = cv.VideoWriter('output1.mp4', codec, fps_input, (width,height))
+
+         # st.sidebar.text('Input Video')
+         # st.sidebar.video(temp_file.name)
+
+         fps = 0
+         i = 0
+
+         drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1)
+
+         kpil, kpil2, kpil3,kpil4,kpil5, kpil6 = st.columns(6)
+
+         with kpil:
+             st.markdown('**Frame Rate**')
+             kpil_text = st.markdown('0')
+
+         with kpil2:
+             st.markdown('**detection ID**')
+             kpil2_text = st.markdown('0')
+
+         with kpil3:
+             st.markdown('**Mobile**')
+             kpil3_text = st.markdown('0')
+         with kpil4:
+             st.markdown('**Watch**')
+             kpil4_text = st.markdown('0')
+         with kpil5:
+             st.markdown('**Count**')
+             kpil5_text = st.markdown('0')
+         with kpil6:
+             st.markdown('**Img Res**')
+             kpil6_text = st.markdown('0')
+
+
+
+         st.markdown('<hr/>', unsafe_allow_html=True)
+         # try:
+         def main():
+             db = {}
+
+             # cap = cv2.VideoCapture('//home//anas//PersonTracking//WebUI//movement.mp4')
+             path='/usr/local/lib/python3.10/dist-packages/yolo0vs5/yolov5s-int8.tflite'
+             #count=0
+             custom = 'yolov5s'
+
+             model = torch.hub.load('/usr/local/lib/python3.10/dist-packages/yolovs5', custom, path,source='local',force_reload=True)
+
+             b=model.names[0] = 'person'
+             mobile = model.names[67] = 'cell phone'
+             watch = model.names[75] = 'clock'
+
+             fps_start_time = datetime.datetime.now()
+             fps = 0
+             size=416
+
+             count=0
+             counter=0
+
+
+             color=(0,0,255)
+
+             cy1=250
+             offset=6
+
+
+             pt1 = (120, 100)
+             pt2 = (980, 1150)
+             color = (0, 255, 0)
+
+             pt3 = (283, 103)
+             pt4 = (1500, 1150)
+
+             cy2 = 500
+             color = (0, 255, 0)
+             total_frames = 0
+             prevTime = 0
+             cur_frame = 0
+             count=0
+             counter=0
+             fps_start_time = datetime.datetime.now()
+             fps = 0
+             total_frames = 0
+             lpc_count = 0
+             opc_count = 0
+             object_id_list = []
+             # success = True
+             if st.button("Detect"):
+                 try:
+                     while video.isOpened():
+
+                         ret, frame = video.read()
+                         frame = imutils.resize(frame, width=600)
+                         total_frames = total_frames + 1
+
+                         (H, W) = frame.shape[:2]
+
+                         blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
+
+                         detector.setInput(blob)
+                         person_detections = detector.forward()
+                         rects = []
+                         for i in np.arange(0, person_detections.shape[2]):
+                             confidence = person_detections[0, 0, i, 2]
+                             if confidence > 0.5:
+                                 idx = int(person_detections[0, 0, i, 1])
+
+                                 if CLASSES[idx] != "person":
+                                     continue
+
+                                 person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
+                                 (startX, startY, endX, endY) = person_box.astype("int")
+                                 rects.append(person_box)
+
+                         boundingboxes = np.array(rects)
+                         boundingboxes = boundingboxes.astype(int)
+                         rects = non_max_suppression_fast(boundingboxes, 0.3)
+
+                         objects = tracker.update(rects)
+                         for (objectId, bbox) in objects.items():
+                             x1, y1, x2, y2 = bbox
+                             x1 = int(x1)
+                             y1 = int(y1)
+                             x2 = int(x2)
+                             y2 = int(y2)
+
+                             cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
+                             text = "ID: {}".format(objectId)
+                             # print(text)
+                             cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
+                             if objectId not in object_id_list:
+                                 object_id_list.append(objectId)
+                         fps_end_time = datetime.datetime.now()
+                         time_diff = fps_end_time - fps_start_time
+                         if time_diff.seconds == 0:
+                             fps = 0.0
+                         else:
+                             fps = (total_frames / time_diff.seconds)
+
+                         fps_text = "FPS: {:.2f}".format(fps)
+
+                         cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
+                         lpc_count = len(objects)
+                         opc_count = len(object_id_list)
+
+                         lpc_txt = "LPC: {}".format(lpc_count)
+                         opc_txt = "OPC: {}".format(opc_count)
+
+                         count += 1
+                         if count % 4 != 0:
+                             continue
+                         # frame=cv.resize(frame, (600,500))
+                         # cv2.line(frame, pt1, pt2,color,2)
+                         # cv2.line(frame, pt3, pt4,color,2)
+                         results = model(frame,size)
+                         components = results.pandas().xyxy[0]
+                         for index, row in results.pandas().xyxy[0].iterrows():
+                             x1 = int(row['xmin'])
+                             y1 = int(row['ymin'])
+                             x2 = int(row['xmax'])
+                             y2 = int(row['ymax'])
+                             confidence = (row['confidence'])
+                             obj = (row['class'])
+
+
+                             # min':x1,'ymin':y1,'xmax':x2,'ymax':y2,'confidence':confidence,'Object':obj}
+                             # if lpc_txt is not None:
+                             # try:
+                             # db["student Count"] = [lpc_txt]
+                             # except:
+                             # db["student Count"] = ['N/A']
+                             if obj == 0:
+                                 cv2.rectangle(frame,(x1,y1),(x2,y2),(0,0,255),2)
+                                 rectx1,recty1 = ((x1+x2)/2,(y1+y2)/2)
+                                 rectcenter = int(rectx1),int(recty1)
+                                 cx = rectcenter[0]
+                                 cy = rectcenter[1]
+                                 cv2.circle(frame,(cx,cy),3,(0,255,0),-1)
+                                 cv2.putText(frame,str(b), (x1,y1), cv2.FONT_HERSHEY_PLAIN,2,(255,255,255),2)
+
+                                 db["student Count"] = [lpc_txt]
+                                 db['Date'] = [date_time]
+                                 db['id'] = ['N/A']
+                                 db['Mobile']=['N/A']
+                                 db['Watch'] = ['N/A']
+                                 if cy<(cy1+offset) and cy>(cy1-offset):
+                                     DB = []
+                                     counter+=1
+                                     DB.append(counter)
+
+                                     ff = DB[-1]
+                                     fx = str(ff)
+                                     # cv2.line(frame, pt1, pt2,(0, 0, 255),2)
+                                     # if cy<(cy2+offset) and cy>(cy2-offset):
+
+                                     # cv2.line(frame, pt3, pt4,(0, 0, 255),2)
+                                     font = cv2.FONT_HERSHEY_TRIPLEX
+                                     cv2.putText(frame,fx,(50, 50),font, 1,(0, 0, 255),2,cv2.LINE_4)
+                                     cv2.putText(frame,"Movement",(70, 70),font, 1,(0, 0, 255),2,cv2.LINE_4)
+                                     kpil2_text.write(f"<h5 style='text-align: left; color:red;'>{text}</h5>", unsafe_allow_html=True)
+
+
+                                     db['id'] = [text]
+                                     name = "/screenshot/"+str(date_time) + '.jpg'
+                                     print ('Creating...' + name)
+                                     cv2.imwrite(name, frame)
+
+                                     # myScreenshot = pyautogui.screenshot()
+                                     # if st.buttn("Dowload ss"):
+                                     # myScreenshot.save(r'name.png')
+                                     # myScreenshot.save(r'/home/anas/PersonTracking/AIComputerVision-master/pages/name.png')
+                             if obj == 67:
+                                 cv2.rectangle(frame,(x1,y1),(x2,y2),(0,0,255),2)
+                                 rectx1,recty1 = ((x1+x2)/2,(y1+y2)/2)
+                                 rectcenter = int(rectx1),int(recty1)
+                                 cx = rectcenter[0]
+                                 cy = rectcenter[1]
+                                 cv2.circle(frame,(cx,cy),3,(0,255,0),-1)
+                                 cv2.putText(frame,str(mobile), (x1,y1), cv2.FONT_HERSHEY_PLAIN,2,(255,255,255),2)
+                                 cv2.putText(frame,'Mobile',(50, 50),cv2.FONT_HERSHEY_PLAIN, 1,(0, 0, 255),2,cv2.LINE_4)
+                                 kpil3_text.write(f"<h5 style='text-align: left; color:red;'>{mobile}{text}</h5>", unsafe_allow_html=True)
+                                 db['Mobile']=mobile+' '+text
+                                 name = "/screenshot/"+str(date_time) + '.jpg'
+                                 print ('Creating...' + name)
+
+                                 # writing the extracted images
+                                 cv2.imwrite(name, frame)
+
+                                 # myScreenshot = pyautogui.screenshot()
+                                 # if st.buttn("Dowload ss"):
+                                 # myScreenshot.save(r'/home/anas/PersonTracking/AIComputerVision-master/pages/name.png')
+                                 # myScreenshot.save(r'name.png')
+
+                             if obj == 75:
+                                 cv2.rectangle(frame,(x1,y1),(x2,y2),(0,0,255),2)
+                                 rectx1,recty1 = ((x1+x2)/2,(y1+y2)/2)
+                                 rectcenter = int(rectx1),int(recty1)
+                                 cx = rectcenter[0]
+                                 cy = rectcenter[1]
+                                 cv2.circle(frame,(cx,cy),3,(0,255,0),-1)
+                                 cv2.putText(frame,str(watch), (x1,y1), cv2.FONT_HERSHEY_PLAIN,2,(255,255,255),2)
+                                 cv2.putText(frame,'Watch',(50, 50),cv2.FONT_HERSHEY_PLAIN, 1,(0, 0, 255),2,cv2.LINE_4)
+                                 kpil6_text.write(f"<h5 style='text-align: left; color:red;'>{watch}</h5>", unsafe_allow_html=True)
+
+
+                                 db['Watch']=watch
+                                 name = "/screenshot/"+str(date_time) + '.jpg'
+                                 print ('Creating...' + name)
+                                 cv2.imwrite(name, frame)
+
+                                 # writing the extracted images
+
+                                 # myScreenshot = pyautogui.screenshot()
+                                 # if st.buttn("Dowload ss"):
+                                 # myScreenshot.save(r'/home/anas/PersonTracking/AIComputerVision-master/pages/name.png')
+                                 # myScreenshot.save(r'name.png')
+
+
+
+                         kpil_text.write(f"<h5 style='text-align: left; color:red;'>{int(fps)}</h5>", unsafe_allow_html=True)
+                         kpil5_text.write(f"<h5 style='text-align: left; color:red;'>{lpc_txt}</h5>", unsafe_allow_html=True)
+                         kpil6_text.write(f"<h5 style='text-align: left; color:red;'>{width*height}</h5>",
+                                          unsafe_allow_html=True)
+
+
+                         frame = cv.resize(frame,(0,0), fx=0.8, fy=0.8)
+                         frame = image_resize(image=frame, width=640)
+                         stframe.image(frame,channels='BGR', use_column_width=True)
+                         df = pd.DataFrame(db)
+                         df.to_csv('final.csv',mode='a',header=False,index=False)
+                 except:
+                     pass
+             with open('final.csv') as f:
+                 st.download_button(label = 'Download Cheating Report',data=f,file_name='data.csv')
+
+             os.remove("final.csv")
+         main()
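
A quick sanity check, not part of the commit, of the non_max_suppression_fast helper defined above: two heavily overlapping boxes should collapse into one while a distant box survives. The sketch assumes the helper has been imported or copied from Login.py, and the box coordinates are made-up placeholders.

import numpy as np

boxes = np.array([
    [10, 10, 60, 60],      # box A
    [12, 12, 62, 62],      # box B, overlaps A almost entirely
    [200, 200, 250, 250],  # box C, far from A and B
])

# The helper casts integer boxes to float, collapses A and B into a single
# box (their overlap ratio of roughly 0.92 exceeds the 0.3 threshold) and
# keeps C, so two boxes come back.
kept = non_max_suppression_fast(boxes, overlapThresh=0.3)
print(len(kept))  # 2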
screenshot/Login.py ADDED
@@ -0,0 +1,694 @@
(contents identical to Login.py above)
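
Both copies of the script gate everything behind the sidebar login near the top of the file: it reads LoginStatus.csv, takes the Id column as valid usernames and the Password column as valid passwords, and admits the user when the typed username appears in the former and the typed password in the latter (the two are checked independently rather than as a pair). LoginStatus.csv itself is not part of this commit; a minimal sketch of the expected shape, with placeholder values, would be:

import pandas as pd

# Placeholder credentials only; adjust to match the real deployment.
pd.DataFrame(
    {"Id": ["admin", "user1"], "Password": ["1234", "abcd"]}
).to_csv("LoginStatus.csv", index=False)

# Login.py then reads the file back as:
#   files = pd.read_csv('LoginStatus.csv')
#   idS = list(files['Id'])
#   Pwd = list(files['Password'].astype(str))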