Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Importing Project Dependencies
|
2 |
+
import numpy as np
|
3 |
+
import cv2
|
4 |
+
import pandas as pd
|
5 |
+
import tensorflow as tf
|
6 |
+
from tensorflow import keras
|
7 |
+
import time
|
8 |
+
import winsound
|
9 |
+
import streamlit as st
|
10 |
+
# Setting up config for GPU usage: let TensorFlow grow GPU memory on demand
# instead of reserving all of it up front.
# BUG FIX: the original indexed physical_devices[0] unconditionally, which
# raises IndexError on machines with no visible GPU; guard on the list.
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

# Haar-cascade frontal-face detector bundled with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Trained eye-state classifier used by the Sleep Detection page below
# (it is fed 86x86 eye patches cropped from the face ROI).
model = keras.models.load_model('my_model (1).h5')

# Title for GUI
st.title('Drowsiness Detection')
# NOTE(review): the original also defined `img = []` here, but it is never
# read anywhere in the file, so the dead variable has been removed.

# Navigation Bar (defaults to the Home page).
nav_choice = st.sidebar.radio('Navigation', ('Home', 'Sleep Detection', 'Help Us Improve'), index=0)

# Home page: project blurb plus step-by-step usage instructions.
if nav_choice == 'Home':
    st.header('Prevents sleep deprivation road accidents, by alerting drowsy drivers.')
    st.image('ISHN0619_C3_pic.jpg')
    st.markdown('<b>In accordance with the survey taken by the Times Of India, about 40 % of road </b>'
                '<b>accidents are caused</b> '
                '<b>due to sleep deprivation & fatigued drivers. In order to address this issue, this app will </b>'
                '<b>alert such drivers with the help of deep learning models and computer vision.</b>'
                '', unsafe_allow_html=True)
    st.image('sleep.jfif', width=300)
    # BUG FIX (markup): the anchor tag previously had a stray comma between
    # its href and target attributes, and the closing tags were mis-nested
    # as </font></b>; both are corrected below.
    st.markdown('<h1>How to use?<br></h1>'
                '<b>1. Go to Sleep Detection page from the Navigation Side-Bar.</b><br>'
                '<b>2. Make sure that, you have sufficient amount of light, in your room.</b><br>'
                '<b>3. Align yourself such that, you are clearly visible in the web-cam and '
                'stay closer to the web-cam. </b><br>'
                '<b>4. Web-cam will take 3 pictures of you, so keep your eyes in the same state'
                ' (open or closed) for about 5 seconds.</b><br>'
                '<b>5. If your eyes are closed, the model will make a beep sound to alert you.</b><br>'
                '<b>6. Otherwise, the model will continue taking your pictures at regular intervals of time.</b><br>'
                '<font color="red"><br><b>For the purpose of the training process of the model, '
                'dataset used is available <a href="https://www.kaggle.com/kutaykutlu/drowsiness-detection" '
                'target="_blank">here</a></b></font>'
                , unsafe_allow_html=True)

50 |
+
|
51 |
+
# Sleep Detection page
|
52 |
+
elif nav_choice == 'Sleep Detection':
|
53 |
+
st.header('Image Prediction')
|
54 |
+
cap = 0
|
55 |
+
st.success('Please look at your web-cam, while following all the instructions given on the Home page.')
|
56 |
+
st.warning(
|
57 |
+
'Keeping the eyes in the same state is important but you can obviously blink your eyes, if they are open!!!')
|
58 |
+
b = st.progress(0)
|
59 |
+
for i in range(100):
|
60 |
+
time.sleep(0.0001)
|
61 |
+
b.progress(i + 1)
|
62 |
+
|
63 |
+
start = st.radio('Options', ('Start', 'Stop'), key='Start_pred', index=1)
|
64 |
+
|
65 |
+
if start == 'Start':
|
66 |
+
decision = 0
|
67 |
+
st.markdown('<font face="Comic sans MS"><b>Detected Facial Region of Interest(ROI)     Extractd'
|
68 |
+
' Eye Features from the ROI</b></font>', unsafe_allow_html=True)
|
69 |
+
|
70 |
+
# Best of 3 mechanism for drowsiness detection
|
71 |
+
for _ in range(3):
|
72 |
+
cap = cv2.VideoCapture(0)
|
73 |
+
ret, frame = cap.read()
|
74 |
+
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
75 |
+
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
|
76 |
+
# Proposal of face region by the har cascade classifier
|
77 |
+
for (x, y, w, h) in faces:
|
78 |
+
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 5)
|
79 |
+
roi_gray = gray[y:y + w, x:x + w]
|
80 |
+
roi_color = frame[y:y + h, x:x + w]
|
81 |
+
frame1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
82 |
+
|
83 |
+
try:
|
84 |
+
# Cenentroid method for extraction of eye-patch
|
85 |
+
centx, centy = roi_color.shape[:2]
|
86 |
+
centx //= 2
|
87 |
+
centy //= 2
|
88 |
+
eye_1 = roi_color[centy - 40: centy, centx - 70: centx]
|
89 |
+
eye_1 = cv2.resize(eye_1, (86, 86))
|
90 |
+
eye_2 = roi_color[centy - 40: centy, centx: centx + 70]
|
91 |
+
eye_2 = cv2.resize(eye_2, (86, 86))
|
92 |
+
cv2.rectangle(frame1, (x + centx - 60, y + centy - 40), (x + centx - 10, y + centy), (0, 255, 0), 5)
|
93 |
+
cv2.rectangle(frame1, (x + centx + 10, y + centy - 40), (x + centx + 60, y + centy), (0, 255, 0), 5)
|
94 |
+
preds_eye1 = model.predict(np.expand_dims(eye_1, axis=0))
|
95 |
+
preds_eye2 = model.predict(np.expand_dims(eye_2, axis=0))
|
96 |
+
e1, e2 = np.argmax(preds_eye1), np.argmax(preds_eye2)
|
97 |
+
|
98 |
+
# Display of face image and extracted eye-patch
|
99 |
+
img_container = st.beta_columns(4)
|
100 |
+
img_container[0].image(frame1, width=250)
|
101 |
+
img_container[2].image(cv2.cvtColor(eye_1, cv2.COLOR_BGR2RGB), width=150)
|
102 |
+
img_container[3].image(cv2.cvtColor(eye_2, cv2.COLOR_BGR2RGB), width=150)
|
103 |
+
print(e1, e2)
|
104 |
+
|
105 |
+
# Decision variable for prediction
|
106 |
+
if e1 == 1 or e2 == 1:
|
107 |
+
pass
|
108 |
+
else:
|
109 |
+
decision += 1
|
110 |
+
|
111 |
+
except NameError:
|
112 |
+
st.warning('Hold your camera closer!!!\nTrying again in 2s')
|
113 |
+
cap.release()
|
114 |
+
time.sleep(1)
|
115 |
+
continue
|
116 |
+
|
117 |
+
except:
|
118 |
+
cap.release()
|
119 |
+
continue
|
120 |
+
|
121 |
+
finally:
|
122 |
+
cap.release()
|
123 |
+
|
124 |
+
# If found drowsy, then make a beep sound to alert the driver
|
125 |
+
if decision == 0:
|
126 |
+
st.error('Eye(s) are closed')
|
127 |
+
winsound.Beep(2500, 2000)
|
128 |
+
|
129 |
+
else:
|
130 |
+
st.success('Eyes are Opened')
|
131 |
+
st.warning('Please select "Stop" and then "Start" to try again')
|
132 |
+
|
133 |
+
# Help Us Improve page
|
134 |
+
else:
|
135 |
+
st.header('Help Us Improve')
|
136 |
+
st.success('We would appreciate your Help!!!')
|
137 |
+
st.markdown(
|
138 |
+
'<font face="Comic sans MS">To make this app better, we would appreciate your small amount of time.</font>'
|
139 |
+
'<font face="Comic sans MS">Let me take you through, some of the basic statistical analysis of this </font>'
|
140 |
+
'<font face="Comic sans MS">model. <br><b>Accuracy with naked eyes = 99.5%<br>Accuracy with spectacles = 96.8%</b><br></font> '
|
141 |
+
'<font face="Comic sans MS">As we can see here, accuracy with spectacles is not at all spectacular, and hence to make this app </font>'
|
142 |
+
'<font face="Comic sans MS">better, and to use it in real-time situations, we require as much data as we can gather.</font> '
|
143 |
+
, unsafe_allow_html=True)
|
144 |
+
st.warning('NOTE: Your identity will be kept anonymous, and only your eye-patch will be extracted!!!')
|
145 |
+
# Image upload
|
146 |
+
img_upload = st.file_uploader('Upload Image Here', ['png', 'jpg', 'jpeg'])
|
147 |
+
if img_upload is not None:
|
148 |
+
prog = st.progress(0)
|
149 |
+
to_add = cv2.imread(str(img_upload.read()), 0)
|
150 |
+
to_add = pd.DataFrame(to_add)
|
151 |
+
|
152 |
+
# Save it in the database
|
153 |
+
to_add.to_csv('Data_from_users.csv', mode='a', header=False, index=False, sep=';')
|
154 |
+
for i in range(100):
|
155 |
+
time.sleep(0.001)
|
156 |
+
prog.progress(i + 1)
|
157 |
+
st.success('Uploaded Successfully!!! Thank you for contributing.')
|