Upload 3 files
- .gitattributes +1 -0
- app.py +107 -0
- ddm.keras +3 -0
- driver distraction model-Copy1.ipynb +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ddm.keras filter=lfs diff=lfs merge=lfs -text
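The added rule tells Git LFS to track ddm.keras, so the repository stores only a small pointer file while the binary weights live in LFS storage (see the ddm.keras pointer further down).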
app.py
ADDED
@@ -0,0 +1,107 @@
+import os
+from glob import glob
+import cv2
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split
+from keras.utils import to_categorical
+from keras.models import load_model
+import streamlit as st
+
+# Define constants
+img_rows = 64
+img_cols = 64
+color_type = 1  # Grey
+NUMBER_CLASSES = 10
+
+# Load the saved model
+model = load_model('ddm.keras')
+
+# Define the get_cv2_image function
+def get_cv2_image(path, img_rows, img_cols, color_type=1):
+    """
+    Read and resize an image using OpenCV.
+
+    Args:
+        path (str): Path to the image file.
+        img_rows (int): Desired number of rows for the resized image.
+        img_cols (int): Desired number of columns for the resized image.
+        color_type (int): Type of color space. Default is 1 (gray).
+
+    Returns:
+        np.ndarray: Resized image array.
+    """
+    if color_type == 1:
+        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
+    elif color_type == 3:
+        img = cv2.imread(path, cv2.IMREAD_COLOR)
+    else:
+        raise ValueError("Invalid color_type. Use 1 for grayscale or 3 for RGB.")
+
+    img = cv2.resize(img, (img_rows, img_cols))
+    return img
+
+# Load dataset
+def load_test(size=200, img_rows=64, img_cols=64, color_type=1):
+    path = os.path.join(r"C:\Users\Gayathri Anil\Downloads\imgs\test", '*.jpg')
+    files = sorted(glob(path))
+    X_test, X_test_id = [], []
+    total = 0
+    files_size = len(files)
+    for file in files:
+        if total >= size or total >= files_size:
+            break
+        img = get_cv2_image(file, img_rows, img_cols, color_type)
+        X_test.append(img)
+        X_test_id.append(file)
+        total += 1
+    return X_test, X_test_id
+
+# Activity map
+activity_map = {'c0': 'Safe driving',
+                'c1': 'Texting - right',
+                'c2': 'Talking on the phone - right',
+                'c3': 'Texting - left',
+                'c4': 'Talking on the phone - left',
+                'c5': 'Operating the radio',
+                'c6': 'Drinking',
+                'c7': 'Reaching behind',
+                'c8': 'Hair and makeup',
+                'c9': 'Talking to passenger'}
+
+# Streamlit app
+def main():
+    st.title('Driver Activity Recognition')
+    st.sidebar.title('Options')
+
+    # Load and normalize sampled test data
+    test_files, _ = load_test()
+
+    # Select an image
+    selected_image_index = st.sidebar.selectbox('Select an image:', range(len(test_files)))
+    selected_image = test_files[selected_image_index]
+
+    # Display the selected image
+    st.image(selected_image, caption='Selected Image', use_column_width=True)
+
+    # Classify the image
+    classify_button = st.sidebar.button('Classify')
+    if classify_button:
+        classify_image(selected_image)
+
+# Function to classify the image
+def classify_image(selected_image):
+    img_brute = cv2.resize(selected_image, (img_rows, img_cols))
+
+    # Reshape the image
+    new_img = img_brute.reshape(-1, img_rows, img_cols, color_type)
+
+    # Predict the class label
+    y_prediction = model.predict(new_img)
+    predicted_class = np.argmax(y_prediction)
+
+    # Display the prediction
+    st.write('Predicted class:', activity_map.get('c{}'.format(predicted_class)))
+
+if __name__ == '__main__':
+    main()
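For a quick sanity check outside the Streamlit UI, the same preprocessing and prediction path could be exercised with a minimal standalone script. This is only a sketch: standalone_check.py and sample.jpg are hypothetical names, and the 64x64 grayscale shape mirrors the constants defined in app.py. The app itself would typically be launched with streamlit run app.py.

# standalone_check.py - minimal sketch, not part of this commit
import cv2
import numpy as np
from keras.models import load_model

model = load_model('ddm.keras')                        # LFS-tracked weights added in this commit

img = cv2.imread('sample.jpg', cv2.IMREAD_GRAYSCALE)   # hypothetical test image path
img = cv2.resize(img, (64, 64))                        # img_rows x img_cols, as in app.py
batch = img.reshape(-1, 64, 64, 1)                     # batch of one grayscale image

probs = model.predict(batch)                           # probabilities for classes c0..c9
print('predicted class index:', int(np.argmax(probs)))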
ddm.keras
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9015fc4de6e7eeab204f187fd42b1cb6d2343d246c1c50216ea487f1d492b2f
+size 36509330
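These three lines are the Git LFS pointer, not the model itself; the actual ddm.keras weights (roughly 36.5 MB, per the size field) are fetched from LFS storage on checkout.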
driver distraction model-Copy1.ipynb
ADDED
The diff for this file is too large to render.