SpectraFaceAuth committed on
Commit 864d3c8 (1 parent: 2a2a4bd)

Upload 4 files

Files changed (4)
  1. ModelTransferLearning.py +99 -0
  2. PredictFace.py +42 -0
  3. config.py +6 -0
  4. trial1.py +243 -0
ModelTransferLearning.py ADDED
@@ -0,0 +1,99 @@
+ import numpy as np
+ import pandas as pd
+ import csv
+ import tensorflow as tf
+ from sklearn.model_selection import train_test_split
+ import cv2
+ from pathlib import Path
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import Dense, Flatten, Input
+ from tensorflow.keras.optimizers import Adam
+ from tensorflow.keras.applications import vgg16
+
+ def ModelFineTuning():
+     # Define the path to your dataset
+     data_dir = Path('Dataset')
+     image_size = (224, 224)  # VGG16 expects 224x224 input images
+
+     # Initialize dictionaries
+     candidates_dict = {}
+     labels_dict = {}
+
+     # Get all class folder names
+     class_folders = [folder.name for folder in data_dir.iterdir() if folder.is_dir()]
+     total_classes = len(class_folders)
+
+     # Assign labels to each class
+     for idx, class_name in enumerate(class_folders):
+         candidates_dict[class_name] = list(data_dir.glob(f'{class_name}/*'))
+         labels_dict[class_name] = idx
+
+     df = pd.DataFrame(list(labels_dict.items()), columns=['Candidate Name', 'Label'])
+     df.to_csv("candidate_labels.csv", index=False)
+
+     # Print the results
+     print('Images Dictionary:')
+     print(candidates_dict)
+     print('\nLabels Dictionary:')
+     print(labels_dict)
+
+     X, y = [], []
+     if not candidates_dict:  # no class folders were found
+         return False
+     for candidate_name, faces in candidates_dict.items():
+         for image in faces:
+             img = cv2.imread(str(image))
+             resized_img = cv2.resize(img, image_size)
+             X.append(resized_img)
+             y.append(labels_dict[candidate_name])
+
+     print(len(X))
+
+     X = np.array(X)
+     y = np.array(y)
+
+     X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+
+     X_train_scaled = X_train / 255.0
+     X_test_scaled = X_test / 255.0
+
+     # Convert labels to one-hot encoding
+     y_train = tf.keras.utils.to_categorical(y_train, num_classes=total_classes)
+     y_test = tf.keras.utils.to_categorical(y_test, num_classes=total_classes)
+
+     # Load the pre-trained VGG16 base model (ImageNet weights, no classifier head)
+     base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
+
+     # Ensure the base model layers are not trainable
+     for layer in base_model.layers:
+         layer.trainable = False
+
+     # Create a Sequential model and add layers
+     model = Sequential()
+     model.add(Input(shape=(224, 224, 3)))
+     model.add(base_model)
+     model.add(Flatten())
+     model.add(Dense(1024, activation='relu'))
+     model.add(Dense(512, activation='relu'))
+     model.add(Dense(total_classes, activation='softmax'))
+
+     # Compile the model
+     model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
+
+     # Train the model
+     history = model.fit(
+         X_train_scaled, y_train,
+         validation_data=(X_test_scaled, y_test),
+         epochs=10,  # Adjust the number of epochs based on your needs
+         batch_size=32
+     )
+
+     # Evaluate the model
+     loss, accuracy = model.evaluate(X_test_scaled, y_test)
+     print(f"Test accuracy: {accuracy * 100:.2f}%")
+
+     # Save the fine-tuned model
+     model.save('fine_tuned_VGG16_model.h5')
+     return True
+
+ # ModelFineTuning()  # Uncomment this line to run the training
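
For reference, a minimal usage sketch (not part of the uploaded file), assuming a populated Dataset/<class>/ folder layout as the code above expects; the artifact names come from the model.save and df.to_csv calls above:

# Sketch: drive the fine-tuning step and sanity-check its outputs.
from ModelTransferLearning import ModelFineTuning
from tensorflow.keras.models import load_model
import pandas as pd

if ModelFineTuning():
    model = load_model('fine_tuned_VGG16_model.h5')   # saved by ModelFineTuning()
    labels = pd.read_csv('candidate_labels.csv')      # one row per class folder
    print(model.output_shape, len(labels))            # output width should equal the number of classes
else:
    print("No class folders found under Dataset/, so nothing was trained.")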
PredictFace.py ADDED
@@ -0,0 +1,42 @@
+ import numpy as np
+ import pandas as pd
+ import tensorflow as tf
+ import cv2
+ from pathlib import Path
+ from datetime import datetime
+ from tensorflow.keras.models import load_model
+
+
+ # Function to preprocess the image
+ def preprocess_image(image_path, target_size=(224, 224)):
+     img = cv2.imread(image_path)
+     img = cv2.resize(img, target_size)
+     img = img / 255.0
+     img = np.expand_dims(img, axis=0)
+     return img
+
+ # Function to predict the label of the image
+ def predict_candidate(image_path):
+
+     model = load_model('fine_tuned_VGG16_model.h5')
+
+     df = pd.read_csv('candidate_labels.csv')
+     labels_dict = df.set_index('Label')['Candidate Name'].to_dict()
+
+     if image_path == '':
+         image_path = 'Dataset/test_img.jpg'
+
+     img = preprocess_image(image_path)
+     prediction = model.predict(img)
+     predicted_class = np.argmax(prediction, axis=1)[0]
+     predicted_label = labels_dict[predicted_class]
+
+     # Get the current timestamp in 12-hour format without seconds
+     timestamp = datetime.now().strftime('%Y-%m-%d %I:%M %p')
+     # Save to CSV
+     attendance_record = pd.DataFrame([[timestamp, predicted_label]], columns=['Timestamp', 'Student ID Number'])
+     attendance_record.to_csv('Attendance_Record.csv', mode='a', header=False, index=False)
+
+     print("Label is", predicted_label)
+     return predicted_label
+
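
A brief, hypothetical calling sketch (not part of the upload): predict_candidate loads the saved model and label map, predicts, and appends a row to Attendance_Record.csv, so a caller only needs a file path. The path below is illustrative and matches the one used in trial1.py:

# Sketch: predict the identity for one captured face image.
from PredictFace import predict_candidate

label = predict_candidate('AttendanceCapture/face_1.jpg')  # illustrative path
print(label)  # a [timestamp, label] row is also appended to Attendance_Record.csv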
config.py ADDED
@@ -0,0 +1,6 @@
+ # config.py
+ import os
+
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ TF_OPS_PATH = os.path.join(BASE_DIR, 'path', 'to', 'tensorflow', 'python', 'ops')
+
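
config.py only defines constants and is not consumed at runtime yet (trial1.py falls back to a hardcoded path, with the config alternative commented out). A hedged sketch of how the constant could be used instead; note the 'path', 'to' segments above are the author's placeholders and would need to point at the real tensorflow/python/ops directory:

# Sketch: prefer the TF_OPS_PATH environment variable, then fall back to the config constant.
import os
import config

path_to_watch = os.environ.get('TF_OPS_PATH', config.TF_OPS_PATH)
print(f"Watching: {path_to_watch}")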
trial1.py ADDED
@@ -0,0 +1,243 @@
+ from flask import Flask, render_template, request, jsonify
+ import os
+ import base64
+ import numpy as np
+ import cv2
+ import tensorflow as tf
+ import csv
+ import config
+ from ModelTransferLearning import ModelFineTuning
+ from watchdog.observers import Observer
+ from watchdog.events import FileSystemEventHandler
+ from io import BytesIO
+ from PIL import Image
+ from PredictFace import preprocess_image, predict_candidate
+
+ app = Flask(__name__)
+
+ # Load the Caffe face detection model
+ net = cv2.dnn.readNetFromCaffe('models/deploy.prototxt', 'models/res10_300x300_ssd_iter_140000.caffemodel')
+
+ ###
+ def save_att_photos(images):
+     output_folder = 'AttendanceCapture'
+     if not os.path.exists(output_folder):
+         os.makedirs(output_folder)
+         print(f"Created directory: {output_folder}")
+     else:
+         print(f"Directory already exists: {output_folder}")
+
+     count = 0
+     for i, image_data in enumerate(images):
+         try:
+             print(f"Processing image {i + 1}")
+             image_data = base64.b64decode(image_data.split(',')[1])
+             image_np = np.frombuffer(image_data, np.uint8)
+             image = cv2.imdecode(image_np, cv2.IMREAD_COLOR)
+             if image is None:
+                 print(f"Failed to decode image {i + 1}")
+                 continue
+
+             (h, w) = image.shape[:2]
+             blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), [104, 117, 123], False, False)
+             net.setInput(blob)
+             detections = net.forward()
+
+             for j in range(detections.shape[2]):
+                 confidence = detections[0, 0, j, 2]
+                 if confidence > 0.7:
+                     box = detections[0, 0, j, 3:7] * np.array([w, h, w, h])
+                     (startX, startY, endX, endY) = box.astype("int")
+                     face = image[startY:endY, startX:endX]
+                     cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)
+
+                     count += 1
+                     face_filename = os.path.join(output_folder, f'face_{count}.jpg')
+                     cv2.imwrite(face_filename, face)
+                     print(f'Saved {face_filename}')
+                     break
+         except Exception as e:
+             print(f"Error processing image {i + 1}: {e}")
+             continue
+
+     print(f"Total faces saved: {count}")
+     # save_to_csv(student_id, name)
+     return count >= 1
+ ###
+
+ def save_photos(student_id, images):
+     output_folder = f'Dataset/{student_id}'
+     if not os.path.exists(output_folder):
+         os.makedirs(output_folder)
+         print(f"Created directory: {output_folder}")
+     else:
+         print(f"Directory already exists: {output_folder}")
+
+     count = 0
+     for i, image_data in enumerate(images):
+         try:
+             print(f"Processing image {i + 1}")
+             image_data = base64.b64decode(image_data.split(',')[1])
+             image_np = np.frombuffer(image_data, np.uint8)
+             image = cv2.imdecode(image_np, cv2.IMREAD_COLOR)
+             if image is None:
+                 print(f"Failed to decode image {i + 1}")
+                 continue
+
+             (h, w) = image.shape[:2]
+             blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), [104, 117, 123], False, False)
+             net.setInput(blob)
+             detections = net.forward()
+
+             for j in range(detections.shape[2]):
+                 confidence = detections[0, 0, j, 2]
+                 if confidence > 0.7:
+                     box = detections[0, 0, j, 3:7] * np.array([w, h, w, h])
+                     (startX, startY, endX, endY) = box.astype("int")
+                     face = image[startY:endY, startX:endX]
+                     cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)
+
+                     count += 1
+                     face_filename = os.path.join(output_folder, f'face_{count}.jpg')
+                     cv2.imwrite(face_filename, face)
+                     print(f'Saved {face_filename}')
+                     break
+         except Exception as e:
+             print(f"Error processing image {i + 1}: {e}")
+             continue
+
+     print(f"Total faces saved: {count}")
+     # save_to_csv(student_id, name)
+     return count >= 20
+
+
+ @app.route('/start_capture', methods=['POST'])
+ def start_capture():
+     data = request.get_json()
+     student_id = data.get('student_id')
+     images = data.get('images')
+
+     if not student_id or not images:
+         return jsonify({'success': False, 'message': 'Invalid data'})
+
+     try:
+         if save_photos(student_id, images):
+             return jsonify({'success': True, 'message': 'Images saved successfully'})
+         else:
+             return jsonify({'success': False, 'message': 'Failed to save sufficient images'})
+
+     except Exception as e:
+         print(f'Error: {e}')
+         return jsonify({'success': False, 'message': 'Failed to save images'})
+
+
+
+ @app.route('/model_training', methods=['POST'])
+ def model_training():
+     success = ModelFineTuning()
+     if success:
+         return jsonify(success=True, message="Model training completed successfully.")
+     else:
+         return jsonify(success=False, message="Model training failed.")
+
+
+ ###
+ @app.route('/take_photo', methods=['POST'])
+ def take_photo():
+     data = request.get_json()
+     # student_id = data.get('student_id')
+     images = data.get('images')
+
+     try:
+         if save_att_photos(images):
+             return jsonify({'success': True, 'message': 'Image saved successfully'})
+         else:
+             return jsonify({'success': False, 'message': 'Failed to save image'})
+
+     except Exception as e:
+         print(f'Error: {e}')
+         return jsonify({'success': False, 'message': 'Failed to save image'})
+
+
+ @app.route('/face_prediction', methods=['POST'])
+ def face_prediction():
+     image_path = 'AttendanceCapture/face_1.jpg'
+     predicted_label = predict_candidate(image_path)
+     # success = predict_candidate(image_path)
+     if predicted_label:
+         return jsonify(success=True, predicted_label=predicted_label)
+     else:
+         return jsonify(success=False, message="Attendance failed")
+
+ ###
+
+
+
+ @app.route('/check_student_id', methods=['GET'])
+ def check_student_id():
+     student_id = request.args.get('student_id')
+     directory_exists = os.path.exists(f'Dataset/{student_id}')
+     return jsonify({"directory_exists": directory_exists})
+
+
+
+ @app.route("/register", methods=["POST", "GET"])
+ def register():
+     if request.method == "POST":
+         name = request.form["name"]
+         student_id = request.form["student_id"]
+         if os.path.exists(f'Dataset/{student_id}'):
+             return jsonify({"success": False, "message": "Student is already registered."})
+         else:
+             return jsonify({"success": True, "student_id": student_id})
+     return render_template("register.html")
+
+
+
+ @app.route('/capture')
+ def capture_photos():
+     student_id = request.args.get('student_id')
+     # name = request.args.get('name')
+     print(f"Capturing photos for student_id: {student_id}")
+     return render_template("capture_photos.html", student_id=student_id)
+
+
+
+ @app.route('/attendance')
+ def attendance():
+     return render_template("attendance.html")
+
+
+
+ @app.route('/')
+ def home():
+     return render_template("index.html")
+
+
+ # if __name__ == "__main__":
+ #     app.run(debug=True)
+
+
+ class TrainingHandler(FileSystemEventHandler):
+     def on_modified(self, event):
+         print(f'File changed: {event.src_path}')
+         if 'custom_gradient.py' in event.src_path:
+             print("Restarting training...")
+             ModelFineTuning()
+
+ if __name__ == "__main__":
+
+     path_to_watch = os.environ.get('TF_OPS_PATH', 'C:\\Users\\Shruti Sundaram\\AppData\\Local\\Programs\\Python\\Python310\\Lib\\site-packages\\tensorflow\\python\\ops')
+     # path_to_watch = config.TF_OPS_PATH
+     event_handler = TrainingHandler()
+     observer = Observer()
+     observer.schedule(event_handler, path=path_to_watch, recursive=False)
+     observer.start()
+
+     try:
+         print("Starting Flask app...")
+         app.run(debug=True)
+     except KeyboardInterrupt:
+         observer.stop()
+     observer.join()
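
To illustrate the request shape the /start_capture route expects, here is a hypothetical client sketch (not part of the commit). It assumes the Flask development server's default address and an existing JPEG; the field names match the route's request.get_json() handling, and save_photos only reports success once at least 20 faces are detected:

# Sketch: post base64 data-URL frames to the registration endpoint.
import base64
import requests  # assumed HTTP client; any equivalent would work

with open('sample_frame.jpg', 'rb') as f:  # illustrative capture frame
    data_url = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode()

payload = {'student_id': '12345', 'images': [data_url] * 25}
resp = requests.post('http://127.0.0.1:5000/start_capture', json=payload)
print(resp.json())  # {'success': True, ...} once 20 or more faces were saved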