brxerq committed on
Commit
27f9a13
·
1 Parent(s): 24852ee

Create anti-spoofing.py

Browse files
Files changed (1) hide show
  1. anti-spoofing.py +195 -0
anti-spoofing.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import dlib
3
+ import numpy as np
4
+ import os
5
+ import time
6
+ import mediapipe as mp
7
+ from skimage import feature
8
+
9
class AntiSpoofingSystem:
    """Webcam liveness/anti-spoofing pipeline.

    Combines four signals: blink detection (dlib 68-point landmarks),
    hand-gesture presence (MediaPipe Hands), LBP texture analysis
    (scikit-image) and smartphone detection (YOLOv4 via OpenCV DNN).
    """

    def __init__(self):
        # dlib frontal-face detector + 68-point landmark predictor (local weights file).
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("PreTrained/shape_predictor_68_face_landmarks.dat")

        # MediaPipe Hands in video (tracking) mode, at most one hand.
        self.mp_hands = mp.solutions.hands
        self.hands = self.mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.7)

        # Default webcam, requested at 1280x720.
        self.cap = cv2.VideoCapture(0)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        # NOTE(review): "/Person" is an absolute path at the filesystem root;
        # presumably a relative "Person" directory was intended — confirm,
        # since makedirs("/Person") needs root privileges on most systems.
        self.save_directory = "/Person"
        if not os.path.exists(self.save_directory):
            os.makedirs(self.save_directory)

        # YOLOv4 network + COCO class names, used only to spot 'cell phone'.
        self.net_smartphone = cv2.dnn.readNet('PreTrained/yolov4.weights', 'PreTrained/yolov4.cfg')
        with open('PreTrained/coco.names', 'r') as f:
            self.classes_smartphone = f.read().strip().split('\n')

        # Eye-aspect-ratio threshold below which an eye counts as closed.
        self.EAR_THRESHOLD = 0.25
        # NOTE(review): defined but never read elsewhere in this file.
        self.BLINK_CONSEC_FRAMES = 4

        # Per-eye open/closed state and completed-blink counters.
        self.left_eye_state = False
        self.right_eye_state = False
        self.left_blink_counter = 0
        self.right_blink_counter = 0

        # Smartphone detection is throttled: YOLO runs once every N frames.
        self.smartphone_detected = False
        self.smartphone_detection_frame_interval = 30
        self.frame_count = 0
40
+
41
def calculate_ear(self, eye):
    """Eye aspect ratio: mean of the two vertical eyelid distances
    divided by the horizontal eye width (6 landmark points)."""
    vertical_a = np.linalg.norm(eye[1] - eye[5])
    vertical_b = np.linalg.norm(eye[2] - eye[4])
    horizontal = np.linalg.norm(eye[0] - eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
46
+
47
def analyze_texture(self, face_region):
    """Liveness heuristic via local binary patterns.

    Returns True when more than 0.3 of the normalized uniform-LBP
    histogram mass falls in the first 10 bins.
    """
    grayscale = cv2.cvtColor(face_region, cv2.COLOR_BGR2GRAY)
    pattern = feature.local_binary_pattern(grayscale, P=8, R=1, method="uniform")
    histogram, _ = np.histogram(pattern.ravel(), bins=np.arange(0, 58), range=(0, 58))
    histogram = histogram.astype("float")
    histogram /= (histogram.sum() + 1e-5)
    return np.sum(histogram[:10]) > 0.3
54
+
55
def detect_hand_gesture(self, frame):
    """Return True when MediaPipe finds at least one hand in the BGR frame."""
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    detection = self.hands.process(rgb_frame)
    return detection.multi_hand_landmarks is not None
58
+
59
def detect_smartphone(self, frame):
    """Run YOLOv4 every `smartphone_detection_frame_interval` frames and
    latch the result in `self.smartphone_detected`.

    On a 'cell phone' hit (confidence > 0.5) a red box and label are drawn
    on `frame` and the blink counters are reset.

    Fixes vs. original: (1) the early return on detection skipped
    `frame_count += 1`, leaving the counter stuck on a detection frame so
    YOLO ran every subsequent frame; (2) `smartphone_detected` was reset to
    False on every non-YOLO frame, so a detection was visible for only one
    frame out of the interval. The flag is now refreshed only on frames
    where the network actually runs, and the counter always advances.
    """
    run_detection = (self.frame_count % self.smartphone_detection_frame_interval == 0)
    # Count every processed frame, regardless of detection outcome.
    self.frame_count += 1

    if not run_detection:
        return

    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    self.net_smartphone.setInput(blob)
    output_layers_names = self.net_smartphone.getUnconnectedOutLayersNames()
    detections = self.net_smartphone.forward(output_layers_names)

    for detection in detections:
        for obj in detection:
            scores = obj[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5 and self.classes_smartphone[class_id] == 'cell phone':
                # YOLO outputs normalized centre/size; convert to pixel box corners.
                center_x = int(obj[0] * frame.shape[1])
                center_y = int(obj[1] * frame.shape[0])
                width = int(obj[2] * frame.shape[1])
                height = int(obj[3] * frame.shape[0])
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)

                cv2.rectangle(frame, (left, top), (left + width, top + height), (0, 0, 255), 2)
                cv2.putText(frame, 'Smartphone Detected', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

                # A phone on screen invalidates any blink progress.
                self.smartphone_detected = True
                self.left_blink_counter = 0
                self.right_blink_counter = 0
                return

    # Detection ran and found no phone.
    self.smartphone_detected = False
89
+
90
+ def detect_blink(self, left_ear, right_ear):
91
+ if self.smartphone_detected:
92
+ self.left_eye_state = False
93
+ self.right_eye_state = False
94
+ self.left_blink_counter = 0
95
+ self.right_blink_counter = 0
96
+ return False
97
+
98
+ if left_ear < self.EAR_THRESHOLD:
99
+ if not self.left_eye_state:
100
+ self.left_eye_state = True
101
+ else:
102
+ if self.left_eye_state:
103
+ self.left_eye_state = False
104
+ self.left_blink_counter += 1
105
+
106
+ if right_ear < self.EAR_THRESHOLD:
107
+ if not self.right_eye_state:
108
+ self.right_eye_state = True
109
+ else:
110
+ if self.right_eye_state:
111
+ self.right_eye_state = False
112
+ self.right_blink_counter += 1
113
+
114
+ if self.left_blink_counter > 0 and self.right_blink_counter > 0:
115
+ self.left_blink_counter = 0
116
+ self.right_blink_counter = 0
117
+ return True
118
+ else:
119
+ return False
120
+
121
def run(self):
    """Main capture loop.

    Blocks on the webcam, overlays status text, and saves one expanded
    face crop once all liveness checks pass (>= 5 blinks, a hand gesture
    visible, and a passing texture check). Exits after a capture, after
    the timeout with no hand seen, on 'q', or when the camera fails.
    """
    blink_count = 0
    hand_gesture_detected = False
    image_captured = False
    last_event_time = time.time()
    # NOTE(review): last_event_time is never refreshed inside the loop,
    # so the timeout below measures time since run() started, not since
    # the last event — confirm that this is intended.
    event_timeout = 60
    message_displayed = False

    while True:
        ret, frame = self.cap.read()
        if not ret:
            break  # camera unavailable / stream ended

        self.detect_smartphone(frame)

        if self.smartphone_detected:
            # Phone on screen: warn and discard any blink progress.
            cv2.putText(frame, "Mobile phone detected, can't record attendance", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            blink_count = 0

        if not self.smartphone_detected:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.detector(gray)

            for face in faces:
                # dlib 68-point model: indices 36-41 left eye, 42-47 right eye.
                landmarks = self.predictor(gray, face)
                leftEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)])
                rightEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)])

                ear_left = self.calculate_ear(leftEye)
                ear_right = self.calculate_ear(rightEye)

                if self.detect_blink(ear_left, ear_right):
                    blink_count += 1

                cv2.putText(frame, f"Blink Count: {blink_count}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

                hand_gesture_detected = self.detect_hand_gesture(frame)

                if hand_gesture_detected:
                    cv2.putText(frame, "Hand Gesture Detected", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

                # Crop a region twice the face box in each dimension, clamped to the frame.
                (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
                expanded_region = frame[max(y - h // 2, 0):min(y + 3 * h // 2, frame.shape[0]),
                                        max(x - w // 2, 0):min(x + 3 * w // 2, frame.shape[1])]

                # All liveness checks passed: show the hold-still message once.
                if blink_count >= 5 and hand_gesture_detected and self.analyze_texture(expanded_region) and not message_displayed:
                    cv2.putText(frame, "Please hold still for 2 seconds...", (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    cv2.imshow("Frame", frame)
                    cv2.waitKey(1)
                    time.sleep(2)  # blocks the whole loop while the subject holds still
                    message_displayed = True

                if message_displayed and not image_captured:
                    timestamp = int(time.time())
                    picture_name = f"person_mediumres_face_{timestamp}.jpg"
                    cv2.imwrite(os.path.join(self.save_directory, picture_name), expanded_region)
                    image_captured = True

        cv2.imshow("Frame", frame)
        # Stop after a successful capture, or after the timeout with no hand seen.
        if image_captured or (time.time() - last_event_time > event_timeout and not hand_gesture_detected):
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    self.cap.release()
    cv2.destroyAllWindows()

    # picture_name is only bound when image_captured is True (guarded above).
    if image_captured:
        print(f"Person detected. Face image captured and saved as {picture_name}.")
    elif not hand_gesture_detected:
        print("No real person detected")
192
+
193
if __name__ == "__main__":
    # Script entry point: constructing the system opens the webcam and
    # loads all model files; run() then blocks until the loop exits.
    anti_spoofing_system = AntiSpoofingSystem()
    anti_spoofing_system.run()