Munaamullah committed on
Commit
4de922e
·
1 Parent(s): df28fdc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +160 -19
app.py CHANGED
@@ -1,28 +1,169 @@
1
  import gradio as gr
2
- from anti_spoofing import AntiSpoofingSystem
 
 
 
 
 
 
 
 
3
 
4
- # Instantiate your AntiSpoofingSystem
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  anti_spoofing_system = AntiSpoofingSystem()
6
 
 
 
 
 
7
  def process_frame(image):
8
- # Process the single image
9
- # This is a placeholder for your actual processing logic
10
- processed_image, blink_count, hand_gesture_detected, smartphone_detected = anti_spoofing_system.process_single_frame(image)
11
-
12
- # Return results
13
- return processed_image, f"Blink Count: {blink_count}", f"Hand Gesture Detected: {hand_gesture_detected}", f"Smartphone Detected: {smartphone_detected}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  iface = gr.Interface(
16
- fn=process_frame,
17
- inputs=gr.Image(type="pil", label="Upload Image or Use Webcam"), # Directly using gr.Image
18
- outputs=[
19
- gr.Image(type="pil", label="Processed Image"), # Directly using gr.Image
20
- gr.Textbox(label="Blink Count"),
21
- gr.Textbox(label="Hand Gesture Detected"),
22
- gr.Textbox(label="Smartphone Detected")
23
- ],
24
- title="Anti-Spoofing System",
25
- description="Upload an image or capture from webcam to check for spoofing indicators."
26
  )
27
 
28
- iface.launch()
 
 
1
  import gradio as gr
2
+ import tensorflow as tf
3
+ import cv2
4
+ import numpy as np
5
+ import os
6
+ import time
7
+ import dlib
8
+ import mediapipe as mp
9
+ from skimage import feature
10
+ # from your_cnn_model import YourCNNModel # Import your CNN model
11
 
12
class AntiSpoofingSystem:
    """Heuristic liveness (anti-spoofing) pipeline for single video frames.

    Combines several independent signals before declaring a real person:
      * eye blinks, via dlib 68-point landmarks and the eye aspect ratio (EAR),
      * a hand gesture, via MediaPipe Hands,
      * absence of a smartphone in view, via YOLOv4 through OpenCV's DNN module,
      * face texture, via an LBP histogram, to reject flat/printed faces.

    Requires local model files: ``shape_predictor_68_face_landmarks.dat``,
    ``yolov4.weights``, ``yolov4.cfg`` and ``coco.names``.
    """

    def __init__(self):
        # Face detector + 68-point landmark predictor (dlib).
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

        # MediaPipe hand tracker in video mode; a single hand is enough.
        self.mp_hands = mp.solutions.hands
        self.hands = self.mp_hands.Hands(
            static_image_mode=False, max_num_hands=1, min_detection_confidence=0.7
        )

        # YOLOv4 detector, used solely to spot a 'cell phone' in frame.
        self.net_smartphone = cv2.dnn.readNet('yolov4.weights', 'yolov4.cfg')
        with open('coco.names', 'r') as f:
            self.classes_smartphone = f.read().strip().split('\n')

        # Blink state. EAR below threshold means the eye is closed.
        self.EAR_THRESHOLD = 0.25
        self.BLINK_CONSEC_FRAMES = 4  # NOTE(review): currently unused by detect_blink
        self.left_eye_state = False
        self.right_eye_state = False
        self.left_blink_counter = 0
        self.right_blink_counter = 0

        # Smartphone detection is throttled: YOLO runs once every N calls,
        # and the last result stays sticky between passes.
        self.smartphone_detected = False
        self.smartphone_detection_frame_interval = 30
        self.frame_count = 0

    def calculate_ear(self, eye):
        """Return the eye aspect ratio for six (x, y) landmark points.

        ``eye`` is indexed in the standard dlib order: 0/3 are the horizontal
        corners, 1-2 and 4-5 the upper/lower lid points.
        """
        A = np.linalg.norm(eye[1] - eye[5])
        B = np.linalg.norm(eye[2] - eye[4])
        C = np.linalg.norm(eye[0] - eye[3])
        return (A + B) / (2.0 * C)

    def analyze_texture(self, face_region):
        """Return True when the LBP histogram looks like live skin texture.

        Uses uniform LBP (P=8, R=1) and thresholds the mass of the first ten
        histogram bins.  NOTE(review): with method="uniform" and P=8 the LBP
        codes only span 0..9, so the 57-edge binning is mostly empty padding;
        behaviour is preserved here — confirm the intended bin layout.
        """
        gray_face = cv2.cvtColor(face_region, cv2.COLOR_BGR2GRAY)
        lbp = feature.local_binary_pattern(gray_face, P=8, R=1, method="uniform")
        lbp_hist, _ = np.histogram(lbp.ravel(), bins=np.arange(0, 58), range=(0, 58))
        lbp_hist = lbp_hist.astype("float")
        lbp_hist /= (lbp_hist.sum() + 1e-5)
        return np.sum(lbp_hist[:10]) > 0.3

    def detect_hand_gesture(self, frame):
        """Return True when MediaPipe finds at least one hand in the BGR frame."""
        results = self.hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        return results.multi_hand_landmarks is not None

    def detect_smartphone(self, frame):
        """Run YOLO on every Nth call and update ``self.smartphone_detected``.

        Fix: ``frame_count`` now advances on *every* call.  Previously the
        increment sat inside the throttle branch, so the counter could stall
        and the detection schedule never progressed.
        """
        run_detection = (self.frame_count % self.smartphone_detection_frame_interval == 0)
        self.frame_count += 1
        if not run_detection:
            # Keep the previous sticky result between YOLO passes.
            return

        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        self.net_smartphone.setInput(blob)
        output_layers_names = self.net_smartphone.getUnconnectedOutLayersNames()
        detections = self.net_smartphone.forward(output_layers_names)
        for detection in detections:
            for obj in detection:
                scores = obj[5:]
                class_id = int(np.argmax(scores))
                confidence = scores[class_id]
                if confidence > 0.5 and self.classes_smartphone[class_id] == 'cell phone':
                    self.smartphone_detected = True
                    # A phone in view invalidates any blink evidence so far.
                    self.left_blink_counter = 0
                    self.right_blink_counter = 0
                    return
        # A full pass found no phone: clear the sticky flag.
        self.smartphone_detected = False

    def detect_blink(self, left_ear, right_ear):
        """Track per-eye open/closed transitions; return True once both eyes
        have completed at least one blink (closed -> open transition).

        While a smartphone is detected all blink state is discarded, so a
        video replayed on a phone screen cannot accumulate blink evidence.
        """
        if self.smartphone_detected:
            self.left_eye_state = False
            self.right_eye_state = False
            self.left_blink_counter = 0
            self.right_blink_counter = 0
            return False

        # Left eye: remember "closed", count on re-opening.
        if left_ear < self.EAR_THRESHOLD:
            if not self.left_eye_state:
                self.left_eye_state = True
        else:
            if self.left_eye_state:
                self.left_eye_state = False
                self.left_blink_counter += 1

        # Right eye: same state machine.
        if right_ear < self.EAR_THRESHOLD:
            if not self.right_eye_state:
                self.right_eye_state = True
        else:
            if self.right_eye_state:
                self.right_eye_state = False
                self.right_blink_counter += 1

        return self.left_blink_counter > 0 and self.right_blink_counter > 0

    def run(self, input_image):
        """Evaluate one BGR frame; return (real_person_detected, cropped_face).

        ``cropped_face`` is an enlarged crop around the last detected face
        (or None when no face was found).
        """
        frame = input_image
        blink_count = 0
        hand_gesture_detected = False
        real_person_detected = False
        cropped_face = None

        self.detect_smartphone(frame)

        if not self.smartphone_detected:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.detector(gray)

            for face in faces:
                landmarks = self.predictor(gray, face)
                leftEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)])
                rightEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)])

                ear_left = self.calculate_ear(leftEye)
                ear_right = self.calculate_ear(rightEye)

                if self.detect_blink(ear_left, ear_right):
                    blink_count += 1

                hand_gesture_detected = self.detect_hand_gesture(frame)

                # Crop a region twice the face box in each dimension, clamped
                # to the frame bounds.
                (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
                cropped_face = frame[max(y - h // 2, 0):min(y + 3 * h // 2, frame.shape[0]),
                                     max(x - w // 2, 0):min(x + 3 * w // 2, frame.shape[1])]

                # Guard against an empty crop (face box clipped at the frame
                # edge) before texture analysis, which would otherwise raise.
                # NOTE(review): blink_count is reset per call and increments at
                # most once per face, so the >= 5 gate rarely passes on a
                # single frame — confirm the intended accumulation semantics.
                if (blink_count >= 5 and hand_gesture_detected
                        and cropped_face is not None and cropped_face.size
                        and self.analyze_texture(cropped_face)):
                    real_person_detected = True
                    break

        return real_person_detected, cropped_face
129
+
130
# Shared anti-spoofing pipeline instance, reused for every processed frame.
anti_spoofing_system = AntiSpoofingSystem()

# Embedding CNN for face identification.  Loaded at import time; the actual
# prediction step in process_frame is still a placeholder.
supervised_embedding_model = tf.keras.models.load_model('v3_embedding_model (2).h5')
136
+
137
def process_frame(image):
    """Run anti-spoofing on one BGR frame and return the annotated frame.

    Fix: the failure path previously returned an ``(image, text)`` tuple while
    the success path returned a single image, breaking the single-frame
    contract relied on by ``video_stream``.  Both paths now draw their status
    text onto the frame and return just the image.
    """
    real_person_detected, cropped_face = anti_spoofing_system.run(image)

    if not real_person_detected:
        cv2.putText(image, "No real person detected or spoofing attempt.",
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        return image

    # Placeholder for the actual CNN model prediction.
    person_id, confidence = "PersonID", 0.99  # TODO: wire in supervised_embedding_model

    result_text = (f"Person identified: {person_id} with confidence: {confidence}"
                   if person_id else "Person not recognized. Registration required.")
    cv2.putText(image, result_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    return image
149
+
150
def video_stream():
    """Generator yielding processed webcam frames for the live interface.

    Fix: the original never released the ``VideoCapture``, leaking the camera
    handle whenever the generator stopped or was garbage-collected; the
    ``try``/``finally`` guarantees release on any exit path.
    """
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            yield process_frame(frame)
    finally:
        cap.release()
158
 
159
# Gradio UI: stream frames from video_stream() into a live video output.
iface = gr.Interface(
    fn=video_stream,
    inputs=None,
    # Fix: gr.outputs.Video is the removed legacy API; use the component
    # class directly, matching the gr.Image/gr.Textbox usage in this app.
    outputs=gr.Video(label="Output Video"),
    live=True,
    title="Live Face Recognition and Verification System",
    description="Live detection and verification of persons from a camera feed.",
)

if __name__ == "__main__":
    iface.launch()