import argparse
import time

import cv2
import dlib
import imutils
import matplotlib.pyplot as plt
import numpy as np
from imutils import face_utils
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from scipy.spatial import distance as dist

# Module-level state shared between the per-frame helpers and stress().
points = []                 # history of eyebrow distances (ints), one per processed face
emotion_classifier = None   # Keras emotion model, loaded inside stress()


def eye_brow_distance(leye, reye):
    """Return the Euclidean distance between two eyebrow landmark points.

    Side effect: appends the truncated (int) distance to the module-level
    ``points`` history so later frames can normalize against the running
    min/max range.

    Args:
        leye: (x, y) landmark of one eyebrow.
        reye: (x, y) landmark of the other eyebrow.

    Returns:
        The raw (float) Euclidean distance.
    """
    global points
    distq = dist.euclidean(leye, reye)
    points.append(int(distq))
    return distq


def emotion_finder(faces, frame):
    """Classify the dominant emotion of a single detected face.

    Args:
        faces: a dlib rectangle for one detected face.
        frame: grayscale image containing that face.

    Returns:
        The most probable label from the model's seven emotion classes.
    """
    global emotion_classifier
    EMOTIONS = ["angry", "disgust", "scared", "happy",
                "sad", "surprised", "neutral"]
    x, y, w, h = face_utils.rect_to_bb(faces)
    # Crop to the face region, then resize to the 64x64 input the model expects.
    roi = frame[y:y + h, x:x + w]
    roi = cv2.resize(roi, (64, 64))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)
    preds = emotion_classifier.predict(roi)[0]
    return EMOTIONS[preds.argmax()]


def normalize_values(points, disp):
    """Map the current eyebrow distance to a stress value in (0, 1].

    ``disp`` is min-max normalized against the observed history; the stress
    value is exp(-normalized), so a distance near the historical minimum
    maps close to 1.0.

    Args:
        points: history of integer eyebrow distances (non-empty).
        disp: the current raw eyebrow distance.

    Returns:
        Stress value in (0, 1].
    """
    lo = np.min(points)
    hi = np.max(points)
    spread = abs(hi - lo)
    # BUGFIX: with a single (or constant) history the original divided by
    # zero and propagated NaN into the stress comparison. Treat a zero
    # spread as "no deviation observed yet" instead.
    normalized_value = abs(disp - lo) / spread if spread else 0.0
    return np.exp(-normalized_value)


def stress(video_path, duration):
    """Analyze a video for up to ``duration`` seconds and tally stress labels.

    For each frame, detects faces, classifies the emotion, measures the
    inner-eyebrow distance, and labels the frame 'stressed' when a negative
    emotion coincides with a high stress value (>= 0.75).

    Args:
        video_path: path to the input video file.
        duration: wall-clock analysis budget in seconds.

    Returns:
        Tuple of (stressed_count, not_stressed_count, most_frequent_label).
    """
    global points, emotion_classifier
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("models/data")
    emotion_classifier = load_model("models/_mini_XCEPTION.102-0.66.hdf5",
                                    compile=False)
    cap = cv2.VideoCapture(video_path)
    points = []
    stress_labels = []

    # Landmark index ranges are constant -- hoisted out of the frame loop.
    (lBegin, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
    (rBegin, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]

    start_time = time.time()
    try:
        while time.time() - start_time < duration:
            ret, frame = cap.read()
            if not ret:
                break
            frame = cv2.flip(frame, 1)
            frame = imutils.resize(frame, width=500, height=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            try:
                for detection in detector(gray, 0):
                    emotion = emotion_finder(detection, gray)
                    shape = face_utils.shape_to_np(predictor(gray, detection))
                    leyebrow = shape[lBegin:lEnd]
                    reyebrow = shape[rBegin:rEnd]
                    distq = eye_brow_distance(leyebrow[-1], reyebrow[0])
                    stress_value = normalize_values(points, distq)
                    # A frame counts as stressed only when a negative emotion
                    # coincides with a high stress value.
                    if emotion in ('scared', 'sad', 'angry') and stress_value >= 0.75:
                        stress_labels.append('stressed')
                    else:
                        stress_labels.append('not stressed')
            except Exception as e:
                # Best-effort per frame: log detector/landmark errors and move on.
                print(f'Error: {e}')
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release the capture, even if an unexpected error escapes
        # the loop (the original leaked it in that case).
        cap.release()

    stressed_count = stress_labels.count('stressed')
    not_stressed_count = stress_labels.count('not stressed')
    # Ties resolve to 'not stressed', matching the original strict comparison.
    most_frequent_label = ('stressed' if stressed_count > not_stressed_count
                           else 'not stressed')
    return stressed_count, not_stressed_count, most_frequent_label


def main():
    """CLI entry point: parse arguments, run the analysis, print a summary."""
    parser = argparse.ArgumentParser(description='Stress Detection from Video')
    # NOTE: the original also passed default='output.mp4' here, but a default
    # on a required argument is dead code -- dropped (behavior unchanged).
    parser.add_argument('--video', type=str, required=True,
                        help='Path to the input video file')
    parser.add_argument('--duration', type=int, default=30,
                        help='Duration for analysis in seconds')
    args = parser.parse_args()

    stressed_count, not_stressed_count, most_frequent_label = stress(
        args.video, args.duration)

    print(f"Stressed frames: {stressed_count}")
    print(f"Not stressed frames: {not_stressed_count}")
    print(f"Most frequent state: {most_frequent_label}")


if __name__ == '__main__':
    main()