import cv2
from gaze_tracking import GazeTracking
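# Note: GazeTracking is assumed to come from the gaze_tracking package
# (https://github.com/antoinelame/GazeTracking), based on the API calls used below
# (refresh, annotated_frame, is_blinking, pupil_*_coords). The package must be
# importable, e.g. by placing the cloned repository's gaze_tracking folder next to this script.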
# Path to the input video file (replace with your own video)
video_path = 'apvzjkvnwn.mp4'
# Create a video capture object
cap = cv2.VideoCapture(video_path)
# Check if video opened successfully
if not cap.isOpened():
    print("Error opening video!")
    exit()
# Define output filename (change extension if needed)
output_filename = "gaze_tracked_output.avi"
# Define video writer settings: match the source frame size and frame rate,
# falling back to 25 fps if the source rate cannot be read
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
out = cv2.VideoWriter(output_filename, fourcc, fps, frame_size)
gaze = GazeTracking()
while True:
    # Read a frame from the video
    ret, frame = cap.read()

    # Check if the frame was read correctly
    if not ret:
        print("No frame captured from video. Exiting...")
        break

    # Send the frame to GazeTracking for analysis
    gaze.refresh(frame)

    # Get the annotated frame with gaze information
    annotated_frame = gaze.annotated_frame()

    text = ""
    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"

    # Add text overlay on the frame
    cv2.putText(annotated_frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

    # Get pupil coordinates
    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()

    # Add pupil coordinates overlay
    cv2.putText(annotated_frame, "Left pupil:  " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(annotated_frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

    # Display the frame
    cv2.imshow("Demo", annotated_frame)

    # Write the annotated frame to the output video
    out.write(annotated_frame)

    # Exit on 'Esc' key press
    if cv2.waitKey(1) == 27:
        break
# Release resources
cap.release()
out.release()
cv2.destroyAllWindows()
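# Usage note (assumption): run the script from a directory containing both the
# gaze_tracking package and the input video, e.g.
#   python gaze_video.py    # gaze_video.py is a hypothetical filename for this script
# The annotated result is written to gaze_tracked_output.avi in the working directory.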