# drive_paddy/detection/strategies/cnn_model.py
import os

import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from torchvision.models import efficientnet_b7

from src.detection.base_processor import BaseProcessor


class CnnProcessor(BaseProcessor):
    """
    Drowsiness detection using a pre-trained EfficientNet-B7 model.

    Unlike earlier versions that ran dlib's face detector internally, this
    processor receives face landmarks from an upstream processor (normalized
    coordinates, e.g. in MediaPipe Face Mesh format) and only crops and
    classifies the face region.
    """

    def __init__(self, config):
        self.settings = config['cnn_model_settings']
        self.model_path = self.settings['model_path']
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # dlib is no longer needed; landmarks are supplied by another processor.
        # self.face_detector = dlib.get_frontal_face_detector()
        self.model = self._load_model()
        # Resize to 224x224 and apply standard ImageNet mean/std normalization.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
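
    # Example of the configuration shape this processor expects (inferred from
    # the lookups in __init__; the path below is illustrative, not the real one):
    #
    #   config = {
    #       "cnn_model_settings": {
    #           "model_path": "models/drowsiness_efficientnet_b7.pth",  # hypothetical path
    #       }
    #   }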

    def _load_model(self):
        """Loads EfficientNet-B7 and the custom 2-class (awake/drowsy) weights.

        The checkpoint at model_path is expected to be a state_dict for an
        EfficientNet-B7 whose classifier head was replaced with a 2-class
        linear layer, matching the architecture built below.
        """
        if not os.path.exists(self.model_path):
            print(f"Error: Model file not found at {self.model_path}")
            return None
        try:
            # Build the architecture (no pretrained weights needed, since they
            # are overwritten by the checkpoint) and swap in a 2-class head.
            model = efficientnet_b7()
            num_ftrs = model.classifier[1].in_features
            model.classifier[1] = torch.nn.Linear(num_ftrs, 2)
            model.load_state_dict(torch.load(self.model_path, map_location=self.device))
            model.to(self.device)
            model.eval()
            print(f"CNN Model '{self.model_path}' loaded successfully on {self.device}.")
            return model
        except Exception as e:
            print(f"Error loading CNN model: {e}")
            return None

    def process_frame(self, frame, face_landmarks=None):
        """
        Runs the CNN on the face region defined by pre-supplied landmarks.

        face_landmarks is expected to be a sequence whose first element has a
        .landmark attribute of normalized (x, y) coordinates, as produced by
        MediaPipe Face Mesh. Returns the annotated frame and a dict with the
        boolean "cnn_prediction".
        """
        if self.model is None or face_landmarks is None:
            return frame, {"cnn_prediction": False}

        is_drowsy_prediction = False
        h, w, _ = frame.shape
        landmarks = face_landmarks[0].landmark

        # Compute the face bounding box from the normalized landmark coordinates.
        x_coords = [lm.x * w for lm in landmarks]
        y_coords = [lm.y * h for lm in landmarks]
        x1, y1 = int(min(x_coords)), int(min(y_coords))
        x2, y2 = int(max(x_coords)), int(max(y_coords))

        # Add some padding around the box, clamped to the frame boundaries.
        padding = 10
        x1 = max(0, x1 - padding)
        y1 = max(0, y1 - padding)
        x2 = min(w, x2 + padding)
        y2 = min(h, y2 + padding)

        # Crop the face region and classify it.
        face_crop = frame[y1:y2, x1:x2]
        if face_crop.size > 0:
            pil_image = Image.fromarray(cv2.cvtColor(face_crop, cv2.COLOR_BGR2RGB))
            image_tensor = self.transform(pil_image).unsqueeze(0).to(self.device)
            with torch.no_grad():
                outputs = self.model(image_tensor)
                _, preds = torch.max(outputs, 1)
            if preds.item() == 1:  # Class 1 is assumed to be 'drowsy'.
                is_drowsy_prediction = True

        # Draw the bounding box and prediction label on the frame.
        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 255, 0), 2)
        label = "Drowsy" if is_drowsy_prediction else "Awake"
        cv2.putText(frame, f"CNN: {label}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)

        return frame, {"cnn_prediction": is_drowsy_prediction}
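

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes that the
# landmarks come from MediaPipe Face Mesh, that a webcam is available at index
# 0, and that the (hypothetical) model path below exists; adjust to your setup.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import mediapipe as mp

    # Hypothetical configuration; only 'model_path' is actually read here.
    config = {"cnn_model_settings": {"model_path": "models/drowsiness_efficientnet_b7.pth"}}
    processor = CnnProcessor(config)

    face_mesh = mp.solutions.face_mesh.FaceMesh(max_num_faces=1)
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        # MediaPipe expects RGB input; OpenCV captures BGR frames.
        results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        annotated, result = processor.process_frame(frame, results.multi_face_landmarks)
        cv2.imshow("CNN drowsiness demo", annotated)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()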