import cv2
import numpy as np
import mediapipe as mp
import time


class DriverBehaviorModule:
    """Detect distracted-driving behaviors from a driver-facing camera.

    Uses MediaPipe hand and face landmarks plus simple hand-to-face
    pixel-distance heuristics, debounced over consecutive frames to
    suppress single-frame false positives.
    """

    def __init__(self):
        # MediaPipe solution namespaces.
        self.mp_hands = mp.solutions.hands
        self.mp_face_mesh = mp.solutions.face_mesh
        self.mp_drawing = mp.solutions.drawing_utils

        # Create the detectors ONCE and reuse them for every frame.
        # (The previous code rebuilt a Hands/FaceMesh object inside each
        # detect_* call, which is very slow and defeats the cross-frame
        # tracking enabled by static_image_mode=False.)
        self.hands = self.mp_hands.Hands(static_image_mode=False,
                                         max_num_hands=2,
                                         min_detection_confidence=0.5)
        self.face_mesh = self.mp_face_mesh.FaceMesh(static_image_mode=False,
                                                    max_num_faces=1,
                                                    min_detection_confidence=0.5)

        # Debounced behavior flags exposed to callers.
        # NOTE(review): "smoking" is part of the public result dict but no
        # heuristic currently sets it, so it always stays False.
        self.behaviors = {
            "smoking": False,
            "drinking": False,
            "phone": False,
            "calling": False,
            "normal": True,
        }

        # Consecutive-frame counters used to debounce raw detections.
        self.behavior_counters = {k: 0 for k in self.behaviors}
        self.CONSECUTIVE_FRAMES = 15  # frames required to confirm a behavior

    def close(self):
        """Release the underlying MediaPipe detector resources."""
        self.hands.close()
        self.face_mesh.close()

    def detect_hands(self, frame):
        """Return MediaPipe hand landmarks for a BGR frame (or None)."""
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return self.hands.process(rgb_frame).multi_hand_landmarks

    def detect_face(self, frame):
        """Return MediaPipe face-mesh landmarks for a BGR frame (or None)."""
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return self.face_mesh.process(rgb_frame).multi_face_landmarks

    def analyze_behavior(self, hand_landmarks, face_landmarks, frame_shape):
        """Classify driver behavior from hand and face landmarks.

        Args:
            hand_landmarks: MediaPipe ``multi_hand_landmarks`` (or None).
            face_landmarks: MediaPipe ``multi_face_landmarks`` (or None).
            frame_shape: (height, width, channels) of the source frame.

        Returns:
            dict: the debounced behavior flags (same dict object as
            ``self.behaviors``). "normal" is True iff no risky behavior
            is currently confirmed.
        """
        h, w, _ = frame_shape

        # Raw (single-frame) detections, before debouncing.
        raw = {k: False for k in self.behaviors}

        if hand_landmarks and face_landmarks:
            # Landmark 1 serves as the face reference point.
            face_ref = face_landmarks[0].landmark[1]
            face_x, face_y = int(face_ref.x * w), int(face_ref.y * h)

            for hand in hand_landmarks:
                # Landmark 9 approximates the palm center.
                palm = hand.landmark[9]
                hand_x, hand_y = int(palm.x * w), int(palm.y * h)

                # Pixel distance between palm and face reference point.
                face_dist = np.sqrt((hand_x - face_x) ** 2
                                    + (hand_y - face_y) ** 2)

                # Calling: hand close to the face and at/above face level.
                if 50 < face_dist < 150 and hand_y < face_y:
                    raw["calling"] = True

                # Phone use: hand further away and below the face.
                if 150 < face_dist < 300 and hand_y > face_y:
                    raw["phone"] = True

                # Drinking: hand very close to the face, at/above face level.
                # NOTE(review): this range overlaps "calling" (50-100 px
                # triggers both); kept as-is pending better cues.
                if face_dist < 100 and hand_y < face_y:
                    raw["drinking"] = True

        # Debounce: a risky behavior is confirmed only after being
        # raw-detected for CONSECUTIVE_FRAMES frames in a row.
        risky = [k for k in self.behaviors if k != "normal"]
        for behavior in risky:
            if raw[behavior]:
                self.behavior_counters[behavior] += 1
            else:
                self.behavior_counters[behavior] = 0
            self.behaviors[behavior] = (
                self.behavior_counters[behavior] >= self.CONSECUTIVE_FRAMES)

        # "normal" is simply the absence of any confirmed risky behavior.
        # (Previously "normal" was gated through its own counter too, which
        # left EVERY flag False during the first frames — a contradictory
        # state.)
        self.behaviors["normal"] = not any(self.behaviors[k] for k in risky)

        return self.behaviors

    def process_frame(self, frame):
        """Process one BGR frame: detect landmarks, annotate, classify.

        Returns:
            dict with the annotated image under "frame" and the current
            debounced behavior flags under "behaviors".
        """
        annotated_frame = frame.copy()

        hand_landmarks = self.detect_hands(frame)
        face_landmarks = self.detect_face(frame)

        # Draw detected landmarks for visual feedback.
        if hand_landmarks:
            for hand in hand_landmarks:
                self.mp_drawing.draw_landmarks(
                    annotated_frame, hand, self.mp_hands.HAND_CONNECTIONS)

        if face_landmarks:
            for face in face_landmarks:
                self.mp_drawing.draw_landmarks(
                    annotated_frame, face, self.mp_face_mesh.FACEMESH_CONTOURS,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=self.mp_drawing.DrawingSpec(
                        color=(0, 255, 0), thickness=1))

        # Run behavior analysis only when both hands and face are visible.
        if hand_landmarks and face_landmarks:
            behaviors = self.analyze_behavior(
                hand_landmarks, face_landmarks, frame.shape)

            # Pick the warning for the highest-priority confirmed behavior.
            warning_text = ""
            if behaviors["smoking"]:
                warning_text = "⚠️ 检测到抽烟行为，请安全驾驶！"
            elif behaviors["drinking"]:
                warning_text = "⚠️ 检测到喝水行为，请安全驾驶！"
            elif behaviors["phone"]:
                warning_text = "⚠️ 检测到使用手机，请安全驾驶！"
            elif behaviors["calling"]:
                warning_text = "⚠️ 检测到打电话，请安全驾驶！"

            if warning_text:
                # NOTE(review): cv2.putText renders only a limited Latin
                # glyph set; these CJK strings will likely appear as "?".
                # Consider drawing the overlay with PIL instead — confirm.
                cv2.putText(annotated_frame, warning_text,
                            (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 0, 255), 3)

        return {
            "frame": annotated_frame,
            "behaviors": self.behaviors,
        }


# Demo / manual test entry point.
def _main():
    """Run a live webcam demo of DriverBehaviorModule.

    Opens the default camera (ideally one facing the driver), mirrors
    each frame, runs behavior detection and shows the annotated stream
    until 'q' is pressed.
    """
    behavior_module = DriverBehaviorModule()

    # 0 = default camera; may be replaced with a video file path.
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        # Previously a failed open fell straight through the while loop
        # and the script exited silently — report it instead.
        print("Error: unable to open camera / video source.")
        return

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Mirror the image for a more natural viewing experience.
            frame = cv2.flip(frame, 1)

            result = behavior_module.process_frame(frame)
            cv2.imshow("驾驶员行为识别", result["frame"])

            # Quit on 'q'.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Guarantee resource cleanup even if processing raises.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    _main()
