from queue import SimpleQueue
import multiprocessing
import threading
import cv2
import numpy as np
import mediapipe as mp
import logging
from worker import Worker
from config import *

# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)


class HeadGestureDetector(Worker):
    """Detect head-nod and head-shake gestures from a stream of webcam frames.

    A capturing thread batches frames from ``buffer_webcam`` into fixed-length
    sequences (``LEN_SEQ`` frames, from config); a processing thread estimates
    per-frame head-pose Euler angles (MediaPipe FaceMesh landmarks fed to
    ``cv2.solvePnP``) and publishes ``'head nod'`` / ``'head shake'`` commands
    onto ``buffer_commands`` whenever the frame-to-frame angle change exceeds
    the configured ``THRESHOLD_NOD`` / ``THRESHOLD_SHAKE``.
    """

    # FaceMesh landmark indices used for pose estimation.
    # NOTE(review): presumed to be nose, forehead, mouth corners and eye
    # outer corners — confirm against the MediaPipe landmark map.
    _LANDMARK_INDICES = frozenset((1, 9, 57, 130, 287, 359))

    # 3-D reference points of a generic face model (arbitrary model units).
    # Row order must match the ascending landmark indices above. Hoisted to a
    # class attribute so it is not rebuilt on every frame.
    _FACE_MODEL_POINTS = np.array([
        [285, 528, 200],
        [285, 371, 152],
        [197, 574, 128],
        [173, 425, 108],
        [360, 574, 128],
        [391, 425, 108],
    ], dtype=np.float64)

    def __init__(self,
                 buffer_commands: multiprocessing.SimpleQueue,
                 buffer_webcam: multiprocessing.SimpleQueue) -> None:
        """Store the queues and build the FaceMesh model and worker threads.

        :param buffer_commands: outgoing queue for detected gesture names.
        :param buffer_webcam: incoming queue of webcam frames (np.ndarray).
        """
        # NOTE(review): Worker.__init__ is not invoked here (as in the
        # original) — confirm the base class needs no initialisation.
        self._buffer_commands = buffer_commands
        self._buffer_webcam = buffer_webcam
        self._face_landmarker = mp.solutions.face_mesh.FaceMesh(
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )
        # In-process hand-off queue between the two worker threads below.
        self._buffer_image_sequences = SimpleQueue()
        self._capturing_thread = threading.Thread(
            target=self._sequence_capturing_pipeline)
        self._processing_thread = threading.Thread(
            target=self._gesture_processing_pipeline)

    def _get_image_sequence_from_camera(self) -> list[np.ndarray]:
        """Block until ``LEN_SEQ`` frames have been read from the webcam queue."""
        return [self._buffer_webcam.get() for _ in range(LEN_SEQ)]

    @staticmethod
    def _rotation_matrix_to_angles(rotation_matrix: np.ndarray) -> np.ndarray:
        """Convert a 3x3 rotation matrix to Euler angles in degrees.

        :param rotation_matrix: 3x3 rotation matrix (e.g. from cv2.Rodrigues).
        :returns: shape-(3,) array of [x, y, z] angles in degrees.
        """
        x = np.arctan2(rotation_matrix[2, 1], rotation_matrix[2, 2])
        y = np.arctan2(-rotation_matrix[2, 0], np.sqrt(rotation_matrix[0, 0] ** 2 +
                                                       rotation_matrix[1, 0] ** 2))
        z = np.arctan2(rotation_matrix[1, 0], rotation_matrix[0, 0])
        return np.array([x, y, z]) * 180. / np.pi

    def _get_face_keypoints(self, image: np.ndarray) -> np.ndarray:
        """Estimate head-pose Euler angles (degrees) for a single frame.

        :param image: HxWxC frame. NOTE(review): MediaPipe expects RGB input;
            confirm the webcam pipeline does not deliver BGR.
        :returns: shape-(3,) array of [x, y, z] angles for the last detected
            face, or an empty (size-0) array when no face is found or pose
            estimation fails.
        """
        results = self._face_landmarker.process(image)
        if not results.multi_face_landmarks:
            return np.array([])

        h, w, _ = image.shape
        angles = np.array([])
        for face_landmarks in results.multi_face_landmarks:
            # Bug fix: build the 2-D point list per face. Previously it was
            # initialised once outside this loop, so a second detected face
            # appended past 6 points and broke the solvePnP correspondence.
            face_coordination_in_image = np.array(
                [[int(lm.x * w), int(lm.y * h)]
                 for idx, lm in enumerate(face_landmarks.landmark)
                 if idx in self._LANDMARK_INDICES],
                dtype=np.float64)

            # Pinhole camera approximation: focal length ~ image width,
            # principal point at the image centre, zero lens distortion.
            focal_length = 1 * w
            cam_matrix = np.array([[focal_length, 0, w / 2],
                                   [0, focal_length, h / 2],
                                   [0, 0, 1]])
            dist_matrix = np.zeros((4, 1), dtype=np.float64)
            success, rotation_vec, _translation_vec = cv2.solvePnP(
                self._FACE_MODEL_POINTS, face_coordination_in_image,
                cam_matrix, dist_matrix)
            if not success:
                # Bug fix: previously the flag was ignored and a garbage
                # rotation vector could be fed into cv2.Rodrigues.
                continue
            rotation_matrix, _jacobian = cv2.Rodrigues(rotation_vec)
            angles = self._rotation_matrix_to_angles(rotation_matrix)
        return angles

    def _gesture_processing_pipeline(self) -> None:
        """Consume frame sequences, detect gestures, publish commands.

        Runs forever; intended as a daemon-style thread target.
        """
        while True:
            image_sequence = self._buffer_image_sequences.get()
            # Keep only frames for which a head pose could be recovered.
            face_keypoints = [
                keypoints
                for image in image_sequence
                if (keypoints := self._get_face_keypoints(image)).size != 0
            ]
            # Bug fix: the original wrapped this list in another one-element
            # list and tested the outer (always non-empty) list, so the
            # emptiness guard could never fire.
            if face_keypoints:
                action = self._detect_head_action([face_keypoints])
                if action != 'none':
                    # Lazy %-formatting so the string is built only if emitted.
                    logger.info('Putting: %s', action)
                    self._buffer_commands.put(action)

    def _detect_head_action(self,
                            angles_sequence: list[list[np.ndarray]]) -> str:
        """Classify the first head gesture found in the angle sequences.

        :param angles_sequence: sequences of shape-(3,) [x, y, z] head-pose
            angle arrays in degrees (annotation corrected: elements are
            ndarrays, not tuples).
        :returns: ``'head nod'``, ``'head shake'``, or ``'none'``.
        """
        for angles in angles_sequence:
            prev_angles = None
            for current_angles in angles:
                if prev_angles is not None and current_angles.size != 0:
                    angle_diff = np.array(current_angles) - \
                        np.array(prev_angles)
                    # Large pitch (x) change -> nod; large yaw (y) -> shake.
                    if abs(angle_diff[0]) > THRESHOLD_NOD:
                        return 'head nod'
                    if abs(angle_diff[1]) > THRESHOLD_SHAKE:
                        return 'head shake'
                prev_angles = current_angles
        return 'none'

    def _sequence_capturing_pipeline(self) -> None:
        """Forever batch webcam frames into sequences for the processing thread."""
        while True:
            image_sequence = self._get_image_sequence_from_camera()
            self._buffer_image_sequences.put(image_sequence)

    def run_forever(self) -> None:
        """Start both worker threads and block on them indefinitely."""
        self._capturing_thread.start()
        self._processing_thread.start()
        self._capturing_thread.join()
        self._processing_thread.join()
