from queue import SimpleQueue
import threading
import multiprocessing
import numpy as np
import mediapipe as mp
import logging
from worker import Worker
from config import *

logger = logging.getLogger(__name__)


class FacialExpressionDetector(Worker):
    """Watches webcam frames for mouth gestures and emits them as commands.

    Frames arriving on ``buffer_webcam`` are grouped into fixed-length
    sequences (``LEN_SEQ`` frames) by a single capture thread; a small pool
    of processing threads runs MediaPipe Holistic over each sequence,
    classifies the mouth gesture, and pushes any recognised gesture name
    onto ``buffer_commands``.
    """

    # Number of threads concurrently turning image sequences into gestures.
    _NUM_GESTURE_PROCESSORS = 3

    # Face-mesh landmark indices around the mouth/lips, hoisted out of
    # _get_coordinates so the set is built once, not per frame.
    # NOTE(review): the canonical MediaPipe lips set uses 84 where this set
    # has 34 — confirm whether 34 is intentional or a typo.
    _MOUTH_LANDMARKS = frozenset({
        61, 185, 40, 39, 37, 0, 267, 269, 270,
        409, 291, 375, 321, 405, 314, 17, 34, 181, 91, 146,
    })

    def __init__(self,
                 buffer_commands: multiprocessing.SimpleQueue,
                 buffer_webcam: multiprocessing.SimpleQueue) -> None:
        """Set up the MediaPipe model, internal queue, and worker threads.

        Args:
            buffer_commands: outgoing queue for recognised gesture names.
            buffer_webcam: incoming queue of webcam frames (np.ndarray).
        """
        # NOTE(review): Worker.__init__ is not invoked here; confirm the
        # base class needs no initialisation before adding super().__init__().
        self._buffer_commands = buffer_commands
        self._buffer_webcam = buffer_webcam
        # NOTE(review): this single Holistic instance is shared by
        # _NUM_GESTURE_PROCESSORS threads; MediaPipe solution objects are
        # not documented as thread-safe — consider one instance per thread.
        self._facial_landmarker = mp.solutions.holistic.Holistic()
        # Hands frame sequences from the capture thread to the processor pool.
        self._buffer_image_sequences = SimpleQueue()
        self._capturing_thread = threading.Thread(
            target=self._sequence_capturing_pipeline)
        self._processing_threads = [
            threading.Thread(target=self._gesture_processing_pipeline)
            for _ in range(self._NUM_GESTURE_PROCESSORS)
        ]

    def _get_image_sequence_from_camera(self) -> list[np.ndarray]:
        """Block until LEN_SEQ frames have been read from the webcam queue."""
        return [self._buffer_webcam.get() for _ in range(LEN_SEQ)]

    def _get_coordinates(self, image: np.ndarray) -> list[tuple[float, float]]:
        """Return (x, y) positions of the mouth landmarks found in *image*.

        The result preserves ascending landmark-index order. Returns an
        empty list when no face is detected.
        """
        results = self._facial_landmarker.process(image)
        if not results.face_landmarks:
            return []
        return [
            (landmark.x, landmark.y)
            for i, landmark in enumerate(results.face_landmarks.landmark)
            if i in self._MOUTH_LANDMARKS
        ]

    def _get_gesture(
            self, seq_coords: list[list[tuple[float, float]]]) -> str:
        """Classify a sequence of per-frame mouth coordinates.

        The first frame satisfying a rule decides the gesture.

        Args:
            seq_coords: per-frame (x, y) mouth coordinates as produced by
                _get_coordinates (frames with no face already filtered out).

        Returns:
            'mouth open', 'mouth smile', or 'none'.
        """
        # Offsets of specific landmarks within the filtered coordinate list.
        # NOTE(review): verify these offsets — filtering via enumerate keeps
        # ascending landmark-index order, so e.g. landmark 61 is unlikely to
        # sit at offset 0; these values may be stale.
        _0, _17 = 5, 15
        _61, _291 = 0, 10
        for frame in seq_coords:
            # Vertical lip gap above threshold -> open mouth.
            if frame[_17][1] - frame[_0][1] > 0.1:
                return 'mouth open'
            # Horizontal corner spread above threshold -> smile.
            if frame[_61][0] - frame[_291][0] > 0.1:
                return 'mouth smile'
        return 'none'

    def _sequence_capturing_pipeline(self) -> None:
        """Forever: read one frame sequence and queue it for processing."""
        while True:
            self._buffer_image_sequences.put(
                self._get_image_sequence_from_camera())

    def _gesture_processing_pipeline(self) -> None:
        """Forever: classify queued sequences and publish non-'none' gestures."""
        while True:
            image_sequence = self._buffer_image_sequences.get()
            # Frames with no detected face yield [] and are dropped here.
            seq_coordinates = [
                coords
                for image in image_sequence
                if (coords := self._get_coordinates(image))
            ]
            gesture = self._get_gesture(seq_coordinates)
            # Route diagnostics through the module logger (was a bare print);
            # lazy %-args avoid formatting when the level is disabled.
            logger.debug('Gesture: %s', gesture)
            if gesture != 'none':
                logger.info('Putting: %s', gesture)
                self._buffer_commands.put(gesture)

    def run_forever(self) -> None:
        """Start all threads, then block indefinitely (pipelines never exit)."""
        self._capturing_thread.start()
        for thread in self._processing_threads:
            thread.start()
        self._capturing_thread.join()
        for thread in self._processing_threads:
            thread.join()
