from queue import SimpleQueue
import threading
import multiprocessing
import numpy as np
import mediapipe as mp
import logging
from worker import Worker
from config import *

if USE_ML:
    from ML import predict


logger = logging.getLogger(__name__)


class HandGestureDetector(Worker):
    """Detects static and dynamic hand gestures from webcam frames.

    Frames are pulled from ``buffer_webcam`` in fixed-length sequences,
    hand landmarks are extracted with MediaPipe, and recognized gesture
    names are pushed onto ``buffer_commands`` for downstream consumers.
    """

    # Number of worker threads consuming captured image sequences.
    _NUM_GESTURE_PROCESSORS = 3

    def __init__(self,
                 buffer_commands: multiprocessing.SimpleQueue,
                 buffer_webcam: multiprocessing.SimpleQueue) -> None:
        """Set up the MediaPipe hand model and the pipeline threads.

        Args:
            buffer_commands: queue to which gesture command names are written.
            buffer_webcam: queue from which webcam frames are read.
        """
        self._buffer_commands = buffer_commands
        self._buffer_webcam = buffer_webcam
        self._hand_landmarker = mp.solutions.hands.Hands(
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )
        # Holds lists of LEN_SEQ frames handed from the capturing thread
        # to the processing threads.  (Fixed typo: "sequnces".)
        self._buffer_image_sequences = SimpleQueue()
        self._capturing_thread = threading.Thread(
            target=self._sequence_capturing_pipeline)
        self._processing_threads = [
            threading.Thread(target=self._gesture_processing_pipeline)
            for _ in range(self._NUM_GESTURE_PROCESSORS)
        ]

    def _get_image_sequence_from_camera(self) -> list[np.ndarray]:
        """Block until LEN_SEQ consecutive frames have been collected."""
        return [self._buffer_webcam.get() for _ in range(LEN_SEQ)]

    def _get_coordinates(self,
                         image: np.ndarray) -> list[tuple[float, float, float]]:
        """Return (x, y, z) landmark tuples for the first detected hand.

        Returns an empty list when MediaPipe finds no hand in *image*.
        """
        results = self._hand_landmarker.process(image)
        if not results.multi_hand_landmarks:
            return []
        return [
            (landmark.x, landmark.y, landmark.z)
            for landmark in results.multi_hand_landmarks[0].landmark
        ]

    def _calculate_velocity(self,
                            current_landmarks: list[tuple[float, float, float]],
                            prev_landmarks: list[tuple[float, float, float]]) -> list[float]:
        """Per-landmark speed between two consecutive frames.

        Euclidean displacement of each landmark divided by the frame
        interval VELOCITY_INTERVAL.
        """
        return [
            np.sqrt(
                (cl[0] - pl[0]) ** 2
                + (cl[1] - pl[1]) ** 2
                + (cl[2] - pl[2]) ** 2
            ) / VELOCITY_INTERVAL
            for cl, pl in zip(current_landmarks, prev_landmarks)
        ]

    def _contains_gesture(self,
                          coordinates: list[list[tuple[float, float, float]]]) -> bool:
        """True if any landmark moves faster than THRESHOLD_VELOCITY.

        Fast movement between consecutive frames is the cue that a
        dynamic gesture is being performed.
        """
        prev_landmarks = []
        for current_landmarks in coordinates:
            if prev_landmarks:
                velocities = self._calculate_velocity(
                    current_landmarks, prev_landmarks)
                if any(velocity > THRESHOLD_VELOCITY for velocity in velocities):
                    return True
            prev_landmarks = current_landmarks
        return False

    @staticmethod
    def finger_stretch_detect(point1, point2, point3) -> int:
        """Return 1 if a finger is stretched, else 0.

        Compares L2 distances from a palm reference point (*point1*) to a
        middle joint (*point2*) and to the fingertip (*point3*): a
        stretched finger has its tip farther from the reference than the
        joint.

        Bug fixes vs. the original: the method was missing ``self`` (it
        is called via ``self.finger_stretch_detect(...)``), and landmark
        tuples cannot be subtracted directly — convert to arrays first.
        """
        p1 = np.asarray(point1, dtype=float)
        p2 = np.asarray(point2, dtype=float)
        p3 = np.asarray(point3, dtype=float)
        dist_joint = np.linalg.norm(p2 - p1, ord=2)
        dist_tip = np.linalg.norm(p3 - p1, ord=2)
        return 1 if dist_tip > dist_joint else 0

    # Static-gesture detection entry point.
    def _static_gesture(self,
                        coordinates: list[list[tuple[float, float, float]]]) -> str:
        """Classify each frame's landmarks as a static hand gesture.

        Returns the first recognized gesture name, or 'none' when no
        frame matches a known pattern.
        """
        for landmark in coordinates:
            # Landmarks arrive as tuples; convert once for vector math.
            lm = np.asarray(landmark, dtype=float)
            # Thumb tip (4) touching index tip (8) is the auxiliary
            # "OK" cue used by the "screenshot" gesture.
            ok = bool(np.linalg.norm(lm[4] - lm[8]) < THRESHOLD_OK)
            # Stretched (1) / bent (0) state of each of the 5 fingers.
            figure = np.zeros(5)
            for k in range(5):
                # The thumb (k == 0) uses landmark 17 as its palm
                # reference; the other fingers use the wrist (0).
                reference = lm[17] if k == 0 else lm[0]
                figure[k] = self.finger_stretch_detect(
                    reference, lm[4 * k + 2], lm[4 * k + 4])
            gesture_result = self._get_static_gesture(figure, ok)
            if gesture_result:
                return gesture_result
        return 'none'

    # Helper for static-gesture detection; called by _static_gesture.
    @staticmethod
    def _get_static_gesture(result, ok):
        """Map a per-finger stretch pattern to a gesture name.

        Args:
            result: length-5 array of 0/1 finger-stretched flags,
                thumb first.
            ok: whether thumb and index fingertips are touching.

        Returns:
            The gesture name, or None when no pattern matches.
        """
        if np.array_equal(result, [1, 0, 0, 0, 0]):
            return "favorite"
        # "one" ([0, 1, 0, 0, 0]) and "five" ([1, 1, 1, 1, 1]) are
        # intentionally disabled: they may conflict with dynamic-gesture
        # detection, depending on its design.
        if np.array_equal(result, [0, 1, 1, 0, 0]):
            return "two"
        if np.array_equal(result, [0, 1, 1, 1, 0]):
            return "three"
        if np.array_equal(result, [0, 1, 1, 1, 1]):
            return "four"
        if np.array_equal(result, [1, 0, 0, 0, 1]):
            return "six"
        if np.array_equal(result, [0, 0, 0, 0, 0]):
            return "stone"
        if ok and result[2] == 1 and result[3] == 1 and result[4] == 1:
            return "screenshot"
        return None

    def _get_gesture(self,
                     coordinates: list[list[tuple[float, float, float]]]) -> str:
        """Classify a landmark sequence as a dynamic gesture.

        Checks, in order: a pinch (thumb and forefinger tips close in
        the final frame), a dominant vertical or horizontal forefinger
        sweep, and finally (when USE_ML is enabled) an ML-predicted
        gesture.  Returns 'none' when nothing matches.
        """
        prev_forefinger_tip_position = None
        is_pinching = False
        result_vertical = []
        result_horizontal = []
        for frame_landmark in coordinates:
            current_forefinger_tip_position = frame_landmark[INDEX_FOREFINGER_TIP]
            current_thumb_tip_position = frame_landmark[INDEX_THUMB_TIP]
            distance = ((current_forefinger_tip_position[0] - current_thumb_tip_position[0]) ** 2 +
                        (current_forefinger_tip_position[1] - current_thumb_tip_position[1]) ** 2) ** 0.5
            # Only the last frame's value survives the loop, so the
            # pinch decision is based on the final frame.
            is_pinching = distance < THRESHOLD_DISTANCE_FINGERTIPS

            if prev_forefinger_tip_position:
                vertical_displacement = current_forefinger_tip_position[1] - \
                    prev_forefinger_tip_position[1]
                result_vertical.append(vertical_displacement)
                horizontal_displacement = current_forefinger_tip_position[0] - \
                    prev_forefinger_tip_position[0]
                result_horizontal.append(horizontal_displacement)

            prev_forefinger_tip_position = current_forefinger_tip_position

        if is_pinching:
            return 'hand pinch'
        else:
            # Net displacement over the whole sequence; the dominant
            # axis decides the gesture direction.
            sum_vertical = sum(result_vertical)
            sum_horizontal = sum(result_horizontal)
            if abs(sum_vertical) > abs(sum_horizontal):
                if sum_vertical >= THRESHOLD_FOREFINGER_MOVEMENT:
                    return 'hand down'
                elif sum_vertical <= -THRESHOLD_FOREFINGER_MOVEMENT:
                    return 'hand up'
            else:
                if sum_horizontal >= THRESHOLD_FOREFINGER_MOVEMENT:
                    return 'hand right'
                elif sum_horizontal <= -THRESHOLD_FOREFINGER_MOVEMENT:
                    return 'hand left'
        if USE_ML:
            wanted_gestures = ['six', 'eight']
            for f in coordinates:
                res = predict(f)
                if res in wanted_gestures:
                    print(res)
                    return res
        return 'none'

    def _sequence_capturing_pipeline(self) -> None:
        """Continuously group webcam frames into fixed-length sequences."""
        while True:
            image_sequence = self._get_image_sequence_from_camera()
            self._buffer_image_sequences.put(image_sequence)

    def _gesture_processing_pipeline(self) -> None:
        """Consume frame sequences and emit detected gesture commands."""
        while True:
            image_sequence = self._buffer_image_sequences.get()
            # Keep only frames where a hand was actually detected.
            coordinates = [
                coords
                for image in image_sequence
                if (coords := self._get_coordinates(image))
            ]

            # Static gesture detection.
            static_gesture = self._static_gesture(coordinates)
            if static_gesture != 'none':
                logger.info(f'Putting: {static_gesture}')
                # Fixed NameError: the original put undefined `gesture`.
                self._buffer_commands.put(static_gesture)

            # Dynamic gestures require visible movement first.
            if self._contains_gesture(coordinates):
                gesture = self._get_gesture(coordinates)
                if gesture != 'none':
                    logger.info(f'Putting: {gesture}')
                    self._buffer_commands.put(gesture)

    def run_forever(self) -> None:
        """Start all pipeline threads and block indefinitely.

        The threads run unbounded loops, so join() never returns in
        normal operation.
        """
        self._capturing_thread.start()
        for thread in self._processing_threads:
            thread.start()
        self._capturing_thread.join()
        for thread in self._processing_threads:
            thread.join()