#!/usr/bin/python3
import time
import cv2
import math
import operator
import numpy as np
import mediapipe as mp
import rospy
from gesture.msg import *
# from cv_bridge import CvBridge
# from sensor_msgs.msg import Image
# from threading import Timer
# from UDP import UDP_Manager
import threading
import time
class GestureDetect:
    """ROS node that recognizes hand gestures from a webcam using MediaPipe Hands.

    Single-hand gestures: UP/DOWN/LEFT/RIGHT, ROUND, FORWARD/BACKWARD, STOP.
    Two-hand gestures: TRIANGLE, RECTANGLE, LINE.  A recognized gesture is
    published on /gesture_detect_topic only after it has been seen on several
    consecutive frames (simple debouncing).
    """

    def __init__(self):
        # NOTE(review): node name contains a typo ('recogniton') -- left as-is
        # because other tooling may already reference it.
        rospy.init_node('gesture_recogniton', anonymous=True)
        self.param_init()
        self.rate = rospy.Rate(self.rospy_rate)
        # self.bridge = CvBridge()
        self.topic_init()

        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_hands = mp.solutions.hands
        self.hands =  self.mp_hands.Hands(
            min_detection_confidence=0.85, min_tracking_confidence=0.75, max_num_hands=2)
        # Debounce state: last recognized gesture name and the number of
        # consecutive frames it has been seen on.
        self.gesture_last_name = ""
        self.gesture_frame = 0

        # Open the camera, retrying up to 10 times.
        # NOTE(review): the bare except silently swallows every error, and
        # cv2.VideoCapture typically does NOT raise on failure (it returns an
        # unopened capture object) -- consider checking self.cap.isOpened()
        # instead of relying on an exception here.
        self.try_times = 0
        while(1):
            try:
                self.cap = cv2.VideoCapture(self.video)
                self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
                self.cap.read()
                break
            except:
                self.try_times += 1
                if self.try_times > 10:
                    break

        self.image = None
        # self.t_flag = True
        # self.t = threading.Thread(target=self.get_img,args=(lambda : self.t_flag,))
        # self.t.start()


        # JPEG encoding quality plus a red/green/blue striped fallback image,
        # used by the (currently disabled) UDP path when a frame encodes too large.
        self.jpegQuality = 20
        self.img_width = 640
        self.img_height = 480
        self.errImg = np.zeros((self.img_height, self.img_width, 3), np.uint8)
        self.errImg[:,0:200] = [0, 0, 255]
        self.errImg[:,220:420] = [0, 255, 0]
        self.errImg[:,440:640] = [255, 0, 0]
        self.errImgData = cv2.imencode('.jpg', self.errImg, (cv2.IMWRITE_JPEG_QUALITY, self.jpegQuality))[1].tobytes()

        # self.udp  = UDP_Manager(self.rev_data_cb)
        # self.udp.Start()
        # self.udp.targetDict[('127.0.0.1',1111)] = 1

    def rev_data_cb(self,recvData, recvAddr):
        # Placeholder UDP receive callback; the UDP path is currently disabled.
        return

    def param_init(self):
        # Read node configuration from the ROS parameter server.
        self.pkg_path = rospy.get_param('/pkg_path/gesture')
        self.video = rospy.get_param("/cam_index")
        self.rospy_rate = rospy.get_param("/rospy_rate")

    def topic_init(self):
        # Publisher for recognized gestures (gesture name + display string).
        self.gesture_detect_pub = rospy.Publisher('/gesture_detect_topic', gesture_msg, queue_size=1)
        # self.gesture_image = rospy.Publisher('/gesture_image', Image, queue_size=1)

    def gesture_detect(self):
        """Grab one camera frame, classify the hand gesture and publish it.

        Draws the hand landmarks, the recognized gesture label and the FPS
        onto the frame and shows it in an OpenCV window.  Publishing is
        debounced: a gesture is only published after it has been stable for
        several consecutive frames (see the counter logic at the bottom).
        """
        success, self.image = self.cap.read()
        if self.image is None:
            return
        start = time.time()  # frame start time, used for the FPS overlay
        image_height, image_width, _ = self.image.shape
        # MediaPipe expects RGB input; marking the buffer read-only lets
        # MediaPipe avoid an internal copy.
        self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        self.image.flags.writeable = False


        results = self.hands.process(self.image)

        self.image.flags.writeable = True
        self.image = cv2.cvtColor(self.image, cv2.COLOR_RGB2BGR)
        gesture_name = ""


        if results.multi_hand_landmarks:
            bend_angle = [[0 for i in range(5)] for j in range(2)] # per-hand bend angle of each finger (degrees)
            bool_bend = [[0 for i in range(5)] for j in range(2)]  # per-hand flags: 1 if the finger is bent
            tip_dist = [[0 for i in range(4)] for j in range(2)]  # per-hand distances from thumb tip to the other fingertips
            dir_vec = [0 for i in range(2)]  # per-hand palm-width vector (pinky MCP -> index MCP), used as a scale reference
            hand_num = len(results.multi_hand_landmarks)
            for (i, hand_landmarks) in enumerate(results.multi_hand_landmarks):
                bend_angle[i] = list(self.cal_bend_angle(hand_landmarks, image_width, image_height))
                bool_bend[i] = list(self.is_bend(bend_angle[i]))
                dir_vec[i] = list(self.cal_dir_vec(hand_landmarks, image_width, image_height))
                # print(np.linalg.norm(dir_vec[0]))
                # Normalize by palm width so thresholds are scale-invariant.
                # NOTE(review): dividing a list by a numpy scalar yields an
                # ndarray, so tip_dist[i] ends up an ndarray despite list().
                tip_dist[i] = list(self.cal_tip_dist(hand_landmarks, image_width, image_height)) / np.linalg.norm(
                    dir_vec[i])  # scale-normalized distances
                self.mp_drawing.draw_landmarks(
                    self.image, hand_landmarks, self.mp_hands.HAND_CONNECTIONS)
            if (hand_num == 1):  # single-hand gestures: up/down/left/right/round, forward/backward, stop
                if (operator.eq(bool_bend[0], [0, 1, 1, 1, 1])):
                    # Only the thumb is extended.
                    sum_tip_dist = 0
                    for i in range(4):
                        sum_tip_dist += tip_dist[0][i]
                    avg_tip_dist = sum_tip_dist / 4
                    # print(avg_tip_dist)
                    if (avg_tip_dist > 0.7):  # thumb held away from the fist -> directional gesture
                        hand_landmarks = results.multi_hand_landmarks[0]
                        dx = (hand_landmarks.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_MCP].x -
                              hand_landmarks.landmark[
                                  self.mp_hands.HandLandmark.PINKY_MCP].x) * image_width
                        dx = np.fabs(dx)
                        dy = (hand_landmarks.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_MCP].y -
                              hand_landmarks.landmark[
                                  self.mp_hands.HandLandmark.PINKY_MCP].y) * image_height
                        dy = np.fabs(dy)
                        if (dx < dy):  # knuckle line is mostly vertical -> UP or DOWN
                            if (hand_landmarks.landmark[self.mp_hands.HandLandmark.THUMB_TIP].y <
                                    hand_landmarks.landmark[
                                        self.mp_hands.HandLandmark.WRIST].y):
                                cv2.putText(self.image, "UP", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200),
                                            5)
                                gesture_name = "UP"
                            elif (hand_landmarks.landmark[self.mp_hands.HandLandmark.THUMB_TIP].y >
                                  hand_landmarks.landmark[
                                      self.mp_hands.HandLandmark.WRIST].y):
                                cv2.putText(self.image, "DOWN", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200),
                                            5)
                                gesture_name = "DOWN"
                        elif (dx > dy):  # knuckle line is mostly horizontal -> LEFT or RIGHT
                            if (hand_landmarks.landmark[self.mp_hands.HandLandmark.THUMB_TIP].x <
                                    hand_landmarks.landmark[
                                        self.mp_hands.HandLandmark.WRIST].x):
                                cv2.putText(self.image, "LEFT", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200),
                                            5)
                                gesture_name = "LEFT"
                            elif (hand_landmarks.landmark[self.mp_hands.HandLandmark.THUMB_TIP].x >
                                  hand_landmarks.landmark[
                                      self.mp_hands.HandLandmark.WRIST].x):
                                cv2.putText(self.image, "RIGHT", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2,
                                            (0, 0, 200), 5)
                                gesture_name = "RIGHT"
                    else:  # thumb close to the fingertips -> round gesture
                        cv2.putText(self.image, "ROUND", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200), 5)
                        gesture_name = "ROUND"
                elif (operator.eq(bool_bend[0], [0, 0, 0, 0, 0])):  # open palm -> forward/backward via finger depth
                    # Require the palm to be roughly upright before using z.
                    # NOTE(review): hand_landmarks here is the loop variable
                    # left over from the enumerate above; with hand_num == 1
                    # it is hand 0, so this is safe.
                    is_vertical = np.dot(dir_vec[0]/np.linalg.norm(dir_vec[0]), np.array([0, -1, 0]))
                    if(is_vertical > 0.8):
                        depth = (hand_landmarks.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_TIP].z -
                                 hand_landmarks.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_MCP].z) * image_width
                        depth /= np.linalg.norm(dir_vec[0])
                        # print(depth)
                        if (depth < -0.3):
                            cv2.putText(self.image, "BACKWARD", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200), 5)
                            gesture_name = "BACKWARD"
                        elif (depth > 0.3):
                            cv2.putText(self.image, "FORWARD", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200), 5)
                            gesture_name = "FORWARD"
                elif (operator.eq(bool_bend[0], [1, 1, 1, 1, 1])):  # closed fist -> stop
                    cv2.putText(self.image, "STOP", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200), 5)
                    gesture_name = "STOP"
            elif (hand_num == 2):  # two-hand gestures: triangle/rectangle/line
                hand_landmarkA = results.multi_hand_landmarks[0]
                hand_landmarkB = results.multi_hand_landmarks[1]
                avg_dir_norm = (np.linalg.norm(dir_vec[0]) + np.linalg.norm(dir_vec[1])) / 2
                if (operator.eq(bool_bend[0], [0, 0, 0, 0, 0]) and operator.eq(bool_bend[1],
                                                                               [0, 0, 0, 0, 0])):  # two open palms -> triangle or rectangle
                    left_thumb_vector = np.array(
                        [(hand_landmarkA.landmark[self.mp_hands.HandLandmark.THUMB_TIP].x) * image_width,
                         (hand_landmarkA.landmark[self.mp_hands.HandLandmark.THUMB_TIP].y) * image_height])
                    left_index_vector = np.array(
                        [(hand_landmarkA.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].x) * image_width,
                         (hand_landmarkA.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].y) * image_height])
                    right_thumb_vector = np.array(
                        [(hand_landmarkB.landmark[self.mp_hands.HandLandmark.THUMB_TIP].x) * image_width,
                         (hand_landmarkB.landmark[self.mp_hands.HandLandmark.THUMB_TIP].y) * image_height])
                    right_index_vector = np.array(
                        [(hand_landmarkB.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].x) * image_width,
                         (hand_landmarkB.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].y) * image_height])
                    # distNM = normalized distance between landmark N of one
                    # hand and landmark M of the other (4 = thumb tip, 8 =
                    # index tip).
                    dist44 = np.linalg.norm(left_thumb_vector - right_thumb_vector)/avg_dir_norm
                    dist48 = np.linalg.norm(left_thumb_vector - right_index_vector)/avg_dir_norm
                    dist84 = np.linalg.norm(left_index_vector - right_thumb_vector)/avg_dir_norm
                    dist88 = np.linalg.norm(left_index_vector - right_index_vector)/avg_dir_norm
                    # print(dist44, dist48, dist84, dist88)
                    if (dist44 < 0.35 and dist88 < 0.35):
                        # thumbs touch and index tips touch -> triangle shape
                        cv2.putText(self.image, "TRIANGLE", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200), 5)
                        gesture_name = "TRIANGLE"
                    elif (dist48 < 0.35 and dist84 < 0.35):
                        # each thumb touches the other hand's index -> rectangle
                        cv2.putText(self.image, "RECTANGLE", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200), 5)
                        gesture_name = "RECTANGLE"
                elif (operator.eq(bool_bend[0], [0, 0, 0, 1, 1]) and operator.eq(bool_bend[1],
                                                                                 [0, 0, 0, 1, 1])):  # index+middle extended on both hands -> line
                    left_index_vector = np.array(
                        [(hand_landmarkA.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].x) * image_width,
                         (hand_landmarkA.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].y) * image_height])
                    left_middle_vector = np.array(
                        [(hand_landmarkA.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x) * image_width,
                         (hand_landmarkA.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y) * image_height])
                    right_index_vector = np.array(
                        [(hand_landmarkB.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].x) * image_width,
                         (hand_landmarkB.landmark[self.mp_hands.HandLandmark.INDEX_FINGER_TIP].y) * image_height])
                    right_middle_vector = np.array(
                        [(hand_landmarkB.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x) * image_width,
                         (hand_landmarkB.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y) * image_height])
                    dist88 = np.linalg.norm(left_index_vector - right_index_vector)/avg_dir_norm
                    dist1212 = np.linalg.norm(left_middle_vector - right_middle_vector)/avg_dir_norm
                    # print(dist88, dist1212)
                    if (dist88 < 0.50  and dist1212 < 0.20):
                        cv2.putText(self.image, "LINE", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 200), 5)
                        gesture_name = "LINE"
        # Debounce: publish only after the same gesture has been seen on
        # enough consecutive frames, then re-publish every 3rd frame while
        # it persists.
        if (operator.eq(gesture_name, "") == False):
            if (operator.eq(gesture_name, self.gesture_last_name)):
                self.gesture_frame += 1
                if (self.gesture_frame >= 3):
                    self.gesture_frame = 0
                    self.gesture_detect_pub.publish(gesture_msg(gesture_name,"Gesture: %s" % gesture_name))
            else:
                self.gesture_frame = 0
        else:
            self.gesture_frame = 0
        self.gesture_last_name = gesture_name
        end = time.time()  # frame end time
        fps = 1 / (end - start)  # frames per second over this iteration only
        cv2.putText(self.image, "%.0f" % fps, (25, 25), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 200), 2)  # draw the FPS
        cv2.imshow("win",self.image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # 'q' only skips the rest of this frame; MainLoop keeps running.
            return
        # img_msg = self.bridge.cv2_to_imgmsg(self.image, 'rgb8')
        # self.gesture_image.publish(img_msg)
        # JPEG-encode the annotated frame for the (disabled) UDP streaming path.
        data = cv2.imencode('.jpg',self.image, (cv2.IMWRITE_JPEG_QUALITY, self.jpegQuality))[1].tobytes()
        # if len(data) < 64000:
        #     self.udp.Send(data)
        # else:
        #     self.udp.Send(self.errImgData)


    def cal_bend_angle(self,hand_landmarks, width, height):
        """Return a list of 5 per-finger bend angles in degrees.

        For finger i (thumb..pinky), the angle is computed between two
        adjacent bone vectors built from the finger's landmarks (indices
        4*i+1 .. 4*i+4; landmark 0 is the wrist).  The z coordinate is scaled
        by width, matching MediaPipe's convention that z shares x's scale.
        """
        bone_a_vector = np.array([0,0,0])
        bone_b_vector = np.array([0,0,0])
        bend_angle = [0] * 5
        for i in range(5):
            for j in range(2):
                # j == 0: joint pair closest to the wrist
                if j == 0:
                    bone_a_vector = np.array(
                        [(hand_landmarks.landmark[4 * i + 1].x - hand_landmarks.landmark[0].x) * width,
                        (hand_landmarks.landmark[4 * i + 1].y - hand_landmarks.landmark[0].y) * height,
                        (hand_landmarks.landmark[4 * i + 1].z - hand_landmarks.landmark[0].z) * width])
                    bone_b_vector = np.array(
                        [(hand_landmarks.landmark[4 * i + 2].x - hand_landmarks.landmark[4 * i + 1].x) * width,
                        (hand_landmarks.landmark[4 * i + 2].y - hand_landmarks.landmark[4 * i + 1].y) * height,
                        (hand_landmarks.landmark[4 * i + 2].z - hand_landmarks.landmark[4 * i + 1].z) * width])
                else:
                    bone_a_vector = np.array(
                        [(hand_landmarks.landmark[4 * i + 1 + j].x - hand_landmarks.landmark[4 * i + j].x) * width,
                        (hand_landmarks.landmark[4 * i + 1 + j].y - hand_landmarks.landmark[4 * i + j].y) * height,
                        (hand_landmarks.landmark[4 * i + 1 + j].z - hand_landmarks.landmark[4 * i + j].z) * width])
                    bone_b_vector = np.array(
                        [(hand_landmarks.landmark[4 * i + 2 + j].x - hand_landmarks.landmark[4 * i + 1 + j].x) * width,
                        (hand_landmarks.landmark[4 * i + 2 + j].y - hand_landmarks.landmark[4 * i + 1 + j].y) * height,
                        (hand_landmarks.landmark[4 * i + 2 + j].z - hand_landmarks.landmark[4 * i + 1 + j].z) * width])
            # NOTE(review): this accumulation is OUTSIDE the j-loop, so only
            # the j == 1 (outer-joint) vectors ever contribute -- the j == 0
            # vectors are computed and then overwritten.  If the intent was
            # to sum the bend at both joints, the "+=" should be indented one
            # level; however the 50-degree threshold in is_bend may have been
            # tuned against the current behavior, so confirm before changing.
            bend_angle[i] += np.arccos(np.dot(bone_a_vector, bone_b_vector) / (np.linalg.norm(bone_a_vector) * np.linalg.norm(bone_b_vector))) * 180 / math.pi
        return bend_angle


    def is_bend(self,bend_angle):
        """Map per-finger bend angles (degrees) to 0/1 flags; >50 deg = bent."""
        bool_bend = [0] * 5
        for i in range(5):
            if (bend_angle[i] > 50):
                bool_bend[i] = 1
        return bool_bend


    def cal_tip_dist(self,hand_landmarks, width, height):
        """Return the 4 distances from the thumb tip (landmark 4) to the
        other fingertips (landmarks 8, 12, 16, 20), in pixel units
        (z scaled by width, per MediaPipe convention).
        """
        tip_dist = [0] * 4
        for i in range(4):
            tip_dist_vector = np.array(
                [(hand_landmarks.landmark[8 + 4 * i].x - hand_landmarks.landmark[4].x) * width,
                (hand_landmarks.landmark[8 + 4 * i].y - hand_landmarks.landmark[4].y) * height,
                (hand_landmarks.landmark[8 + 4 * i].z - hand_landmarks.landmark[4].z) * width])
            tip_dist[i] = np.linalg.norm(tip_dist_vector)
        return tip_dist


    def cal_dir_vec(self,hand_landmarks, width, height):
        """Return the vector from the pinky MCP (landmark 17) to the index
        MCP (landmark 5) in pixel units.  Its norm approximates the palm
        width and is used elsewhere as a scale-normalization factor.
        """
        dir_vector = np.array(
            [(hand_landmarks.landmark[5].x - hand_landmarks.landmark[17].x) * width,
            (hand_landmarks.landmark[5].y - hand_landmarks.landmark[17].y) * height,
            (hand_landmarks.landmark[5].z - hand_landmarks.landmark[17].z) * width]
        )
        return dir_vector

    def MainLoop(self):
        """Run gesture detection at the configured rate until ROS shuts down."""
        while not rospy.is_shutdown():
            self.gesture_detect()
            self.rate.sleep()

if __name__ == '__main__':
    # Instantiate the gesture-recognition node and block in its main loop.
    detector = GestureDetect()
    detector.MainLoop()
