import cv2
import numpy as np
import mediapipe as mp
from time import sleep
from google.protobuf.json_format import MessageToDict
import globalvar as gv


# Frames without a detected hand before the stream goes to sleep
# (emits an empty result and pauses for a second).
Count_to_Sleep=10
# A non-matching candidate pose is evicted from the jitter queue once its
# age counter reaches this value without being re-observed.
Count_to_Submit=4
# Maximum number of hands tracked simultaneously by MediaPipe.
Max_num_hands=1
# One jitter queue per tracked hand; each holds [pose_code, age] pairs
# (mutated in place by computeUnclear / cleared by getPoseStream).
mul_q_poses=[[] for i in range(Max_num_hands)]

# 计算姿态编码
def computePose(res_handed, idx_mark):
  # 掌心方向
  cross=np.array([idx_mark[9]-idx_mark[0],idx_mark[17]-idx_mark[5]])
  res_cross=(np.argmax(np.absolute(cross), axis=1)).tolist()
  res_cross[0]=((int(cross[0,res_cross[0]]<0) << 2) | res_cross[0])
  res_cross[1]=((int(cross[1,res_cross[1]]<0) << 2) | res_cross[1])
  # 拇指方向
  zero=idx_mark[0]
  thumb=idx_mark[4]
  four_root=np.array([idx_mark[5],idx_mark[9],idx_mark[13],idx_mark[17]])
  four_tips=np.array([idx_mark[8],idx_mark[12],idx_mark[16],idx_mark[20]])
  thumb_dist=np.linalg.norm(four_root-thumb, axis=1)
  thumb_thr =np.linalg.norm(idx_mark[1]-zero)
  res_thumb=int(np.argmin(thumb_dist))
  if thumb_dist[res_thumb] < thumb_thr:
    if (res_thumb >> 1) == 0:
      res_thumb+=1
  else:
    thumb_dist=np.linalg.norm(four_tips-thumb, axis=1)
    res_thumb=int(np.argmin(thumb_dist))
    if thumb_dist[res_thumb] < thumb_thr:
      res_thumb|=4
    else:
      res_thumb=0
  # 四指指向
  a=np.array(four_tips-four_root)
  b=np.array(four_root-zero)
  cosangle   = (np.linalg.norm(a,axis=1) * np.linalg.norm(b,axis=1))
  cosangle[cosangle==0]=1 #防除0，指尖和指根重合默认90度
  cosangle   = np.sum(a*b,axis=1)/cosangle
  #res_finger = (np.int64(np.floor(cosangle+1.5))).tolist()
  res_finger = (2-np.int64(np.floor(np.abs(1.43*cosangle+1)))).tolist()

  pose=((((((((((((((res_handed\
                <<3)|res_cross[0])\
                <<3)|res_cross[1])\
                <<3)|res_thumb)\
                <<2)|res_finger[0])\
                <<2)|res_finger[1])\
                <<2)|res_finger[2])\
                <<2)|res_finger[3])
  return pose, zero.tolist()

# Compute the pose-jitter (instability) bitmask.
def computeUnclear(Count_to_Submit, q_poses, pose):
  """Update the jitter queue with *pose* and return the OR of the XOR
  differences against all non-matching queued candidates.

  ``q_poses`` holds ``[candidate_pose, age]`` pairs and is mutated in
  place: an exact match resets that candidate's age to 0; every other
  candidate ages by one and is evicted once its age has reached
  ``Count_to_Submit``; if no exact match existed, *pose* is appended as
  a fresh candidate with age 0.
  """
  diff_mask = 0
  seen_exact = False
  idx = 0
  while idx < len(q_poses):
    entry = q_poses[idx]
    delta = entry[0] ^ pose
    if delta == 0:
      # exact re-observation: keep the candidate, reset its age
      seen_exact = True
      entry[1] = 0
      idx += 1
      continue
    diff_mask |= delta
    if entry[1] >= Count_to_Submit:
      # aged out without a re-observation: evict in place
      del q_poses[idx]
    else:
      entry[1] += 1
      idx += 1
  if not seen_exact:
    q_poses.append([pose, 0])
  return diff_mask
    
# Produce the stream of pose codes from the webcam.
def getPoseStream(poses_out_queue=None, END_SIG=None):
  """Capture webcam frames, detect hands with MediaPipe, and push pose
  results onto *poses_out_queue*.

  Args:
    poses_out_queue: optional queue. Per frame with detected hands it
      receives a list of {'pose': [pose, xor_mask], 'site': [x, y, z]}
      dicts; an empty list when the stream goes to sleep; END_SIG on
      shutdown.
    END_SIG: sentinel pushed onto the queue when the stream stops.

  Runs until the global 'TERMINATE' flag is set or ESC is pressed in
  the preview window. Reads/updates module state (mul_q_poses) and the
  gv flags 'SLEEPED', 'VIEW_HANDS', 'TERMINATE'.
  """
  mp_drawing = mp.solutions.drawing_utils
  mp_hands = mp.solutions.hands

  # For webcam input:
  hands = mp_hands.Hands(
      static_image_mode=False, max_num_hands=Max_num_hands,
      min_detection_confidence=0.5, min_tracking_confidence=0.5)
  cap = cv2.VideoCapture(0)
  # NOTE(review): FRAME_WIDTH is bound to "rows" and FRAME_HEIGHT to
  # "cols" -- the names look swapped, but the y-scaling below depends on
  # exactly this pairing; confirm intent before renaming. (Same numeric
  # properties as the original cap.get(3) / cap.get(4).)
  image_rows = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
  image_cols = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

  count_to_sleep = Count_to_Sleep

  while cap.isOpened():
    success, image = cap.read()
    if not success:
      print("Ignoring empty camera frame.")
      continue

    # Mirror the frame and hand it to MediaPipe (which expects RGB);
    # mark read-only during processing to let MediaPipe avoid a copy.
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    image.flags.writeable = False
    results = hands.process(image)
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    if results.multi_hand_landmarks:  # at least one hand detected
      count_to_sleep = Count_to_Sleep  # reset the sleep countdown
      multi_hand_poses = []
      multi_hand_sites = []
      for hand_idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
        # Per-hand recognition. (hand_idx/lm_idx: the original reused the
        # name `idx` for both loops, shadowing the outer index.)
        idx_mark = {}
        handedness_dict = MessageToDict(results.multi_handedness[hand_idx])
        # handedness classification index (left/right)
        res_handed = int(handedness_dict['classification'][0]['index'])
        for lm_idx, landmark in enumerate(hand_landmarks.landmark):
          # Collect every landmark; y is flipped and rescaled by the
          # width/height ratio computed above.
          idx_mark[lm_idx] = np.array(
              [landmark.x, (1 - landmark.y) * image_rows / image_cols,
               landmark.z])
        pose, zero = computePose(res_handed, idx_mark)
        multi_hand_poses.append(pose)
        multi_hand_sites.append(zero)

      # All hands processed: merge with the per-hand jitter queues.
      multi_hand_pose_site = []
      num_hand_poses = len(multi_hand_poses)
      for i_hand, q_poses in enumerate(mul_q_poses):
        if i_hand < num_hand_poses:
          pose = multi_hand_poses[i_hand]
          res_xnor = computeUnclear(Count_to_Submit, q_poses, pose)
          multi_hand_pose_site.insert(
              0, {'pose': [pose, res_xnor], 'site': multi_hand_sites[i_hand]})
        else:
          # No detection for this slot this frame: drop stale candidates.
          q_poses.clear()
      if poses_out_queue:
        # ***** detected-hands output *****
        poses_out_queue.put(multi_hand_pose_site)

    else:
      # No hand detected.
      if count_to_sleep == 0:
        gv.set('SLEEPED', True)
        for q_poses in mul_q_poses:
          q_poses.clear()
        if poses_out_queue:
          # ***** no-hands output *****
          poses_out_queue.put([])
        sleep(1)
      else:
        count_to_sleep -= 1

    # Optionally show the live preview.
    if gv.get('VIEW_HANDS'):
      if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
          mp_drawing.draw_landmarks(image, hand_landmarks,
                                    mp_hands.HAND_CONNECTIONS)
        cv2.imshow('MediaPipe Hands', image)
    else:
      cv2.destroyAllWindows()

    # Stop on the TERMINATE flag or the ESC key.
    key = cv2.waitKey(5) & 0xFF
    if gv.get('TERMINATE') or key == 27:
      # BUGFIX: guard the queue -- the original called .put() on None
      # when run standalone (poses_out_queue defaults to None), raising
      # AttributeError on ESC/TERMINATE.
      if poses_out_queue:
        poses_out_queue.put(END_SIG)
      break
  # Shutdown.
  hands.close()
  cap.release()
  if poses_out_queue and END_SIG:
    # ***** final end-of-stream signal (original behavior: truthy END_SIG
    # callers receive it again here after the in-loop put) *****
    poses_out_queue.put(END_SIG)
if __name__ == '__main__':
  # Run the capture loop standalone: no output queue, no END_SIG;
  # preview/termination controlled via the globalvar flags.
  getPoseStream()