import cv2
import sys
import json
import socket
import argparse
import numpy as np
import mediapipe as mp

def pose_routing(cap, sock, address, debug):
  """Stream MediaPipe pose landmarks from a capture device over UDP.

  Reads frames from `cap` while it is open, runs the MediaPipe Pose
  model on each frame, and sends one JSON datagram per detected
  landmark to `address` via `sock`. Each datagram carries the
  world-space coordinates (x/y/z), the matching screen-space
  coordinates (sx/sy/sz), and the landmark visibility score.
  When `debug` is true, an annotated preview window is shown.
  The loop ends when the capture closes or ESC is pressed.
  """
  pose_solution = mp.solutions.pose
  drawing = mp.solutions.drawing_utils
  drawing_styles = mp.solutions.drawing_styles
  with pose_solution.Pose(
      model_complexity=0,
      min_detection_confidence=0.5,
      min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
      ok, frame = cap.read()
      if not ok:
        # Skip dropped frames rather than aborting the stream.
        continue
      # Mark read-only as a MediaPipe performance hint, then convert
      # OpenCV's BGR frame to the RGB layout the model expects.
      frame.flags.writeable = False
      frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
      results = pose.process(frame)
      if results.pose_world_landmarks:
        # World and screen landmark lists are index-aligned, so walk
        # them in lockstep instead of indexing back into the second list.
        world_points = results.pose_world_landmarks.landmark
        screen_points = results.pose_landmarks.landmark
        for idx, (world, screen) in enumerate(zip(world_points, screen_points)):
          payload = {
            "index": idx,
            "x": world.x,
            "y": world.y,
            "z": world.z,
            "sx": screen.x,
            "sy": screen.y,
            "sz": screen.z,
            "visibility": world.visibility
          }
          sock.sendto(json.dumps(payload).encode(), address)
      if debug:
        # Restore writability and BGR order before drawing/display.
        frame.flags.writeable = True
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        drawing.draw_landmarks(
          frame,
          results.pose_landmarks,
          pose_solution.POSE_CONNECTIONS,
          landmark_drawing_spec = drawing_styles.get_default_pose_landmarks_style())
        cv2.imshow('MediaPipe Pose', frame)
      # ESC (key code 27) exits the streaming loop.
      if cv2.waitKey(5) & 0xFF == 27:
        break

def hands_routing(cap, sock, address, debug):
  """Stream MediaPipe hand landmarks from a capture device over UDP.

  Reads frames from `cap` while it is open, runs the MediaPipe Hands
  model on each frame, and sends one JSON datagram per detected
  landmark (per hand) to `address` via `sock`. Each datagram carries
  world-space coordinates (x/y/z), screen-space coordinates
  (sx/sy/sz), the handedness label ('Left'/'Right'), and the
  handedness classification score in the "visibility" field.
  When `debug` is true, an annotated (mirrored) preview window is
  shown. The loop ends when the capture closes or ESC is pressed.
  """
  mp_drawing = mp.solutions.drawing_utils
  mp_drawing_styles = mp.solutions.drawing_styles
  mp_hands = mp.solutions.hands
  with mp_hands.Hands(
    model_complexity = 0,
    min_detection_confidence = 0.5,
    min_tracking_confidence = 0.5) as hands:
    while cap.isOpened():
      success, image = cap.read()
      if not success:
        # Skip dropped frames rather than aborting the stream.
        continue
      # Mark read-only as a MediaPipe performance hint, then convert
      # OpenCV's BGR frame to the RGB layout the model expects.
      image.flags.writeable = False
      image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
      results = hands.process(image)
      if results.multi_hand_world_landmarks:
        # The three result lists are index-aligned per detected hand;
        # iterate them in lockstep instead of indexing by position.
        for world_hand, screen_hand, handedness in zip(
            results.multi_hand_world_landmarks,
            results.multi_hand_landmarks,
            results.multi_handedness):
          # Handedness is constant for a hand, so fetch it once per
          # hand rather than once per landmark.
          classification = handedness.classification[0]
          for idx, (landmark, screen) in enumerate(
              zip(world_hand.landmark, screen_hand.landmark)):
            message = json.dumps({
              "index": idx,
              "x": landmark.x,
              "y": landmark.y,
              "z": landmark.z,
              "sx": screen.x,
              "sy": screen.y,
              "sz": screen.z,
              'label': classification.label,
              "visibility": classification.score
            })
            sock.sendto(message.encode(), address)
      if debug:
        # Restore writability and BGR order before drawing/display.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
          for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
              image,
              hand_landmarks,
              mp_hands.HAND_CONNECTIONS,
              mp_drawing_styles.get_default_hand_landmarks_style(),
              mp_drawing_styles.get_default_hand_connections_style())
        # Fix: window was previously mislabeled 'MediaPipe Pose'.
        # Mirror the preview so it behaves like a selfie view.
        cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
      # ESC (key code 27) exits the streaming loop.
      if cv2.waitKey(5) & 0xFF == 27:
        break

if __name__ == '__main__':
  # CLI entry point: parse options, announce over UDP, open the camera,
  # and dispatch to the selected landmark-streaming routine.
  parser = argparse.ArgumentParser()
  # Defaults are declared on the parser itself instead of being
  # re-applied by hand after parsing; flags and defaults are unchanged.
  parser.add_argument("-m", "--mode", default='pose',
                      help="pose(default) or hands")
  parser.add_argument("-i", "--device_id", type=int, default=0,
                      help="camera device id")
  parser.add_argument("-a", "--udp_host", default='127.0.0.1',
                      help="udp host server address")
  parser.add_argument("-p", "--udp_port", type=int, default=1415,
                      help="udp port")
  parser.add_argument("-d", "--is_debug_result", default='true',
                      help="is show debug image")
  args = parser.parse_args()

  mode = args.mode
  device_id = args.device_id
  udp_port = args.udp_port
  udp_host = args.udp_host
  # Preserved semantics: only the exact string 'true' enables debug;
  # the unset default is also 'true', so debug stays on by default.
  is_debug_result = args.is_debug_result == 'true'

  address = (udp_host, udp_port)
  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  sock.sendto("mediapipe pose start".encode(), address)
  print('udp://' + udp_host + ':' + str(udp_port))

  cap = cv2.VideoCapture(device_id)
  cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
  cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
  try:
    if mode == 'pose':
      pose_routing(cap, sock, address, is_debug_result)
    elif mode == 'hands':
      hands_routing(cap, sock, address, is_debug_result)
    else:
      # Previously an unknown mode silently did nothing; tell the user.
      print('unknown mode: ' + mode + ' (expected pose or hands)')
  finally:
    # Release the camera, close any debug windows, and close the socket
    # even if a routing loop raises.
    cap.release()
    cv2.destroyAllWindows()
    sock.close()