import mediapipe as mp
from tqdm import tqdm
import numpy as np
from ..util.log import get_logger
from pathlib import Path
logger = get_logger(__name__)

# Short module-level aliases for the MediaPipe Tasks face-landmarker API,
# used by LandmarkDetectorMP below.
BaseOptions = mp.tasks.BaseOptions
FaceLandmarker = mp.tasks.vision.FaceLandmarker
FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
VisionRunningMode = mp.tasks.vision.RunningMode

# %% mp2dlib
# Mapping from MediaPipe face-mesh landmark indices to the 68-point
# dlib/iBUG layout. Entry i (0-based) lists the MediaPipe indices whose
# positions are averaged to produce dlib point i+1; the trailing comment on
# each entry is the 1-based dlib point number. Entries with two indices
# (e.g. [132, 58]) denote dlib points that fall between two mesh vertices.
mp2dlib_correspondence = [
    
    ## Face Contour
    [127],       # 1
    [234],       # 2
    [93],        # 3
    [132, 58],   # 4
    [58, 172],   # 5
    [136],       # 6
    [150],       # 7
    [176],       # 8
    [152],       # 9
    [400],       # 10
    [379],       # 11
    [365],       # 12
    [397, 288],  # 13
    [361],       # 14
    [323],       # 15
    [454],       # 16
    [356],       # 17
    
    ## Right Brow 
    [70],        # 18
    [63],        # 19
    [105],       # 20
    [66],        # 21
    [107],       # 22
    
    ## Left Brow
    [336],       # 23
    [296],       # 24
    [334],       # 25
    [293],       # 26
    [300],       # 27
    
    ## Nose
    [168, 6],    # 28
    [197, 195],  # 29
    [5],         # 30
    [4],         # 31
    [75],        # 32
    [97],        # 33
    [2],         # 34
    [326],       # 35
    [305],       # 36
    
    ## Right Eye
    [33],        # 37
    [160],       # 38
    [158],       # 39
    [133],       # 40
    [153],       # 41
    [144],       # 42
    
    ## Left Eye
    [362],       # 43
    [385],       # 44
    [387],       # 45
    [263],       # 46
    [373],       # 47
    [380],       # 48
    
    ## Upper Lip Contour Top
    [61],        # 49
    [39],        # 50
    [37],        # 51
    [0],         # 52
    [267],       # 53
    [269],       # 54
    [291],       # 55
    
    ## Lower Lip Contour Bottom
    [321],   # 56
    [314],   # 57
    [17],   # 58
    [84],   # 59
    [91],   # 60
    
    ## Upper Lip Contour Bottom
    [78],   # 61
    [82],   # 62
    [13],   # 63
    [312],   # 64
    [308],   # 65
    
    ## Lower Lip Contour Top
    [317],   # 66
    [14],   # 67
    [87],   # 68
]

# %%
class LandmarkDetectorMP:
    """Detect dlib-style 68-point facial landmarks and blendshape scores
    using MediaPipe's FaceLandmarker in VIDEO running mode."""

    def __init__(
        self,
        model_path="flame2gs/jobs/face_landmarker.task",
        fps=25,
    ):
        """Configure the MediaPipe face landmarker.

        Args:
            model_path: Path to the FaceLandmarker ``.task`` model asset.
            fps: Assumed frame rate of the input sequence; used to derive
                the monotonically increasing timestamp (ms) that VIDEO
                running mode requires per frame.
        """
        logger.info("Initialize Mediapipe module...")
        self.fps = fps
        self.model_path = model_path
        self.options = FaceLandmarkerOptions(
            base_options=BaseOptions(
                model_asset_path=model_path,
                # delegate=BaseOptions.Delegate.GPU
            ),
            output_face_blendshapes=True,
            output_facial_transformation_matrixes=False,
            running_mode=VisionRunningMode.VIDEO,
            num_faces=1,
        )

    @staticmethod
    def convert_to_2dlib(face_landmarks, correspondence=mp2dlib_correspondence):
        """Convert MediaPipe face-mesh landmarks to the dlib point layout.

        Each output point is the mean of one or more MediaPipe landmark
        positions, as specified by ``correspondence``.

        Args:
            face_landmarks: Sequence of landmark objects exposing
                ``.x``/``.y``/``.z`` (normalized coordinates from MediaPipe).
            correspondence: One list of MediaPipe indices per output point;
                the default yields the 68-point dlib layout.

        Returns:
            ``(len(correspondence), 3)`` float ndarray of averaged
            (x, y, z) coordinates — (68, 3) with the default mapping.
        """
        face_landmarks_2dlib = np.zeros((len(correspondence), 3))
        for i, targets in enumerate(correspondence):
            targets_pts = [
                (face_landmarks[j].x, face_landmarks[j].y, face_landmarks[j].z)
                for j in targets
            ]
            face_landmarks_2dlib[i] = np.mean(targets_pts, axis=0)
        return face_landmarks_2dlib

    def detect_dataset(self, dataloader):
        """Run the landmarker over every item yielded by ``dataloader``.

        Each item is expected to provide ``"timestep_id"``, ``"camera_id"``,
        and an RGB image tensor under ``"rgb"`` (batch dimension first).

        Returns:
            Tuple ``(landmarks, bboxes, blendshape_scores)`` of nested dicts
            keyed by camera_id, then timestep_id. ``bboxes`` currently stays
            empty per camera — bounding boxes are not extracted here.
        """
        logger.info("Initialize Landmark Detector (Mediapipe)...")
        # 68 facial landmark detector
        landmarks = {}
        bboxes = {}
        blendshape_scores = {}

        logger.info("Begin annotating landmarks...")
        with FaceLandmarker.create_from_options(self.options) as landmarker:
            for item in tqdm(dataloader):
                timestep_id = item["timestep_id"][0]
                # VIDEO mode needs a per-frame timestamp in milliseconds,
                # derived from the timestep index and the assumed fps.
                frame_timestamp_ms = int(1000 * int(timestep_id) / self.fps)
                camera_id = item["camera_id"][0]

                logger.info(
                    f"Annotate facial landmarks for timestep: {timestep_id}, camera: {camera_id}"
                )
                img = item["rgb"][0].numpy()
                mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=img)
                result = landmarker.detect_for_video(mp_image, frame_timestamp_ms)
                if not result.face_landmarks:
                    logger.error(
                        f"No bbox found for frame: {timestep_id}, camera: {camera_id}. Setting landmarks to all -1."
                    )
                    # Sentinel values mark frames where detection failed so
                    # downstream consumers can filter them out.
                    lmks = np.full((68, 3), -1.0)
                    blendshape_score = np.zeros(52)
                else:
                    face_landmarks = result.face_landmarks[0]
                    lmks = self.convert_to_2dlib(face_landmarks)
                    blendshape_score = np.array(
                        [x.score for x in result.face_blendshapes[0]]
                    )

                landmarks.setdefault(camera_id, {})[timestep_id] = lmks
                blendshape_scores.setdefault(camera_id, {})[timestep_id] = blendshape_score
                bboxes.setdefault(camera_id, {})
                # bboxes[camera_id][timestep_id] = bbox

        return landmarks, bboxes, blendshape_scores

    def annotate_landmarks(self, dataloader):
        """Detect landmarks for the whole dataset and save per-camera archives.

        For each camera, stacks the per-frame results into ``(T, 68, 3)``
        landmark and ``(T, 52)`` blendshape arrays and writes them as a
        ``.npz`` file at the dataset's ``landmark2d/mediapipe`` property path.
        """
        lmks_face, bboxes_faces, blendshape_scores = self.detect_dataset(dataloader)

        # construct final json
        for camera_id, lmk_face_camera in lmks_face.items():
            bounding_box = []
            face_landmark_2d = []
            blendshape_score = []
            for timestep_id in lmk_face_camera.keys():
                # [None] adds a leading frame axis so arrays concatenate to (T, ...).
                # bounding_box.append(bboxes_faces[camera_id][timestep_id][None])
                face_landmark_2d.append(lmks_face[camera_id][timestep_id][None])
                blendshape_score.append(blendshape_scores[camera_id][timestep_id][None])

            lmk_dict = {
                # "bounding_box": bounding_box,
                "face_landmark_2d": face_landmark_2d,
                "blendshape_score": blendshape_score,
            }

            # Stack per-frame arrays; leave empty lists untouched so np.savez
            # still stores them without raising on concatenate.
            for k, v in lmk_dict.items():
                if len(v) > 0:
                    lmk_dict[k] = np.concatenate(v, axis=0)
            out_path = dataloader.dataset.get_property_path(
                "landmark2d/mediapipe", camera_id=camera_id
            )
            logger.info(f"Saving landmarks to: {out_path}")
            # exist_ok avoids the check-then-create race of exists()+mkdir().
            out_path.parent.mkdir(parents=True, exist_ok=True)
            np.savez(out_path, **lmk_dict)