import tyro
from dataclasses import dataclass, field
from pathlib import Path


@dataclass()
class DataConfig:
    """Configuration for data paths.

    Field defaults double as the CLI defaults exposed through ``tyro.cli``.
    """

    # Directory containing the input video.
    root_folder: Path = Path("./data")
    # Directory the fitted parameters are written under.
    out_folder: Path = Path("./out")
    # File name of the video inside ``root_folder``.
    video_name: str = "video.mp4"
    # MediaPipe face-landmarker model asset (.task file).
    task_path: Path = Path("flame2gs/jobs/face_landmarker.task")

if __name__ == "__main__":
  # Parse CLI flags into a DataConfig, then locate and validate the input video.
  cfg = tyro.cli(DataConfig)
  video_path = (cfg.root_folder / cfg.video_name).expanduser().resolve()
  print(f"Video path: {video_path}")
  if not video_path.exists():
    # A missing input file is a filesystem error, not a bad value.
    raise FileNotFoundError(f"Video path does not exist: {video_path}")
  # %% Build the MediaPipe FaceLandmarker options for per-frame video inference.
  import mediapipe as mp
  BaseOptions = mp.tasks.BaseOptions
  FaceLandmarker = mp.tasks.vision.FaceLandmarker
  FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
  VisionRunningMode = mp.tasks.vision.RunningMode
  # model_asset_path is serialized into a protobuf string field, which rejects
  # a pathlib.Path -- convert explicitly.
  options = FaceLandmarkerOptions(
      base_options=BaseOptions(model_asset_path=str(cfg.task_path)),
      output_face_blendshapes=False,
      output_facial_transformation_matrixes=True,  # needed for head pose below
      running_mode=VisionRunningMode.VIDEO,  # timestamped per-frame detection
      num_faces=1,  # assume a single subject in frame
  )
  
  # %%
  import cv2, tqdm, numpy as np
  import torch
  from scipy.spatial.transform import Rotation
  from gsmodel.bfmlbs import BFMLBS
  from vhap.util.landmark_detector_mp import LandmarkDetectorMP

  DEFAULT_FOCAL = 1.5

  # %% Run the face landmarker over every frame, collecting the facial
  # transformation matrix (head pose) and the 2D landmarks per frame.
  cap = cv2.VideoCapture(str(video_path))
  try:
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
      # Without a valid FPS the per-frame timestamps below would divide by zero.
      raise RuntimeError(f"Could not read a valid FPS from: {video_path}")
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    process = tqdm.tqdm(range(total_frames))
    face_orientations = []
    lmks = []
    with FaceLandmarker.create_from_options(options) as landmarker:
      for idx in process:
        success, image = cap.read()
        if not success:
          break  # container frame counts can overestimate; stop at real EOF
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)

        # VIDEO mode requires monotonically increasing timestamps in ms.
        frame_timestamp_ms = int(1000 * idx / fps)
        face_landmarker_result = landmarker.detect_for_video(mp_image, frame_timestamp_ms)

        if not face_landmarker_result.face_landmarks:
          # Fail loudly instead of an opaque IndexError: the fitting stages
          # below assume exactly one detection per processed frame.
          raise RuntimeError(f"No face detected in frame {idx}")
        face_landmarks = face_landmarker_result.face_landmarks[0]
        face_orientation = face_landmarker_result.facial_transformation_matrixes[0]
        lmk = LandmarkDetectorMP.convert_to_2dlib(face_landmarks)

        face_orientations.append(face_orientation)
        lmks.append(lmk)
  finally:
    cap.release()  # VideoCapture holds an OS handle; release it explicitly
  face_orientations = np.stack(face_orientations, axis=0)
  lmks = np.stack(lmks, axis=0)
    
  # %% Prepare fitting inputs on the torch device.
  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
  bfmlbs = BFMLBS().to(device)
  # Per-frame head pose as axis-angle vectors, taken from MediaPipe's 4x4 matrices.
  rotvecs = torch.from_numpy(Rotation.from_matrix(face_orientations[:, :3, :3]).as_rotvec()).float().to(device)
  # Frames actually decoded -- may be fewer than `total_frames` when the read
  # loop stopped early, so size the per-frame tensors off the real count
  # (consistent with how expr_optim is allocated later).
  num_frames = rotvecs.size(0)
  shapes = torch.zeros(1, 100).to(device)  # shared identity coefficients
  exprs = torch.zeros(num_frames, 79).to(device)  # per-frame expression coefficients

  rot_mp2bfm = Rotation.from_euler('xyz', [180, 0, 0], degrees=True).as_matrix()
  # Map [0,1] image coords into NDC ([-1,1]) and flip y/z via the 180-degree
  # x-rotation; keep the last 51 landmarks (inner face in the 68-pt
  # convention -- assumed, verify against LandmarkDetectorMP).
  lmks_mp = torch.from_numpy(2 * (lmks - np.array([0.5, 0.5, 0.])) @ rot_mp2bfm.T).float()[:, -51:]
  
  # %% Bundle adjustment, stage 1: solve a single global translation while the
  # pose stays fixed and shape/expression stay at zero.
  translation_optim = torch.zeros(1, 3, dtype=torch.float, device=device).detach().requires_grad_(True)
  rotvecs_optim = rotvecs.clone().detach().requires_grad_(False)
  translation_optim.data[..., -1] = -1  # initialize z so the head starts in front of the camera

  optimizer = torch.optim.Adam([translation_optim], lr=1e-2)
  focal = torch.tensor(-DEFAULT_FOCAL, device=device)

  # Landmark weights: only the nose region constrains translation here.
  weight = torch.zeros(51, device=device)
  weight[[10, 11, 12, 13]] = 10
  weight[[15, 16, 17]] = 1
  weight[[14, 18]] = 0  # the two points on the sides of the nose are unreliable
  weight = weight.unsqueeze(0).unsqueeze(-1)

  lmks_mp = lmks_mp.to(device)
  bar = tqdm.tqdm(range(2500))
  for step in bar:
    optimizer.zero_grad()
    lmks_pred_trans = bfmlbs.forward_sub(
        shapes, exprs, rotvecs_optim,
        translation=translation_optim,
        sub_index=bfmlbs.lmk_info[-51:],
    )
    # Perspective-project the predicted landmarks into NDC.
    lmk_ndc = focal * lmks_pred_trans / lmks_pred_trans[..., 2:]
    loss = ((lmks_mp[..., :2] - lmk_ndc[..., :2]).abs() * weight).mean()
    loss.backward()
    optimizer.step()
    bar.set_description(f"translation loss: {loss.item():.4f}")
    
  # %% Bundle adjustment, stage 2: fit identity shape while gently refining
  # the translation found in stage 1.
  shape_optim = torch.randn(1, 100, dtype=torch.float, device=device).detach().requires_grad_(True)
  optimizer = torch.optim.Adam([
      {'params': [shape_optim], 'lr': 1e-2},
      {'params': [translation_optim], 'lr': 1e-3},
  ])

  # Nose-dominated weighting again, now with every landmark contributing.
  weight = torch.ones(51, device=device)
  weight[[10, 11, 12, 13]] = 10
  weight[[15, 16]] = 1
  weight[[14, 18]] = 0  # the two points on the sides of the nose are unreliable
  weight = weight.unsqueeze(0).unsqueeze(-1)

  bar = tqdm.tqdm(range(2500))
  for step in bar:
    optimizer.zero_grad()
    lmks_pred = bfmlbs.forward_sub(
        shape_optim, exprs, rotvecs_optim,
        translation=translation_optim,
        sub_index=bfmlbs.lmk_info[-51:],
    )
    lmk_ndc = focal * lmks_pred / lmks_pred[..., 2:]
    reg_shape = shape_optim.abs().mean()  # L1 regularizer on the identity coefficients
    loss = ((lmks_mp[..., :2] - lmk_ndc[..., :2]).abs() * weight).mean() + 1 * reg_shape
    loss.backward()
    optimizer.step()
    bar.set_description(f"shape loss: {loss.item():.4f}")
    
  # %% Bundle adjustment, stage 3: per-frame expressions, jointly refining shape.
  expr_optim = torch.zeros(rotvecs.size(0), 79, dtype=torch.float, device=device).detach().requires_grad_(True)
  optimizer = torch.optim.Adam([
      {'params': [expr_optim, shape_optim], 'lr': 1e-2},
  ])

  weight = torch.ones(51, device=device)
  weight[10:18] = 0   # the nose already anchored the pose; stop optimizing against it
  weight[:10] = 0.    # eyebrows jitter too much for lmk supervision; focus on the lower face

  weight = weight.unsqueeze(0).unsqueeze(-1)

  bar = tqdm.tqdm(range(5000))
  for step in bar:
    optimizer.zero_grad()
    lmks_pred = bfmlbs.forward_sub(
        shape_optim, expr_optim, rotvecs_optim,
        translation=translation_optim,
        sub_index=bfmlbs.lmk_info[-51:],
    )
    lmk_ndc = focal * lmks_pred / lmks_pred[..., 2:]
    reg_shape = shape_optim.square().mean()  # L2 regularizers in this stage
    reg_expr = expr_optim.square().mean()

    loss_target = ((lmks_mp[..., :2] - lmk_ndc[..., :2]).abs() * weight).mean()
    loss = loss_target + 1e-2 * reg_shape + 1e-3 * reg_expr
    loss.backward()
    optimizer.step()
    bar.set_description(f"canloss: {loss_target.item():.4f}, shape: {reg_shape.item():.4f}, expr: {reg_expr.item():.4f}, all loss: {loss.item():.4f}")
    
  # %% Save the fitted parameters as <out_folder>/<video_name>.npz.
  outpath = (cfg.out_folder / cfg.video_name).expanduser().resolve().with_suffix('.npz')
  outpath.parent.mkdir(parents=True, exist_ok=True)
  # Record the number of frames actually fitted: the decode loop may have
  # stopped before `total_frames` if the container's frame count was wrong,
  # and consumers should see a count consistent with the saved arrays.
  fitted_frames = int(expr_optim.size(0))
  out_dict = {
    'shapes': shape_optim.data.cpu().numpy(),
    'exprs': expr_optim.data.cpu().numpy(),
    'rotvecs': rotvecs_optim.data.cpu().numpy(),
    'translation': translation_optim.data.cpu().numpy(),
    "frame_count": fitted_frames,
    "fps": fps,
  }
  np.savez(outpath, **out_dict)