# -*-coding:utf-8-*-
import argparse
import os
import random
import numpy as np
import cv2
import glob
from mediapipe import solutions
from mediapipe.framework.formats import landmark_pb2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from scipy.signal import savgol_filter
# import onnxruntime as ort
from collections import OrderedDict
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
from tqdm import tqdm

def rot2euler(rotation_matrix):
    """Convert a 3x3 rotation matrix into Euler angles.

    Uses the standard ZYX (yaw-pitch-roll) extraction:
    theta = rotation about x, phi = rotation about y, psi = rotation about z.

    Args:
        rotation_matrix: array-like with at least a 3x3 rotation in the
            upper-left corner (a 4x4 transform works too).

    Returns:
        np.ndarray of shape (3,): [theta, phi, psi] in radians.
    """
    rot = np.asarray(rotation_matrix)
    theta = np.arctan2(rot[2, 1], rot[2, 2])
    phi = np.arctan2(-rot[2, 0], np.hypot(rot[0, 0], rot[1, 0]))
    psi = np.arctan2(rot[1, 0], rot[0, 0])
    return np.stack([theta, phi, psi])

def infer_bs(mp4_path):
    """Extract per-frame blendshape scores and head pose from a video.

    Runs MediaPipe FaceLandmarker over every frame of *mp4_path* and saves,
    next to the video:
      - bs.npy:      (k, 52) float32 blendshape coefficients per frame
      - pose_mp.npy: (k, 3)  float32 Euler angles from the facial
                     transformation matrix
    where k is the number of frames actually decoded.

    Args:
        mp4_path: path to the input video file.
    """
    base_options = python.BaseOptions(
        model_asset_path="./data_utils/blendshape_capture/face_landmarker.task")
    options = vision.FaceLandmarkerOptions(base_options=base_options,
                                           output_face_blendshapes=True,
                                           output_facial_transformation_matrixes=True,
                                           num_faces=1)
    detector = vision.FaceLandmarker.create_from_options(options)

    root_path = os.path.dirname(mp4_path)
    npy_path = os.path.join(root_path, "bs.npy")
    pose_path = os.path.join(root_path, "pose_mp.npy")

    # Each frame is round-tripped through a temp image file so that
    # mp.Image.create_from_file handles the BGR->RGB conversion.
    image_path = os.path.join(root_path, "img/temp.png")
    os.makedirs(os.path.join(root_path, 'img/'), exist_ok=True)
    cap = cv2.VideoCapture(mp4_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    # NOTE: CAP_PROP_FRAME_COUNT is only an estimate for some containers;
    # the loop below guards against decoding more frames than reported.
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print("fps:", fps)
    print("frame_count:", frame_count)
    total = frame_count
    bs = np.zeros((total, 52), dtype=np.float32)
    pose = np.zeros((total, 3), dtype=np.float32)
    print("total:", total)
    print("videoPath:{} fps:{}".format(mp4_path.split('/')[-1], fps))
    k = 0
    pbar = tqdm(total=total)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if k >= total:
            # The container metadata under-reported the frame count; stop
            # instead of indexing past the preallocated arrays.
            break
        cv2.imwrite(image_path, frame)
        image = mp.Image.create_from_file(image_path)
        result = detector.detect(image)
        if len(result.face_blendshapes) > 0:
            # Drop the leading "_neutral" category and pad back to 52 entries.
            scores = [category.score for category in result.face_blendshapes[0]]
            bs[k] = np.append(np.array(scores)[1:], 0)
        if len(result.facial_transformation_matrixes) > 0:
            w2c = result.facial_transformation_matrixes[0]
            pose[k] = rot2euler(w2c)
        pbar.update(1)
        k += 1
    cap.release()
    pbar.close()
    # Keep only the frames actually decoded (consistent with infer_bs_pic).
    bs = bs[:k]
    pose = pose[:k]
    # Temporal smoothing; savgol_filter needs at least window_length samples.
    if k >= 5:
        for j in range(bs.shape[1]):
            bs[:, j] = savgol_filter(bs[:, j], 5, 3)
        for j in range(pose.shape[1]):
            pose[:, j] = savgol_filter(pose[:, j], 5, 3)
    np.save(npy_path, bs)
    np.save(pose_path, pose)


def infer_bs_pic(root_path):
    """Extract blendshape coefficients from an extracted image sequence.

    Processes root_path/ori_imgs/*.jpg in sorted (frame) order and saves the
    smoothed (k, 52) coefficient array to root_path/bs.npy, where k is the
    number of images processed. Images missing either a face mask
    (face_mask/<name>.png) or a landmark file (<name>.lms) are skipped.

    Args:
        root_path: dataset root containing ori_imgs/ and face_mask/.
    """
    base_options = python.BaseOptions(
        model_asset_path="./data_utils/blendshape_capture/face_landmarker.task")
    options = vision.FaceLandmarkerOptions(base_options=base_options,
                                           output_face_blendshapes=True,
                                           output_facial_transformation_matrixes=True,
                                           num_faces=1)
    detector = vision.FaceLandmarker.create_from_options(options)
    ori_dir = os.path.join(root_path, "ori_imgs")
    # BUGFIX: glob returns files in arbitrary order; sort so that the rows of
    # bs.npy line up with the frame sequence.
    img_list = sorted(glob.glob(os.path.join(ori_dir, "*.jpg")))
    mask_dir = os.path.join(root_path, "face_mask")
    npy_path = os.path.join(root_path, "bs.npy")
    # Size the buffer by the images actually iterated (k can never exceed it);
    # it is trimmed to the processed count below.
    total = len(img_list)
    bs = np.zeros((total, 52), dtype=np.float32)
    k = 0
    for file in tqdm(img_list):
        filename = os.path.basename(file)
        mask_path = os.path.join(mask_dir, filename.replace(".jpg", ".png"))
        lms_path = file.replace(".jpg", ".lms")
        if not os.path.exists(mask_path) or not os.path.exists(lms_path):
            continue
        image = mp.Image.create_from_file(file)
        result = detector.detect(image)
        if len(result.face_blendshapes) > 0:
            # Drop the leading "_neutral" category and pad back to 52 entries.
            scores = [category.score for category in result.face_blendshapes[0]]
            blendshape_coef = np.append(np.array(scores)[1:], 0)
        else:
            blendshape_coef = np.zeros(52)
        bs[k] = blendshape_coef
        k += 1
    bs = bs[:k]
    output = np.zeros((bs.shape[0], bs.shape[1]))
    # savgol_filter raises if the series is shorter than its window (5).
    if k >= 5:
        for j in range(bs.shape[1]):
            output[:, j] = savgol_filter(bs[:, j], 5, 3)
    else:
        output[:] = bs
    np.save(npy_path, output)
    print(np.shape(output))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Extract MediaPipe blendshape/pose coefficients from a video.")
    # BUGFIX: the help text claimed this was an "idname", but infer_bs expects
    # a video file path. Mark it required so a missing argument fails with a
    # clear argparse error instead of passing None into infer_bs.
    parser.add_argument("--path", type=str, required=True,
                        help="path to the input .mp4 video")
    args = parser.parse_args()
    infer_bs(args.path)
    # To process a pre-extracted image folder instead:
    # infer_bs_pic(args.path)
