import os
from os.path import join
import numpy as np
from utils.bvh2joint import bvh2joint, default_end_link_trans
from utils.fbx2objpose import fbx2objpose, fbx2camerapose
from utils.denoising import denoising_pose
from utils.visualization import render_mocap_data_on_camera_space
from utils.txt2intrinsic import txt2intrinsic
import cv2
import open3d as o3d


def get_mocap_data(cfg, egocentric=False):
    """
    Load motion-capture data: per-person joints, object pose, and (optionally)
    the egocentric camera pose.

    Args:
        cfg: dict of input paths. Recognized keys:
            "person1_bvh_path" / "person2_bvh_path": BVH files with person
                joint data (person1 must be present for any person data to
                be loaded).
            "obj_fbx_path": FBX file with the object pose track.
            "helmet1_fbx_path", "helmet1_camera2tracker_path": required when
                egocentric=True, used to recover the helmet camera pose.
        egocentric: True for egocentric view, False for 3rd-person view.

    Returns:
        dict with keys:
            "joint_datas": list of per-person joint arrays, or None when no
                person BVH was configured,
            "objposes": single-element list with the denoised object pose,
                or None when no object FBX was configured,
            "camera_pose": denoised camera pose track (present only when
                egocentric=True).
    """

    # TODO: support multiple objects

    # --- load raw data ---
    end_link_trans = default_end_link_trans()
    joint_datas = None
    if "person1_bvh_path" in cfg:
        joint_datas = []
        for i in range(2):
            key = f"person{i + 1}_bvh_path"
            if key in cfg:
                joint_datas.append(bvh2joint(bvh_path=cfg[key], frame_ids=None, end_link_trans=end_link_trans))

    obj_pose = fbx2objpose(fbx_path=cfg["obj_fbx_path"]) if "obj_fbx_path" in cfg else None

    # Fix: the original unconditionally indexed joint_datas[0], which raised
    # TypeError when no person BVH was configured. Only validate frame counts
    # when person data actually exists.
    if joint_datas:
        N = joint_datas[0].shape[0]  # frame number
        # all persons must have the same number of frames
        for data in joint_datas[1:]:
            assert data.shape[0] == N
        print("frame number:", N)

    if egocentric:
        helmet1_camera_pose = fbx2camerapose(fbx_path=cfg["helmet1_fbx_path"], camera2tracker_path=cfg["helmet1_camera2tracker_path"])

    # --- denoising ---
    if obj_pose is not None:
        obj_pose = denoising_pose(obj_pose)
    if egocentric:
        helmet1_camera_pose = denoising_pose(helmet1_camera_pose)

    obj_poses = [obj_pose] if obj_pose is not None else None  # single object only
    mocap_data = {
        "joint_datas": joint_datas,
        "objposes": obj_poses,
    }
    if egocentric:
        # The original's `[::, ...]` was a full-slice no-op (a view over the
        # same data); the pose track is stored unchanged.
        mocap_data["camera_pose"] = helmet1_camera_pose
    return mocap_data


if __name__ == "__main__":

    cfg = {}

    ############## CHANGE BELOW #################
    # mocap data
    cfg["person1_bvh_path"] = "/home/liuyun/HHO-dataset/data_processing/exp_data/20230522_data/24/person1.bvh"
    cfg["person2_bvh_path"] = "/home/liuyun/HHO-dataset/data_processing/exp_data/20230522_data/24/person2.bvh"
    cfg["obj_fbx_path"] = "/home/liuyun/HHO-dataset/data_processing/exp_data/20230522_data/24/desk001.fbx"
    cfg["helmet1_fbx_path"] = "/home/liuyun/HHO-dataset/data_processing/exp_data/20230522_data/24/helmet1.fbx"
    # camera parameters (new vs. old camera2tracker differ by ~3 deg / 2 mm,
    # possibly caused by a loose 3D-printed mount)
    cfg["helmet1_intrinsic_path"] = "/home/liuyun/HHO-dataset/data_processing/exp_data/helmet/helmet1_AY3A131005M/intrinsic.txt"
    cfg["helmet1_camera2tracker_path"] = "/home/liuyun/HHO-dataset/data_processing/exp_data/helmet/helmet1_AY3A131005M/camera2tracker.txt"
    # object model
    cfg["obj_model_path"] = "/home/liuyun/HHO-dataset/data_processing/exp_data/objects/table001/desk1_m.obj"
    # rgb video
    rgb_video_path = "/home/liuyun/HHO-dataset/data_processing/exp_data/20230522_data/24/rgbd/_ob_camera_01_color_image_raw.mp4"
    # depth video (placeholder, currently unused below)
    depth_video_path = None
    # time alignment file
    frame_mocap2vision_path = "/home/liuyun/HHO-dataset/data_processing/exp_data/20230522_data/24/frame_mocap2vision.txt"
    # save path
    save_path = "/home/liuyun/HHO-dataset/data_processing/exp_data/20230522_data/24/ex_egoview_HOI.mp4"
    ############## CHANGE ABOVE #################

    # get data
    mocap_data = get_mocap_data(cfg=cfg, egocentric=True)
    camera_intrinsic, _ = txt2intrinsic(cfg["helmet1_intrinsic_path"])
    objmodels = [o3d.io.read_triangle_mesh(cfg["obj_model_path"])]

    # Per-frame timestamps (seconds): the sidecar txt holds a single
    # comma-separated line of nanosecond values, one per video frame.
    with open(rgb_video_path.replace(".mp4", "_timestamp.txt"), "r") as f:
        first_line = f.readline()
    timestamp = [float(v) / 1e9 for v in first_line.strip().split(",")]

    # Decode all frames, converting BGR (OpenCV default) to RGB, and pair
    # each frame with its timestamp. Note: removed the unused `fourcc`/`fps`
    # locals and the dead pre-loop `suc = cap.isOpened()` assignment.
    cap = cv2.VideoCapture(rgb_video_path)
    rgb_data = []
    frame_idx = 0
    while True:
        suc, img = cap.read()
        if not suc:
            break
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        rgb_data.append((timestamp[frame_idx], img))
        frame_idx += 1
    cap.release()

    # Annotation: which mocap frame corresponds to rgb frame 0?
    start_mocap_idx = int(np.loadtxt(frame_mocap2vision_path).item())

    render_mocap_data_on_camera_space(mocap_data["joint_datas"], mocap_data["objposes"], objmodels, mocap_data["camera_pose"], camera_intrinsic, rgb_data=rgb_data, start_mocap_idx=start_mocap_idx, save_path=save_path, fps=5)
