import os
from os.path import join, isfile
import sys
sys.path.append("..")
import argparse
import numpy as np
import cv2
from data_recording.multi_camera.utils.bvh2joint import bvh2joint, default_end_link_trans, localpose_to_headrot
from data_recording.multi_camera.utils.txt_parser import txt2intrinsic
from data_recording.multi_camera.utils.video_parser import mp42imgs
from data_recording.multi_camera.vis_mv_HHO_pose import get_obj_info
from data_processing.utils.time_align import time_align, prepare_objpose
from data_processing.utils.process_timestamps import txt_to_paried_frameids
from data_processing.utils.visualization import draw_head_orientation
import open3d as o3d


def _homogenize(points):
    """Append a column of ones: (N, 3) points -> (N, 4) homogeneous points."""
    return np.concatenate((points, np.ones((points.shape[0], 1))), axis=-1)


def _project(points_h, camera_intrinsic, camera_extrinsic):
    """Project homogeneous world-space points to integer pixel coordinates.

    Args:
        points_h: (N, 4) homogeneous points in world space.
        camera_intrinsic: (3, 3) intrinsic matrix.
        camera_extrinsic: (4, 4) world->camera transform.

    Returns:
        (N, 2) int32 pixel coordinates (perspective-divided).
    """
    p = points_h @ camera_extrinsic.transpose(1, 0)
    p = p[:, :3]  # (N, 3), in camera space
    uv = p @ camera_intrinsic.transpose(1, 0)
    uv = uv[:, :2] / uv[:, 2:]  # perspective divide -> (N, 2) image space
    return uv.astype(np.int32)


def render(img, joint1_data, joint2_data, obj2world, obj_p, camera_intrinsic, camera_extrinsic, head_orientation=None):
    """Draw person joints and (optionally) object sample points onto one RGB frame.

    Args:
        img: (H, W, 3) RGB frame; converted to BGR internally for OpenCV drawing.
        joint1_data / joint2_data: (J, 3) world-space joint positions for each
            person, or None to skip that person.
        obj2world: (4, 4) object->world transform, or None to skip the object.
        obj_p: (N, 3) points sampled on the object CAD model, in object space.
        camera_intrinsic: (3, 3) intrinsic matrix.
        camera_extrinsic: (4, 4) world->camera transform.
        head_orientation: optional dict with "person1"/"person2" head rotations
            (each possibly None); drawn via draw_head_orientation.

    Returns:
        The annotated BGR image.
    """
    # World -> camera -> image projection for each enabled element.
    if joint1_data is not None:
        joint1_pixels = _project(_homogenize(joint1_data), camera_intrinsic, camera_extrinsic)
    if joint2_data is not None:
        joint2_pixels = _project(_homogenize(joint2_data), camera_intrinsic, camera_extrinsic)
    if obj2world is not None:
        # Object points go object space -> world space first.
        obj_pixels = _project(_homogenize(obj_p) @ obj2world.T, camera_intrinsic, camera_extrinsic)
    else:
        obj_pixels = None

    # Render (OpenCV draws in BGR).
    img = img[:, :, ::-1].astype(np.uint8)  # rgb2bgr
    if joint1_data is not None:
        for px in joint1_pixels:
            cv2.circle(img, tuple(px), 5, (0, 0, 255), -1)  # person1: red
        if (head_orientation is not None) and (head_orientation["person1"] is not None):
            # joint index 59 is used as the head position — assumed head joint; TODO confirm
            img = draw_head_orientation(img, joint1_data[59], head_orientation["person1"], camera_intrinsic, camera_extrinsic)
    if joint2_data is not None:
        for px in joint2_pixels:
            cv2.circle(img, tuple(px), 5, (0, 255, 0), -1)  # person2: green
        if (head_orientation is not None) and (head_orientation["person2"] is not None):
            img = draw_head_orientation(img, joint2_data[59], head_orientation["person2"], camera_intrinsic, camera_extrinsic)
    if obj2world is not None:
        for px in obj_pixels:
            cv2.circle(img, tuple(px), 5, (0, 255, 255), -1)  # object: yellow
    return img


def vis_single_video(data_dir, obj_dataset_dir, cam1_dir, cam2_dir, cam3_dir, cam4_dir, cfg):
    """Render a 2x2 multi-view pose-overlay video for one recorded sequence.

    Loads VTS motion data, time-aligns the four cameras with the two persons
    (and object poses when enabled), projects joints/object points into each
    camera view via render(), and writes a 2560x1440 mp4 next to the input data.

    Args:
        data_dir: sequence directory containing VTS_data.npz, the four
            _d455_cameraN_color_image_raw.mp4 videos, and timestamp files.
        obj_dataset_dir: root of the object CAD-model dataset.
        cam1_dir..cam4_dir: per-camera param dirs with intrinsic.txt and
            camera2world.txt.
        cfg: dict with bool flags "vis_obj", "vis_person1", "vis_person2" and
            optionally "vis_head_orientation".
    """
    real_cfg = cfg.copy()  # copied so disabling vis_obj below doesn't leak to the caller
    if not isfile(join(data_dir, "VTS_data.npz")):
        print("#################### [error] no VTS_data.npz !!! #####################")
        return

    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()  # get VTS data
    obj_name, obj_model_path = get_obj_info(data_dir, obj_dataset_dir)
    if obj_name is None:
        if real_cfg["vis_obj"]:
            print("[error] no VTS object data in {}!!!".format(data_dir))
        real_cfg["vis_obj"] = False

    # Time-align cameras and persons (always regenerated); then object poses if needed.
    cfg_for_time_align = {
        "camera1": True,
        "camera2": True,
        "camera3": True,
        "camera4": True,
        "person1": True,
        "person2": True,
        "object": False,
    }
    VTS_add_time = time_align(data_dir, cfg_for_time_align, threshould=40000000, VTS_add_time=None)
    if real_cfg["vis_obj"]:
        prepare_objpose(data_dir, obj_name, threshould=40000000, VTS_add_time=VTS_add_time)

    paired_frames = txt_to_paried_frameids(join(data_dir, "aligned_frame_ids.txt"))

    # Per-camera intrinsics and camera->world extrinsics.
    cam_dirs = [cam1_dir, cam2_dir, cam3_dir, cam4_dir]
    cam_intrinsics = []
    cam_poses = []
    for cam_dir in cam_dirs:
        intrinsic, _ = txt2intrinsic(join(cam_dir, "intrinsic.txt"))
        cam_intrinsics.append(intrinsic)
        cam_poses.append(np.loadtxt(join(cam_dir, "camera2world.txt")))

    # Decode the four RGB videos into frame lists.
    rgb_imgs = [
        mp42imgs(join(data_dir, "_d455_camera{}_color_image_raw.mp4".format(k + 1)))
        for k in range(4)
    ]

    obj_p = None
    if real_cfg["vis_obj"]:  # CAD model points and per-frame object poses
        obj_mesh = o3d.io.read_triangle_mesh(obj_model_path)
        obj_p = np.float32(obj_mesh.sample_points_poisson_disk(100).points)
        obj_poses = np.load(join(data_dir, "aligned_objposes.npy"))
    if real_cfg["vis_person1"]:
        person1_list = VTS_data["/joints"]
    if real_cfg["vis_person2"]:
        person2_list = VTS_data["/joints2"]

    # Assemble per-frame data: aligned RGB frames, object pose, person joints.
    data_list = []
    for i, paired_frame in enumerate(paired_frames):
        p_rgb1, p_rgb2, p_rgb3, p_rgb4, p_person1, p_person2 = paired_frame[:6]
        p_obj = i  # aligned_objposes.npy is assumed indexed per paired frame — TODO confirm
        if real_cfg["vis_obj"]:
            obj2world = obj_poses[p_obj]
        if real_cfg["vis_person1"]:
            joint1_globalpos, joint1_localrot = bvh2joint(person1_list[p_person1], end_link_trans=default_end_link_trans(), return_local_rot=True)
        if real_cfg["vis_person2"]:
            joint2_globalpos, joint2_localrot = bvh2joint(person2_list[p_person2], end_link_trans=default_end_link_trans(), return_local_rot=True)
        data_list.append(
            {
                "rgb1": rgb_imgs[0][p_rgb1],
                "rgb2": rgb_imgs[1][p_rgb2],
                "rgb3": rgb_imgs[2][p_rgb3],
                "rgb4": rgb_imgs[3][p_rgb4],
                "obj2world": obj2world if real_cfg["vis_obj"] else None,
                "person1": {"globalpos": joint1_globalpos, "localrot": joint1_localrot} if real_cfg["vis_person1"] else {"globalpos": None, "localrot": None},
                "person2": {"globalpos": joint2_globalpos, "localrot": joint2_localrot} if real_cfg["vis_person2"] else {"globalpos": None, "localrot": None},
            }
        )
        if i % 100 == 0:
            print("finish processing frame", i)

    # Visualization: tile the four 1280x720 views into one 2560x1440 frame.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    W = 1280 * 2
    H = 720 * 2
    vw = cv2.VideoWriter(join(data_dir, "vis_mv_HHO_pose.mp4"), fourcc, 30, (W, H))
    tile_offsets = [(0, 0), (0, 1280), (720, 0), (720, 1280)]  # row-major 2x2 grid
    for data in data_list:
        # Optional head orientation, derived from local joint rotations.
        head_orientation = None
        if ("vis_head_orientation" in cfg) and cfg["vis_head_orientation"]:
            head_orientation = {
                "person1": localpose_to_headrot(data["person1"]["localrot"]),
                "person2": localpose_to_headrot(data["person2"]["localrot"]),
            }
        img = np.zeros((H, W, 3)).astype(np.uint8)
        for k, (y0, x0) in enumerate(tile_offsets):
            view = render(
                data["rgb{}".format(k + 1)],
                data["person1"]["globalpos"],
                data["person2"]["globalpos"],
                data["obj2world"],
                obj_p,
                cam_intrinsics[k],
                np.linalg.inv(cam_poses[k]),  # camera2world -> world2camera extrinsic
                head_orientation=head_orientation,
            )
            img[y0:y0 + 720, x0:x0 + 1280] = view
        vw.write(img)
    vw.release()
    
    
def get_args():
    """Parse command-line options; only --clip_name (default "20230802_1")."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--clip_name', type=str, default="20230802_1")
    return arg_parser.parse_args()


if __name__ == "__main__":

    ############################################################################################################
    dataset_dir = "/share/datasets/HHO_dataset"
    obj_dataset_dir = "/share/datasets/HHO_object_dataset_final"
    cfg = {
        "vis_obj": True,
        "vis_person1": True,
        "vis_person2": True,
        "vis_head_orientation": True,
    }
    ############################################################################################################

    args = get_args()
    clip_dir = join(dataset_dir, "data", args.clip_name)
    camera_param_dir = join(dataset_dir, "rawdata", args.clip_name, "param")
    # Param directories for the four d455 cameras, in order.
    camera_dirs = [join(camera_param_dir, "d455_{}".format(idx)) for idx in range(1, 5)]

    # Visualize every sequence under this clip.
    for sequence_name in os.listdir(clip_dir):
        seq_dir = join(clip_dir, sequence_name)
        print("visualizing {} ...".format(seq_dir))
        vis_single_video(seq_dir, obj_dataset_dir, camera_dirs[0], camera_dirs[1], camera_dirs[2], camera_dirs[3], cfg)