import os
from os.path import join, isfile
import sys
sys.path.append("../..")
import numpy as np
import cv2
from transforms3d.quaternions import quat2mat
from data_recording.multi_camera.utils.bvh2joint import bvh2joint, default_end_link_trans
from data_recording.multi_camera.utils.txt_parser import txt2intrinsic, txt2timestamps
from data_recording.multi_camera.utils.video_parser import mp42imgs
import open3d as o3d


def get_obj_name_correspondance():
    """Return the mapping from English object-category names to the
    (mostly Chinese) directory names used in the object CAD dataset."""
    return {
        "chair": "椅子",
        "desk": "桌子",
        "box": "箱子",
        "board": "板子",
        "bucket": "bucket",
        "stick": "stick",
    }


def get_obj_info(data_dir, obj_dataset_dir):
    """Identify the interacted object of a sequence and locate its CAD model.

    The object is taken to be the rigid-body label that appears in the most
    VTS frames; the "action1" label is excluded from the vote.

    Args:
        data_dir: sequence directory containing VTS_data.npz.
        obj_dataset_dir: root directory of the object CAD dataset.

    Returns:
        (obj_name, obj_model_path); (None, None) when the sequence has no
        interacted object. NOTE: if no candidate file exists, the last
        candidate path is returned (same as the original behavior) and may
        not point to an existing file.
    """
    VTS_path = join(data_dir, "VTS_data.npz")
    assert isfile(VTS_path)
    # Reuse the path built above (previously the join was redone for the load).
    VTS_data = np.load(VTS_path, allow_pickle=True)["data"].item()
    if ("/labels" not in VTS_data) or ("/rigid" not in VTS_data):  # no interacted object
        return None, None
    labels = VTS_data["/labels"]

    # Count, per device label, the number of frames it appears in.
    N_obj = {}
    for device_names in labels:
        for device_name in device_names:
            N_obj[device_name] = N_obj.get(device_name, 0) + 1

    # "action1" is not a real object label; zero its count so it never wins.
    N_obj["action1"] = 0

    mx, obj_name = 0, None
    for name, cnt in N_obj.items():
        if cnt > mx:
            mx, obj_name = cnt, name

    if obj_name is None:  # no interacted object
        return None, None

    corr = get_obj_name_correspondance()
    category = obj_name[:-3]     # e.g. "chair010" -> "chair"
    index = int(obj_name[-3:])   # e.g. "chair010" -> 10
    # The dataset is inconsistently zero-padded; try every directory/file
    # padding combination in the same order as before, keeping the first hit.
    candidates = [
        join(obj_dataset_dir, corr[category], corr[category] + d_idx, category + f_idx + "_m.obj")
        for d_idx in (str(index), str(index).zfill(3))
        for f_idx in (str(index), str(index).zfill(3))
    ]
    obj_model_path = candidates[-1]  # fallback when nothing exists
    for cand in candidates:
        if isfile(cand):
            obj_model_path = cand
            break

    return obj_name, obj_model_path


def _project_points(points_world, camera_intrinsic, camera_extrinsic):
    """Project (N, 3) world-space points to integer pixel coordinates.

    camera_extrinsic is the 4x4 world->camera transform; camera_intrinsic is
    the 3x3 pinhole matrix. No behind-camera culling is performed (points
    with z <= 0 produce meaningless pixels — same as the original code).
    """
    p = np.concatenate((points_world, np.ones((points_world.shape[0], 1))), axis=-1)  # (N, 4) homogeneous
    p = (p @ camera_extrinsic.transpose(1, 0))[:, :3]  # (N, 3) camera space
    uv = p @ camera_intrinsic.transpose(1, 0)
    uv = uv[:, :2] / uv[:, 2:]  # perspective divide -> (N, 2) image space
    return uv.astype(np.int32)


def render(img, joint1_data, joint2_data, obj2world, obj_p, camera_intrinsic, camera_extrinsic):
    """Draw person joints and object sample points onto one RGB frame.

    Args:
        img: (H, W, 3) RGB image; converted to BGR for OpenCV drawing.
        joint1_data: (J, 3) world-space joints of person1, or None to skip.
        joint2_data: (J, 3) world-space joints of person2, or None to skip.
        obj2world: 4x4 object->world transform, or None to skip the object.
        obj_p: (N, 3) object-space sample points (used when obj2world is set).
        camera_intrinsic: 3x3 camera intrinsic matrix.
        camera_extrinsic: 4x4 world->camera extrinsic matrix.

    Returns:
        BGR uint8 image with person1 in red, person2 in green, object in yellow.
    """
    img = img[:, :, ::-1].astype(np.uint8)  # rgb2bgr (astype copies, so drawing is safe)
    if joint1_data is not None:
        for px in _project_points(joint1_data, camera_intrinsic, camera_extrinsic):
            cv2.circle(img, tuple(px), 5, (0, 0, 255), -1)  # person1: red
    if joint2_data is not None:
        for px in _project_points(joint2_data, camera_intrinsic, camera_extrinsic):
            cv2.circle(img, tuple(px), 5, (0, 255, 0), -1)  # person2: green
    if obj2world is not None:
        # Object points: object space -> world space, then project like the rest.
        obj_homo = np.concatenate((obj_p, np.ones((obj_p.shape[0], 1))), axis=-1)
        obj_world = (obj_homo @ obj2world.T)[:, :3]
        for px in _project_points(obj_world, camera_intrinsic, camera_extrinsic):
            cv2.circle(img, tuple(px), 5, (0, 255, 255), -1)  # object: yellow
    return img


def vis_single_video(data_dir, cam1_dir, cam2_dir, obj_name, obj_model_path, cfg, VTS_add_time):
    """Render one sequence: project object/person poses into both RGB views
    and write a side-by-side visualization video next to the data.

    Args:
        data_dir: sequence directory with the camera mp4s, timestamp txts and VTS_data.npz.
        cam1_dir: calibration dir of camera 1 (intrinsic.txt, camera2world.txt).
        cam2_dir: calibration dir of camera 2.
        obj_name: VTS rigid-body label of the interacted object (e.g. "chair010"), or None.
        obj_model_path: path to the object CAD mesh (.obj), or None.
        cfg: flag dict with keys "vis_obj", "vis_person1", "vis_person2",
            "auto_VTS_add_time".
        VTS_add_time: offset added to VTS timestamps to align them with the
            camera clock (presumably nanoseconds — TODO confirm); recomputed
            from the first frames when cfg["auto_VTS_add_time"] is set.
    """
    # Sparse point sample of the object CAD model, used for rendering.
    obj_p = None
    if cfg["vis_obj"]:
        obj_mesh = o3d.io.read_triangle_mesh(obj_model_path)
        obj_p = np.float32(obj_mesh.sample_points_poisson_disk(100).points)

    # Camera intrinsics and camera->world poses (inverted to extrinsics later).
    cam1_intrinsic, _ = txt2intrinsic(join(cam1_dir, "intrinsic.txt"))
    cam1_pose = np.loadtxt(join(cam1_dir, "camera2world.txt"))
    cam2_intrinsic, _ = txt2intrinsic(join(cam2_dir, "intrinsic.txt"))
    cam2_pose = np.loadtxt(join(cam2_dir, "camera2world.txt"))

    # RGB streams and their per-frame timestamps.
    rgb1_imgs = mp42imgs(join(data_dir, "_d455_camera1_color_image_raw.mp4"))
    rgb1_timestamps = txt2timestamps(join(data_dir, "_d455_camera1_aligned_depth_to_color_image_raw_timestamp.txt"))
    N_rgb1 = len(rgb1_timestamps)
    rgb2_imgs = mp42imgs(join(data_dir, "_d455_camera2_color_image_raw.mp4"))
    rgb2_timestamps = txt2timestamps(join(data_dir, "_d455_camera2_aligned_depth_to_color_image_raw_timestamp.txt"))
    N_rgb2 = len(rgb2_timestamps)

    # Motion-capture (VTS) data for this sequence.
    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()

    # Optionally derive the VTS/camera clock offset from the first sample of each stream.
    if cfg["auto_VTS_add_time"]:
        VTS_add_time = rgb1_timestamps[0] - VTS_data["VTS_rigid_timestamp"][0]
        print(VTS_add_time)

    if cfg["vis_obj"]:
        rigid_pose_list = VTS_data["/rigid"]
        rigid_timestamps = [x + VTS_add_time for x in VTS_data["rigid_timestamp"]]
        labels = VTS_data["/labels"]
        N_rigid = len(rigid_timestamps)
    if cfg["vis_person1"]:
        person1_list = VTS_data["/joints"]
        person1_timestamps = [x + VTS_add_time for x in VTS_data["person1_timestamp"]]
        N_person1 = len(person1_timestamps)
    if cfg["vis_person2"]:
        person2_list = VTS_data["/joints2"]
        person2_timestamps = [x + VTS_add_time for x in VTS_data["person2_timestamp"]]
        N_person2 = len(person2_timestamps)

    # Align every stream to the rgb1 clock: for each rgb1 frame, advance each
    # stream pointer while the NEXT sample is at least as close in time.
    threshold = 40000000  # 40 ms, assuming nanosecond timestamps — TODO confirm
    data_list = []
    p_rigid, p_person1, p_person2, p_rgb2 = 0, 0, 0, 0
    print(N_rgb1, N_rgb2)
    for rgb1_idx in range(N_rgb1):
        t = rgb1_timestamps[rgb1_idx]
        # rgb2 align with rgb1
        while (p_rgb2 + 1 < N_rgb2) and (abs(t - rgb2_timestamps[p_rgb2 + 1]) <= abs(t - rgb2_timestamps[p_rgb2])):
            p_rgb2 += 1
        # obj pose align with rgb1
        if cfg["vis_obj"]:
            while (p_rigid + 1 < N_rigid) and (abs(t - rigid_timestamps[p_rigid + 1]) <= abs(t - rigid_timestamps[p_rigid])):
                p_rigid += 1
        # person1 pose align with rgb1
        if cfg["vis_person1"]:
            while (p_person1 + 1 < N_person1) and (abs(t - person1_timestamps[p_person1 + 1]) <= abs(t - person1_timestamps[p_person1])):
                p_person1 += 1
        # person2 pose align with rgb1
        if cfg["vis_person2"]:
            while (p_person2 + 1 < N_person2) and (abs(t - person2_timestamps[p_person2 + 1]) <= abs(t - person2_timestamps[p_person2])):
                p_person2 += 1

        # Drop the frame if any matched sample is farther than the threshold.
        flag = abs(t - rgb2_timestamps[p_rgb2]) < threshold
        if cfg["vis_obj"]:
            flag &= abs(t - rigid_timestamps[p_rigid]) < threshold
        if cfg["vis_person1"]:
            flag &= abs(t - person1_timestamps[p_person1]) < threshold
        if cfg["vis_person2"]:
            flag &= abs(t - person2_timestamps[p_person2]) < threshold
        if not flag:
            print("[error in preparing paired data] wrong frame idx =", rgb1_idx)
            continue

        if cfg["vis_obj"]:
            # The label order can vary per frame, so look the object up by name.
            obj2world = None
            rigid_poses = rigid_pose_list[p_rigid]
            device_names = labels[p_rigid]
            obj_label = None
            for i, device_name in enumerate(device_names):
                if device_name == obj_name:
                    obj_label = i
            if obj_label is not None:
                obj2world = np.eye(4)
                obj2world[:3, 3] = rigid_poses[obj_label]["position"]
                obj2world[:3, :3] = quat2mat(rigid_poses[obj_label]["orientation"])

        if cfg["vis_person1"]:
            joint1_globalpos, joint1_localrot = bvh2joint(person1_list[p_person1], end_link_trans=default_end_link_trans(), return_local_rot=True)
        if cfg["vis_person2"]:
            joint2_globalpos, joint2_localrot = bvh2joint(person2_list[p_person2], end_link_trans=default_end_link_trans(), return_local_rot=True)
        try:
            data_list.append(
                {
                    "timestamp": t,
                    "rgb1": rgb1_imgs[rgb1_idx],
                    "depth1": None,
                    "rgb2": rgb2_imgs[p_rgb2],
                    "depth2": None,
                    "obj2world": obj2world if cfg["vis_obj"] else None,
                    "person1": {"globalpos": joint1_globalpos, "localrot": joint1_localrot} if cfg["vis_person1"] else {"globalpos": None, "localrot": None},
                    "person2": {"globalpos": joint2_globalpos, "localrot": joint2_localrot} if cfg["vis_person2"] else {"globalpos": None, "localrot": None},
                }
            )
        except Exception as e:  # was a bare except silently hiding the cause
            print("[error in append data_list] wrong frame idx =", rgb1_idx, e)

    # Visualization: both views side by side, 30 fps.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    W = 1280 * 2
    H = 720
    vw = cv2.VideoWriter(join(data_dir, "vis_mv_HHO_pose.mp4"), fourcc, 30, (W, H))
    for data in data_list:
        img1 = render(data["rgb1"], data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam1_intrinsic, np.linalg.inv(cam1_pose))
        img2 = render(data["rgb2"], data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam2_intrinsic, np.linalg.inv(cam2_pose))
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:, :1280] = img1
        img[:, 1280:] = img2
        vw.write(img)
    vw.release()


if __name__ == "__main__":
    # ################################################################################################################
    # data_dir = "./data/20230701/000"
    # cam1_dir = "../collect_rawdata/camera_calib/d455_1"
    # cam2_dir = "../collect_rawdata/camera_calib/d455_2"
    # # obj_name = "chair010"
    # # obj_model_path = "../object_dataset_final/椅子/椅子10/chair10_m.obj"
    # # obj_name = "box001"
    # # obj_model_path = "../object_dataset_final/箱子/箱子1/box1_m.obj"
    # # obj_name = "board005"
    # # obj_model_path = "../object_dataset_final/板子/板子5/board5_m.obj"
    # obj_name = "board005"
    # obj_model_path = "../object_dataset_final/板子/板子5/board5_m.obj"
    # ################################################################################################################

    ############################################################################################################
    obj_dataset_dir = "/home/liuyun/HHO/object_dataset_final"
    date_dir = "/home/liuyun/HHO/multi-camera/data/20230807"
    seq_name = "test014"  # only handle one video use it
    VTS_add_time = 300000000  # TODO: change it!!!
    cfg = {
        "vis_obj": True,
        "vis_person1": True,
        "vis_person2": True,
        "auto_VTS_add_time": True,
    }
    ############################################################################################################

    for video_name in os.listdir(date_dir):
        # comment this to handle all the video in one day
        if video_name != seq_name:
            continue
        # ##########################################
        print("---------------------------------")
        print(video_name)
        data_dir = join(date_dir, video_name)
        cam1_dir = "../collect_rawdata/camera_calib/d455_1"
        cam2_dir = "../collect_rawdata/camera_calib/d455_2"
        obj_name, obj_model_path = None, None
        if cfg["vis_obj"]:
            try:
                obj_name, obj_model_path = get_obj_info(data_dir, obj_dataset_dir)
            except Exception as e:
                print("************************************")
                print(video_name)
                print(e)
                print("************************************")
                continue                

        print("data_dir, obj_name =", data_dir, obj_name)
        print(obj_model_path)
        vis_single_video(data_dir, cam1_dir, cam2_dir, obj_name, obj_model_path, cfg, VTS_add_time)
