import os
from os.path import join
import numpy as np
import cv2
from transforms3d.quaternions import quat2mat
from utils.bvh2joint import bvh2joint, default_end_link_trans
from utils.txt_parser import txt2intrinsic, txt2timestamps
from utils.video_parser import mp42imgs
import open3d as o3d


def obj2rgb(pixel_rec, img, obj2world, marker_p, camera_intrinsic, camera_extrinsic):
    """Project homogeneous marker points into the image and draw them.

    Args:
        pixel_rec: list accumulating per-frame projected pixel arrays;
            appended in place only when ``obj2world`` is given.
        img: (H, W, 3) RGB frame; returned converted to uint8 BGR.
        obj2world: (4, 4) object-to-world transform, or ``None`` when the
            object was not tracked in this frame (drawing is skipped).
        marker_p: (N, 4) homogeneous marker positions, one point per row.
        camera_intrinsic: (3, 3) camera intrinsic matrix.
        camera_extrinsic: (4, 4) world-to-camera transform.

    Returns:
        (H, W, 3) uint8 BGR image with a yellow dot on each marker pixel.
    """
    obj_pixels = None
    if obj2world is not None:
        # object space -> world space -> camera space (drop homogeneous coord)
        p_world = marker_p @ obj2world.T                  # (N, 4)
        p_cam = (p_world @ camera_extrinsic.T)[:, :3]     # (N, 3)
        # camera space -> pixel coordinates via perspective divide
        uv = p_cam @ camera_intrinsic.T
        uv = uv[:, :2] / uv[:, 2:]                        # (N, 2)
        obj_pixels = uv.astype(np.int32)
        pixel_rec.append(obj_pixels)
    # render: OpenCV draws/writes BGR, so flip the channel order
    img = img[:, :, ::-1].astype(np.uint8)
    if obj_pixels is not None:
        for px in obj_pixels:
            # cast to plain ints: cv2.circle rejects tuples of numpy scalars
            cv2.circle(img, (int(px[0]), int(px[1])), 5, (0, 255, 255), -1)
    return img



if __name__ == "__main__":
    ################################################################################################################
    # ---- hard-coded configuration: data paths, tracked object name, clock offset ----
    data_dir = "./data/20230615_test"
    cam1_dir = "../collect_rawdata/camera_calib/d455_1"
    cam2_dir = "../collect_rawdata/camera_calib/d455_2"
    obj_name = "box001"  # rigid-body label of the tracked object in the VTS stream
    obj_model_path = "../object_dataset_final/箱子/箱子1/box1_m.obj"
    delta_time_VTS_Ubuntu1 = 0  # VTS-to-Ubuntu clock offset, same unit as timestamps (ns) -- calibrate it!!!

    # marker_vts_position = [[-0.116, 0.247, -0.226, -0.266], 
    #                        [0.610, 0.561, 0.497, 0.381], 
    #                        [-0.308, 0.164, -0.405, -0.227], 
    #                        [1, 1, 1, 1]]
    # (4, 4) homogeneous marker coordinates, one marker per row.
    # NOTE(review): obj2rgb transforms these by obj2world, i.e. treats them as
    # object-local coordinates, while the name suggests the VTS/world frame — verify.
    marker_vts_position = np.array([[0.256, 0.381, 0.241, 1],
                           [-0.245, 0.566, -0.165, 1],
                           [0.104, 0.610, 0.316, 1],
                           [0.210, 0.497, 0.417, 1]])
    ################################################################################################################



    # cam params: 3x3 intrinsics and 4x4 camera->world poses for both cameras
    cam1_intrinsic, _ = txt2intrinsic(join(cam1_dir, "intrinsic.txt"))
    cam1_pose = np.loadtxt(join(cam1_dir, "camera2world.txt"))
    cam2_intrinsic, _ = txt2intrinsic(join(cam2_dir, "intrinsic.txt"))
    cam2_pose = np.loadtxt(join(cam2_dir, "camera2world.txt"))

    # get data: VTS rigid-body records plus both RGB streams with their timestamps
    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()
    rigid_pose_list = VTS_data["/rigid"]  # per-frame rigid-body poses ({"position", "orientation"} per body)
    rigid_timestamps = VTS_data["rigid_timestamp"]
    labels = VTS_data["/labels"]  # per-frame list of rigid-body names, indexed like rigid_pose_list
    
    rgb1_imgs = mp42imgs(join(data_dir, "_d455_camera1_color_image_raw.mp4"))
    rgb1_timestamps = txt2timestamps(join(data_dir, "_d455_camera1_aligned_depth_to_color_image_raw_timestamp.txt"))
    depth1_video = None
    rgb2_imgs = mp42imgs(join(data_dir, "_d455_camera2_color_image_raw.mp4"))
    rgb2_timestamps = txt2timestamps(join(data_dir, "_d455_camera2_aligned_depth_to_color_image_raw_timestamp.txt"))
    depth2_video = None
    # prepare paired data: for every camera-1 frame, pick the temporally nearest
    # rigid-pose sample and camera-2 frame via a two-pointer sweep (assumes all
    # timestamp streams are sorted ascending)
    N_rigid = len(rigid_timestamps)
    # N_person1 = len(person1_timestamps)
    # N_person2 = len(person2_timestamps)
    N_rgb1 = len(rgb1_timestamps)
    N_rgb2 = len(rgb2_timestamps)
    # assert (N_rigid > 0) and (N_person1 > 0) and (N_rgb1 > 0) and (N_rgb2 > 0)
    threshould = 40000000  # 40ms (so timestamps are in nanoseconds); pairing tolerance, currently unused
    data_list = []
    # p_person1 / p_person2 belong to the commented-out skeleton streams and stay at 0
    p_rigid, p_person1, p_person2, p_rgb2 = 0, 0, 0, 0
    for rgb1_idx in range(N_rgb1):
        t = rgb1_timestamps[rgb1_idx]
        # advance the rigid pointer while the next sample is closer to t
        # (camera-1 time shifted into VTS time by delta_time_VTS_Ubuntu1)
        while (p_rigid + 1 < N_rigid) and (abs(t + delta_time_VTS_Ubuntu1 - rigid_timestamps[p_rigid + 1]) < abs(t + delta_time_VTS_Ubuntu1 - rigid_timestamps[p_rigid])):
            p_rigid += 1
        # while (p_person1 + 1 < N_person1) and (abs(t + delta_time_VTS_Ubuntu1 - person1_timestamps[p_person1 + 1]) < abs(t + delta_time_VTS_Ubuntu1 - person1_timestamps[p_person1])):
            # p_person1 += 1
        # while (p_person2 + 1 < N_person2) and (abs(t + delta_time_VTS_Ubuntu1 - person2_timestamps[p_person2 + 1]) < abs(t + delta_time_VTS_Ubuntu1 - person2_timestamps[p_person2])):
        #     p_person2 += 1
        # advance the camera-2 pointer the same way (no clock offset applied
        # between the two cameras)
        while (p_rgb2 + 1 < N_rgb2) and (abs(t - rgb2_timestamps[p_rgb2 + 1]) < abs(t - rgb2_timestamps[p_rgb2])):
            p_rgb2 += 1
        # if (abs(t + delta_time_VTS_Ubuntu1 - rigid_timestamps[p_rigid]) > threshould) or (abs(t + delta_time_VTS_Ubuntu1 - person1_timestamps[p_person1]) > threshould) or (abs(t - rgb2_timestamps[p_rgb2]) > threshould):
        #     print("[error in preparing paired data] wrong frame idx =", rgb1_idx)
        #     continue

        rigid_poses = rigid_pose_list[p_rigid]
        device_names = labels[p_rigid]
        obj_label = None
        
        # locate the tracked object among this frame's rigid bodies
        for i, device_name in enumerate(device_names):
            if device_name == obj_name:
                obj_label = i
        if not obj_label is None:
            # assemble the 4x4 object->world transform from position + quaternion
            obj2world = np.eye(4)
            obj2world[:3, 3] = rigid_poses[obj_label]["position"]
            obj2world[:3, :3] = quat2mat(rigid_poses[obj_label]["orientation"])
            # calculate the markers position 
            # NOTE(review): marker_pos is never used afterwards; it also multiplies
            # row-stored points on the right of the matrix, which is inconsistent
            # with obj2rgb's `marker_p @ obj2world.T` — verify before relying on it.
            marker_pos = np.matmul(obj2world, marker_vts_position)

        else:
            # object not tracked in this frame; obj2rgb will skip drawing
            obj2world = None

        data_list.append(
            {
                "timestamp": t,
                "rgb1": rgb1_imgs[rgb1_idx],
                "depth1": None,  # depth streams are not loaded by this script
                "rgb2": rgb2_imgs[p_rgb2],
                "depth2": None,
                "obj2world": obj2world,
            }
        )
        
    # empirical offset applied to the markers' y components before rendering.
    # NOTE(review): this runs after pairing but before visualization — confirm
    # the -0.31 shift is intentional at this point.
    marker_vts_position[:, 1] -= 0.31

    # visualization: render both camera views side by side into one mp4
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    W = 1280 * 2  # two 1280-wide frames placed side by side
    H = 720
    vw = cv2.VideoWriter(join(data_dir, "vis_mv_marker_pose.mp4"), fourcc, 30, (W, H))
    last_data = None

    
    # per-frame projected marker pixels for each camera, filled by obj2rgb
    pixel_rec_cam1 = []
    pixel_rec_cam2 = []

    for data in data_list:
        # img1 = render(data["rgb1"], data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam1_intrinsic, np.linalg.inv(cam1_pose))
        # img2 = render(data["rgb2"], data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam2_intrinsic, np.linalg.inv(cam2_pose))
        # np.linalg.inv(pose) turns the stored camera->world pose into the
        # world->camera extrinsic expected by obj2rgb
        img1 = obj2rgb(pixel_rec_cam1, data["rgb1"], data["obj2world"], marker_vts_position,cam1_intrinsic, np.linalg.inv(cam1_pose))
        img2 = obj2rgb(pixel_rec_cam2, data["rgb2"], data["obj2world"], marker_vts_position, cam2_intrinsic, np.linalg.inv(cam2_pose))
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:, :1280] = img1
        img[:, 1280:] = img2
        vw.write(img)
        last_data = data.copy()
    vw.release()

    # persist the recorded per-frame marker pixels for downstream processing
    np.save(join(data_dir, "pixel_rec_cam1_marker.npy"), pixel_rec_cam1)
    np.save(join(data_dir, "pixel_rec_cam2_marker.npy"), pixel_rec_cam2)