import sys
sys.path.append("../..")
import os
from os.path import join, isfile
import numpy as np
import cv2
from transforms3d.quaternions import quat2mat
from tqdm import tqdm
import torch
from segment_anything import SamPredictor, sam_model_registry
from data_processing.utils.VTS_object import get_obj_info
from data_recording.multi_camera.utils.bvh2joint import bvh2joint, default_end_link_trans
from data_recording.multi_camera.utils.txt_parser import txt2intrinsic, txt2timestamps
from data_recording.multi_camera.utils.video_parser import mp42imgs
import open3d as o3d


def _project_to_pixels(points_world, camera_intrinsic, camera_extrinsic):
    """Project (N, 3) world-space points to (N, 2) integer pixel coordinates.

    Pipeline: world -> camera (4x4 extrinsic) -> image (3x3 intrinsic + perspective divide).
    """
    homo = np.concatenate((points_world, np.ones((points_world.shape[0], 1))), axis=-1)  # (N, 4)
    cam = (homo @ camera_extrinsic.T)[:, :3]  # (N, 3), camera space
    uv = cam @ camera_intrinsic.T
    uv = uv[:, :2] / uv[:, 2:]  # perspective divide -> (N, 2), image space
    return uv.astype(np.int32)


def compute_kps(joint1_data, joint2_data, obj2world, obj_p, camera_intrinsic, camera_extrinsic):
    """Project both persons' joints and the object's sample points into one camera view.

    Args:
        joint1_data: (N, 3) world-space joints of person1, or None to skip.
        joint2_data: (N, 3) world-space joints of person2, or None to skip.
        obj2world: (4, 4) object-to-world transform, or None to skip the object.
        obj_p: (M, 3) object-space sample points (only used when obj2world is given).
        camera_intrinsic: (3, 3) intrinsic matrix.
        camera_extrinsic: (4, 4) world-to-camera matrix.

    Returns:
        dict with keys "person1", "person2", "obj"; each value is an (N, 2)
        int32 pixel array or None when the corresponding input was None.
    """
    joint1_pixels = None
    if joint1_data is not None:
        joint1_pixels = _project_to_pixels(joint1_data, camera_intrinsic, camera_extrinsic)

    joint2_pixels = None
    if joint2_data is not None:
        joint2_pixels = _project_to_pixels(joint2_data, camera_intrinsic, camera_extrinsic)

    obj_pixels = None
    if obj2world is not None:
        # object space -> world space first (homogeneous), then the shared projection
        obj_homo = np.concatenate((obj_p, np.ones((obj_p.shape[0], 1))), axis=-1)  # (M, 4)
        obj_world = obj_homo @ obj2world.T  # (M, 4); w stays 1 for a rigid transform
        obj_pixels = _project_to_pixels(obj_world[:, :3], camera_intrinsic, camera_extrinsic)

    return {
        "person1": joint1_pixels,
        "person2": joint2_pixels,
        "obj": obj_pixels,
    }


def render_kps(img, kps):
    """Draw the projected keypoints as filled circles; returns a BGR uint8 copy.

    Colors (BGR): person1 red, person2 green, object yellow.
    """
    canvas = img[:, :, ::-1].astype(np.uint8)  # RGB -> BGR copy
    colors = {
        "person1": (0, 0, 255),
        "person2": (0, 255, 0),
        "obj": (0, 255, 255),
    }
    for key, color in colors.items():
        points = kps[key]
        if points is None:
            continue
        for pt in points:
            cv2.circle(canvas, tuple(pt), 5, color, -1)

    return canvas


def expand_bbox(bbox, alpha=1.2):
    """Scale an axis-aligned bbox [x0, y0, x1, y1] about its center by factor alpha."""
    cx = (bbox[0] + bbox[2]) / 2
    cy = (bbox[1] + bbox[3]) / 2
    half_w = (bbox[2] - bbox[0]) / 2 * alpha
    half_h = (bbox[3] - bbox[1]) / 2 * alpha

    return np.float32([cx - half_w, cy - half_h, cx + half_w, cy + half_h])


def compute_2Dmask_naive(img, kps, predictor):
    """Segment person1/person2/object with SAM using one bbox prompt per target.

    Returns an (H, W) uint8 label map: 0 background, 1 person1, 2 person2, 3 object.
    Later masks overwrite earlier ones where they overlap.
    """
    N_mask = 3
    # one expanded keypoint-bounding box per target, in the order person1, person2, obj
    boxes = []
    for key in ("person1", "person2", "obj"):
        pts = kps[key]
        boxes.append(expand_bbox(np.concatenate((pts.min(axis=0), pts.max(axis=0))), alpha=1.2))
    prompt_bbox = torch.FloatTensor(boxes).to(predictor.device)

    # predict
    transformed_boxes = predictor.transform.apply_boxes_torch(prompt_bbox, img.shape[:2])
    predictor.set_image(img)
    SAM_masks, scores, logits = predictor.predict_torch(
        point_coords=None,
        point_labels=None,
        boxes=transformed_boxes,
        multimask_output=False,
    )
    SAM_masks = SAM_masks[:, 0, :, :].cpu().numpy()

    label_map = np.zeros(img.shape[:2]).astype(np.uint8)
    for idx in range(N_mask):
        label_map[SAM_masks[idx]] = idx + 1

    return label_map


def compute_2Dmask_naive2(img, kps, predictor):
    """Segment person1/person2/object with SAM using positive point prompts.

    Args:
        img: (H, W, 3) uint8 image (format expected by predictor.set_image).
        kps: dict from compute_kps with "person1"/"person2" joint pixels and "obj" pixels.
        predictor: SamPredictor instance.

    Returns:
        (H, W) uint8 label map: 0 background, 1 person1, 2 person2, 3 object.
        Later masks overwrite earlier ones where they overlap.
    """
    # Subset of BVH joint indices used as point prompts — presumably torso/limb
    # anchors that land reliably inside the body silhouette (TODO confirm).
    selected_bvh_ids = np.int32([9, 59, 3, 6, 0, 60, 62])
    prompt_points = [
        kps["person1"][selected_bvh_ids],
        kps["person2"][selected_bvh_ids],
        kps["obj"],
    ]

    mask = np.zeros(img.shape[:2]).astype(np.uint8)

    # set_image runs SAM's image encoder (the expensive step). The image does
    # not change between prompts, so encode it once instead of once per prompt.
    predictor.set_image(img)
    for i, prompt_point in enumerate(prompt_points):
        SAM_masks, scores, logits = predictor.predict(
            point_coords=prompt_point,
            point_labels=np.ones(prompt_point.shape[0]).astype(np.uint8),  # all positive
            mask_input=None,
            multimask_output=False,
        )
        mask[SAM_masks[0]] = i + 1

    return mask


def render_2Dmask(img, mask):
    """Alpha-blend (50/50) a 4-class segmentation mask over an image.

    Args:
        img: (H, W, 3) uint8 image.
        mask: (H, W) uint8 label map (0=background, 1=person1, 2=person2, 3=object).

    Returns:
        (H, W, 3) uint8 blended image.
    """
    color_palette = [
        (0, 0, 0),  # 0: background
        (255, 255, 0),  # 1: person1
        (128, 0, 128),  # 2: person2
        (255, 0, 0),  # 3: object
    ]

    mask_img = np.zeros(img.shape).astype(np.uint8)
    # iterate the palette itself so the loop bound follows the number of classes
    for label, color in enumerate(color_palette):
        mask_img[mask == label] = color

    new_img = img.astype(np.float32) * 0.5 + mask_img.astype(np.float32) * 0.5
    new_img = new_img.clip(0, 255).astype(np.uint8)
    return new_img


def _advance_nearest(p, timestamps, n, t):
    """Advance index p while timestamps[p + 1] is at least as close to t as timestamps[p].

    Assumes timestamps are (roughly) monotonically increasing, so a single
    forward-moving pointer per stream finds the nearest sample for each frame.
    """
    while (p + 1 < n) and (abs(t - timestamps[p + 1]) <= abs(t - timestamps[p])):
        p += 1
    return p


def vis_single_video(data_dir, cam1_dir, cam2_dir, cam3_dir, cam4_dir, obj_name, obj_model_path, predictor, cfg):
    """Render keypoint + SAM-mask overlays for one 4-camera recording into an mp4.

    Aligns the four RGB streams and the mocap streams (object rigid pose,
    person1/person2 BVH joints) to camera1's timestamps, projects everything
    into each view, segments with SAM, and writes a 2x2 video grid to
    <data_dir>/vis_mv_HHO_pose.mp4.

    Args:
        data_dir: directory with VTS_data.npz, the 4 mp4 files and timestamp txts.
        cam1_dir..cam4_dir: calibration dirs with intrinsic.txt / camera2world.txt.
        obj_name: rigid-body label of the manipulated object (or None).
        obj_model_path: path to the object's CAD mesh (or None).
        predictor: SamPredictor instance.
        cfg: dict of booleans "vis_obj", "vis_person1", "vis_person2".
    """
    # CAD model: sparse surface samples used to project the object into each view
    obj_p = None
    if cfg["vis_obj"]:
        obj_mesh = o3d.io.read_triangle_mesh(obj_model_path)
        obj_p = np.float32(obj_mesh.sample_points_poisson_disk(30).points)

    # camera intrinsics and camera->world poses for the 4 views
    cam1_intrinsic, _ = txt2intrinsic(join(cam1_dir, "intrinsic.txt"))
    cam1_pose = np.loadtxt(join(cam1_dir, "camera2world.txt"))
    cam2_intrinsic, _ = txt2intrinsic(join(cam2_dir, "intrinsic.txt"))
    cam2_pose = np.loadtxt(join(cam2_dir, "camera2world.txt"))
    cam3_intrinsic, _ = txt2intrinsic(join(cam3_dir, "intrinsic.txt"))
    cam3_pose = np.loadtxt(join(cam3_dir, "camera2world.txt"))
    cam4_intrinsic, _ = txt2intrinsic(join(cam4_dir, "intrinsic.txt"))
    cam4_pose = np.loadtxt(join(cam4_dir, "camera2world.txt"))

    # mocap data (object rigid poses + BVH joint streams) and RGB streams
    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()
    if cfg["vis_obj"]:
        rigid_pose_list = VTS_data["/rigid"]
        rigid_timestamps = VTS_data["rigid_timestamp"]
        labels = VTS_data["/labels"]
        N_rigid = len(rigid_timestamps)
    if cfg["vis_person1"]:
        person1_list = VTS_data["/joints"]
        person1_timestamps = VTS_data["person1_timestamp"]
        N_person1 = len(person1_timestamps)
    if cfg["vis_person2"]:
        person2_list = VTS_data["/joints2"]
        person2_timestamps = VTS_data["person2_timestamp"]
        N_person2 = len(person2_timestamps)
    rgb1_imgs = mp42imgs(join(data_dir, "_d455_camera1_color_image_raw.mp4"))
    rgb1_timestamps = txt2timestamps(join(data_dir, "_d455_camera1_aligned_depth_to_color_image_raw_timestamp.txt"))
    N_rgb1 = len(rgb1_timestamps)
    rgb2_imgs = mp42imgs(join(data_dir, "_d455_camera2_color_image_raw.mp4"))
    rgb2_timestamps = txt2timestamps(join(data_dir, "_d455_camera2_aligned_depth_to_color_image_raw_timestamp.txt"))
    N_rgb2 = len(rgb2_timestamps)
    rgb3_imgs = mp42imgs(join(data_dir, "_d455_camera3_color_image_raw.mp4"))
    rgb3_timestamps = txt2timestamps(join(data_dir, "_d455_camera3_aligned_depth_to_color_image_raw_timestamp.txt"))
    N_rgb3 = len(rgb3_timestamps)  # fixed: previously counted rgb1's timestamps
    rgb4_imgs = mp42imgs(join(data_dir, "_d455_camera4_color_image_raw.mp4"))
    rgb4_timestamps = txt2timestamps(join(data_dir, "_d455_camera4_aligned_depth_to_color_image_raw_timestamp.txt"))
    N_rgb4 = len(rgb4_timestamps)  # fixed: previously counted rgb2's timestamps

    # temporal alignment: for every rgb1 frame find the nearest sample of each
    # other stream; drop frames where any stream is farther than the tolerance
    threshold = 40000000  # 40 ms (timestamps presumably in nanoseconds — TODO confirm)
    data_list = []
    p_rigid, p_person1, p_person2, p_rgb2, p_rgb3, p_rgb4 = 0, 0, 0, 0, 0, 0
    print(N_rgb1, N_rgb2)
    for rgb1_idx in range(N_rgb1):
        t = rgb1_timestamps[rgb1_idx]
        p_rgb2 = _advance_nearest(p_rgb2, rgb2_timestamps, N_rgb2, t)
        p_rgb3 = _advance_nearest(p_rgb3, rgb3_timestamps, N_rgb3, t)
        p_rgb4 = _advance_nearest(p_rgb4, rgb4_timestamps, N_rgb4, t)
        if cfg["vis_obj"]:
            p_rigid = _advance_nearest(p_rigid, rigid_timestamps, N_rigid, t)
        if cfg["vis_person1"]:
            p_person1 = _advance_nearest(p_person1, person1_timestamps, N_person1, t)
        if cfg["vis_person2"]:
            p_person2 = _advance_nearest(p_person2, person2_timestamps, N_person2, t)

        flag = abs(t - rgb2_timestamps[p_rgb2]) < threshold
        flag &= abs(t - rgb3_timestamps[p_rgb3]) < threshold
        flag &= abs(t - rgb4_timestamps[p_rgb4]) < threshold
        if cfg["vis_obj"]:
            flag &= abs(t - rigid_timestamps[p_rigid]) < threshold
        if cfg["vis_person1"]:
            flag &= abs(t - person1_timestamps[p_person1]) < threshold
        if cfg["vis_person2"]:
            flag &= abs(t - person2_timestamps[p_person2]) < threshold

        if not flag:
            print("[error in preparing paired data] wrong frame idx =", rgb1_idx)
            continue

        if cfg["vis_obj"]:
            # find this frame's rigid pose for obj_name among the tracked bodies
            obj2world = None
            rigid_poses = rigid_pose_list[p_rigid]
            device_names = labels[p_rigid]
            obj_label = None
            for i, device_name in enumerate(device_names):
                if device_name == obj_name:
                    obj_label = i
            if obj_label is not None:
                obj2world = np.eye(4)
                obj2world[:3, 3] = rigid_poses[obj_label]["position"]
                obj2world[:3, :3] = quat2mat(rigid_poses[obj_label]["orientation"])

        if cfg["vis_person1"]:
            joint1_globalpos, joint1_localrot = bvh2joint(person1_list[p_person1], end_link_trans=default_end_link_trans(), return_local_rot=True)
        if cfg["vis_person2"]:
            joint2_globalpos, joint2_localrot = bvh2joint(person2_list[p_person2], end_link_trans=default_end_link_trans(), return_local_rot=True)
        try:
            data_list.append(
                {
                    "timestamp": t,
                    "rgb1": rgb1_imgs[rgb1_idx],
                    "depth1": None,
                    "rgb2": rgb2_imgs[p_rgb2],
                    "depth2": None,
                    "rgb3": rgb3_imgs[p_rgb3],
                    "depth3": None,
                    "rgb4": rgb4_imgs[p_rgb4],
                    "depth4": None,
                    "obj2world": obj2world if cfg["vis_obj"] else None,
                    "person1": {"globalpos": joint1_globalpos, "localrot": joint1_localrot} if cfg["vis_person1"] else {"globalpos": None, "localrot": None},
                    "person2": {"globalpos": joint2_globalpos, "localrot": joint2_localrot} if cfg["vis_person2"] else {"globalpos": None, "localrot": None},
                }
            )
        except Exception as e:
            # best-effort: skip frames whose decoded image is missing (index out
            # of range when a video has fewer frames than timestamps), but say why
            print("[error in append data_list] wrong frame idx =", rgb1_idx, "error:", e)

    # visualization: 2x2 grid of the four 1280x720 views
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    W = 1280 * 2
    H = 720 * 2
    vw = cv2.VideoWriter(join(data_dir, "vis_mv_HHO_pose.mp4"), fourcc, 10, (W, H))
    for data in tqdm(data_list):

        # (1) compute 2D kps, shape = (N, 2); extrinsic = inverse of camera->world
        kps1 = compute_kps(data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam1_intrinsic, np.linalg.inv(cam1_pose))
        kps2 = compute_kps(data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam2_intrinsic, np.linalg.inv(cam2_pose))
        kps3 = compute_kps(data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam3_intrinsic, np.linalg.inv(cam3_pose))
        kps4 = compute_kps(data["person1"]["globalpos"], data["person2"]["globalpos"], data["obj2world"], obj_p, cam4_intrinsic, np.linalg.inv(cam4_pose))

        # (2) render 2D kps
        img1 = render_kps(data["rgb1"], kps1)
        img2 = render_kps(data["rgb2"], kps2)
        img3 = render_kps(data["rgb3"], kps3)
        img4 = render_kps(data["rgb4"], kps4)

        # (3) compute 2D mask, shape = (H, W), dtype = uint8
        mask1 = compute_2Dmask_naive2(data["rgb1"], kps1, predictor)
        mask2 = compute_2Dmask_naive2(data["rgb2"], kps2, predictor)
        mask3 = compute_2Dmask_naive2(data["rgb3"], kps3, predictor)
        mask4 = compute_2Dmask_naive2(data["rgb4"], kps4, predictor)

        # (4) render 2D mask
        img1 = render_2Dmask(img1, mask1)
        img2 = render_2Dmask(img2, mask2)
        img3 = render_2Dmask(img3, mask3)
        img4 = render_2Dmask(img4, mask4)

        # assemble the 2x2 grid and write the frame
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:720, :1280] = img1
        img[:720, 1280:] = img2
        img[720:, :1280] = img3
        img[720:, 1280:] = img4
        vw.write(img)
    vw.release()


if __name__ == "__main__":
    # ----------------------------------------------------------------- config
    obj_dataset_dir = "/share/datasets/HHO_object_dataset_final"
    date_dir = "/share/datasets/HHO_dataset/20230701"
    camera_calib_dir = "/share/datasets/HHO_camera_calib"
    cfg = {
        "vis_obj": True,
        "vis_person1": True,
        "vis_person2": True,
    }
    device = "cuda:0"
    # -------------------------------------------------------------------------

    # load Segment-Anything (ViT-H) once and wrap it in a predictor
    sam = sam_model_registry["vit_h"](checkpoint="/home/liuyun/codebases/segment-anything/checkpoints/sam_vit_h_4b8939.pth")
    sam.to(device)
    predictor = SamPredictor(sam)

    for video_name in os.listdir(date_dir):

        # only process sequence "000" for now
        if video_name != "000":
            continue

        data_dir = join(date_dir, video_name)
        cam1_dir = join(camera_calib_dir, "d455_1")
        cam2_dir = join(camera_calib_dir, "d455_2")
        cam3_dir = join(camera_calib_dir, "d455_3")
        cam4_dir = join(camera_calib_dir, "d455_4")

        # look up which object appears in this recording (only if rendering it)
        obj_name, obj_model_path = None, None
        if cfg["vis_obj"]:
            obj_name, obj_model_path = get_obj_info(data_dir, obj_dataset_dir)

        print("data_dir, obj_name =", data_dir, obj_name)

        vis_single_video(data_dir, cam1_dir, cam2_dir, cam3_dir, cam4_dir, obj_name, obj_model_path, predictor, cfg)
