import sys
sys.path.append("../..")
import os
from os.path import join, isfile, isdir
import numpy as np
import cv2
from transforms3d.quaternions import quat2mat
import trimesh
from tqdm import tqdm
import torch
from pytorch3d.renderer import AmbientLights
from data_processing.smplx import smplx
from segment_anything import SamPredictor, sam_model_registry
from data_processing.utils.VTS_object import get_obj_info
from data_processing.utils.time_align import txt_to_paried_frameids
from data_processing.prepare_2Dmask.utils.colors import FAKE_COLOR_LIST
from data_processing.utils.pyt3d_wrapper import Pyt3DWrapper
from data_processing.utils.visualization import render_HHO
from data_recording.multi_camera.utils.bvh2joint import bvh2joint, default_end_link_trans
from data_recording.multi_camera.utils.txt_parser import txt2intrinsic, txt2timestamps
from data_recording.multi_camera.utils.video_parser import mp42imgs
import open3d as o3d


def _project_points(points_world, camera_intrinsic, camera_extrinsic):
    """Project world-space 3D points to integer pixel coordinates.

    Args:
        points_world: (N, 3) float array of points in world space.
        camera_intrinsic: (3, 3) pinhole intrinsic matrix.
        camera_extrinsic: (4, 4) world-to-camera transform.

    Returns:
        (N, 2) int32 array of (u, v) image coordinates.
    """
    # to homogeneous coordinates, then world -> camera
    p = np.concatenate((points_world, np.ones((points_world.shape[0], 1))), axis=-1)
    p = p @ camera_extrinsic.transpose(1, 0)
    p = p[:, :3]  # (N, 3), in camera space
    # camera -> image with perspective divide
    uv = p @ camera_intrinsic.transpose(1, 0)
    uv = uv[:, :2] / uv[:, 2:]  # (N, 2), in image space
    return uv.astype(np.int32)


def compute_kps(joint1_data, joint2_data, obj2world, obj_p, camera_intrinsic, camera_extrinsic):
    """Project person joints and object sample points into one camera image.

    Args:
        joint1_data: (J, 3) world-space joints of person1, or None.
        joint2_data: (J, 3) world-space joints of person2, or None.
        obj2world: (4, 4) object-to-world transform, or None.
        obj_p: (P, 3) object-space sample points, or None.
        camera_intrinsic: (3, 3) intrinsic matrix.
        camera_extrinsic: (4, 4) world-to-camera transform.

    Returns:
        dict with keys "person1"/"person2"/"obj", each an (N, 2) int32 pixel
        array or None when the corresponding input was None.
    """
    joint1_pixels = None
    if joint1_data is not None:
        joint1_pixels = _project_points(joint1_data, camera_intrinsic, camera_extrinsic)

    joint2_pixels = None
    if joint2_data is not None:
        joint2_pixels = _project_points(joint2_data, camera_intrinsic, camera_extrinsic)

    obj_pixels = None
    # obj2world may be non-None while obj_p is None: the sequence contains an
    # object (needed to model occlusion of the people) but the object mask is
    # not produced by this pipeline.
    if obj_p is not None:
        p = np.concatenate((obj_p, np.ones((obj_p.shape[0], 1))), axis=-1)  # (P, 4), object space
        p = p @ obj2world.T  # (P, 4), in world space
        obj_pixels = _project_points(p[:, :3], camera_intrinsic, camera_extrinsic)

    kps = {
        "person1": joint1_pixels,
        "person2": joint2_pixels,
        "obj": obj_pixels,
    }
    return kps


def render_kps(img, kps):
    """Draw keypoint dots on an RGB image and return it as a BGR uint8 image.

    Args:
        img: (H, W, 3) RGB image.
        kps: dict with "person1"/"person2"/"obj" pixel arrays (or None).

    Returns:
        (H, W, 3) uint8 BGR image with filled circles drawn on it.
    """
    canvas = img[:, :, ::-1].astype(np.uint8)  # rgb2bgr for OpenCV drawing
    # BGR colors: person1 red, person2 green, object yellow
    draw_colors = (
        ("person1", (0, 0, 255)),
        ("person2", (0, 255, 0)),
        ("obj", (0, 255, 255)),
    )
    for key, color in draw_colors:
        points = kps[key]
        if points is None:
            continue
        for point in points:
            cv2.circle(canvas, tuple(point), 5, color, -1)

    return canvas


def expand_bbox(bbox, alpha=1.2):
    """Scale an axis-aligned bbox [x0, y0, x1, y1] about its center by alpha.

    Args:
        bbox: length-4 array-like [x_min, y_min, x_max, y_max].
        alpha: scale factor applied to both box side lengths.

    Returns:
        float32 array [x_min, y_min, x_max, y_max] of the scaled box.
    """
    cx = (bbox[0] + bbox[2]) / 2
    cy = (bbox[1] + bbox[3]) / 2
    half_w = (bbox[2] - bbox[0]) / 2 * alpha
    half_h = (bbox[3] - bbox[1]) / 2 * alpha

    return np.float32([cx - half_w, cy - half_h, cx + half_w, cy + half_h])


def compute_2Dmask_naive(img, kps, predictor):
    """Segment person1/person2/object with SAM using bounding-box prompts.

    Bounding boxes are derived from the keypoint extents (expanded by 1.2x)
    and fed to SAM in a single batched call. Entries whose keypoints are
    None or empty are skipped instead of crashing (e.g. when vis_obj=False).

    Args:
        img: (H, W, 3) RGB image.
        kps: dict with "person1"/"person2"/"obj" (N, 2) pixel arrays or None.
        predictor: initialized SamPredictor.

    Returns:
        (H, W) uint8 label map: 0=background, 1=person1, 2=person2, 3=object.
    """
    labels = []
    bboxes = []
    # label mapping is fixed: person1 -> 1, person2 -> 2, obj -> 3
    for label, key in enumerate(("person1", "person2", "obj"), start=1):
        pts = kps[key]
        if (pts is None) or (pts.shape[0] == 0):
            continue
        bboxes.append(expand_bbox(np.concatenate((pts.min(axis=0), pts.max(axis=0))), alpha=1.2))
        labels.append(label)

    mask = np.zeros(img.shape[:2], dtype=np.uint8)
    if not bboxes:
        return mask

    prompt_bbox = torch.FloatTensor(np.stack(bboxes)).to(predictor.device)

    # predict all boxes in one batched SAM call
    transformed_boxes = predictor.transform.apply_boxes_torch(prompt_bbox, img.shape[:2])
    predictor.set_image(img)
    SAM_masks, scores, logits = predictor.predict_torch(
        point_coords=None,
        point_labels=None,
        boxes=transformed_boxes,
        multimask_output=False,
    )
    SAM_masks = SAM_masks[:, 0, :, :].cpu().numpy()
    # later masks overwrite earlier ones where they overlap
    for i, label in enumerate(labels):
        mask[SAM_masks[i]] = label

    return mask


def compute_2Dmask_naive2(img, kps, predictor):
    """Segment person1/person2/object with SAM using point prompts.

    Each entity's keypoints are used as positive point prompts in a separate
    SAM prediction. The expensive image encoding (`predictor.set_image`) is
    performed once per image instead of once per entity.

    Args:
        img: (H, W, 3) RGB image.
        kps: dict with "person1"/"person2"/"obj" (N, 2) pixel arrays or None.
        predictor: initialized SamPredictor.

    Returns:
        (H, W) uint8 label map: 0=background, 1=person1, 2=person2, 3=object.
    """
    prompt_points = [
        kps["person1"],
        kps["person2"],
        kps["obj"],
    ]

    mask = np.zeros(img.shape[:2], dtype=np.uint8)
    if all((p is None) or (p.shape[0] == 0) for p in prompt_points):
        return mask

    # hoisted out of the loop: set_image runs the SAM image encoder, which is
    # by far the most expensive step and depends only on the image
    predictor.set_image(img)

    for i, prompt_point in enumerate(prompt_points):
        if (prompt_point is None) or (prompt_point.shape[0] == 0):
            continue
        SAM_masks, scores, logits = predictor.predict(
            point_coords=prompt_point,
            point_labels=np.ones(prompt_point.shape[0]).astype(np.uint8),  # all positive prompts
            mask_input=None,
            multimask_output=False,
        )
        # later masks overwrite earlier ones where they overlap
        mask[SAM_masks[0]] = i + 1

    return mask


def render_2Dmask(img, mask):
    """Alpha-blend a 4-class label map over an image (50/50 mix).

    Args:
        img: (H, W, 3) uint8 image.
        mask: (H, W) label map with values in {0, 1, 2, 3}.

    Returns:
        (H, W, 3) uint8 blended image.
    """
    palette = (
        (0, 0, 0),       # 0: background
        (255, 255, 0),   # 1: person1
        (128, 0, 128),   # 2: person2
        (255, 0, 0),     # 3: object
    )

    overlay = np.zeros(img.shape).astype(np.uint8)
    for label, color in enumerate(palette):
        overlay[mask == label] = color

    blended = img.astype(np.float32) * 0.5 + overlay.astype(np.float32) * 0.5
    return blended.clip(0, 255).astype(np.uint8)


def read_SMPLX_params(data_dir, M, N, device):
    """Load per-frame SMPL-X parameters from chunked .npz files.

    Files are named "{start}to{end}.npz" with inclusive end index, each holding
    a pickled dict under key "results" whose values are batched torch tensors.

    Args:
        data_dir: directory containing the chunk files.
        M: chunk size (frames per file).
        N: total number of frames.
        device: torch device the returned tensors are moved to.

    Returns:
        list of N dicts, each mapping parameter name to a (1, D) tensor.
    """
    param_keys = ("betas", "body_pose", "transl", "global_orient",
                  "left_hand_pose", "right_hand_pose")
    SMPLX_params = []
    for start in range(0, N, M):
        stop = min(start + M, N)
        chunk_path = join(data_dir, "{}to{}.npz".format(str(start), str(stop - 1)))
        chunk = np.load(chunk_path, allow_pickle=True)["results"].item()
        for j in range(stop - start):
            # slice [j:j+1] keeps a leading batch dimension of size 1
            SMPLX_params.append({k: chunk[k][j:j + 1].to(device) for k in param_keys})
    return SMPLX_params


def render_fake_mask(pyt3d_wrapper, smplx_model, person1_SMPLX_params, person2_SMPLX_params, object_mesh, obj2world):
    """Render a color-coded scene mask (person1/person2/object) via render_HHO.

    Args:
        pyt3d_wrapper: pytorch3d rendering wrapper with fixed camera.
        smplx_model: SMPL-X body model used to pose both people.
        person1_SMPLX_params / person2_SMPLX_params: per-frame SMPL-X params.
        object_mesh: object mesh, or None.
        obj2world: (4, 4) object pose; when None the object is omitted.

    Returns:
        the rendered image produced by render_HHO.
    """
    if obj2world is not None:
        object_entry = {"mesh": object_mesh, "obj2world": obj2world}
    else:
        object_entry = None
    scene = {
        "person1": person1_SMPLX_params,
        "person2": person2_SMPLX_params,
        "object": object_entry,
    }
    return render_HHO(pyt3d_wrapper, smplx_model, scene, rgb_img=None, frame_idx=None, suffix="", save=False)


def lie_in_image(p, img_shape):
    """Return True iff pixel p = (row, col) lies inside an image of shape (H, W)."""
    row, col = p
    height, width = img_shape
    return 0 <= row < height and 0 <= col < width


def _select_visible_kps(candidate_kps, fake_mask, channel):
    """Keep keypoints whose pixel in fake_mask belongs to the given color channel.

    Args:
        candidate_kps: (N, 2) int array of (u, v) = (col, row) pixel coords.
        fake_mask: (H, W, 3) float render with values in [0, 1].
        channel: 0/1/2 -> red (person1) / green (person2) / blue (object).

    A pixel matches when its value in `channel` is > 0.8 and the other two
    channels are < 0.2, i.e. the keypoint lands on its own, unoccluded region.

    Returns:
        list of kept keypoint rows.
    """
    other_channels = [c for c in range(3) if c != channel]
    kept = []
    for kp in candidate_kps:
        col, row = kp[0], kp[1]
        if not lie_in_image((row, col), fake_mask.shape[:2]):
            continue
        pixel = fake_mask[row, col]
        if pixel[channel] > 0.8 and all(pixel[c] < 0.2 for c in other_channels):
            kept.append(kp)
    return kept


def filter_kps(init_kps, fake_mask):
    """Filter projected keypoints against a rendered color-coded occlusion mask.

    fake_mask: RGB image from pytorch3d.renderer, shape = (H, W, 3)

    Only keypoints that land on their own entity's color in the fake mask are
    kept, so occluded joints do not become wrong SAM point prompts.

    Returns:
        dict with "person1"/"person2" int32 arrays and "obj" int32 array or None.
    """
    # the per-channel thresholds below rely on exactly this color assignment
    assert FAKE_COLOR_LIST == [
        [1.0, 0.0, 0.0],  # person1
        [0.0, 1.0, 0.0],  # person2
        [0.0, 0.0, 1.0],  # object
    ]

    # subset of BVH joints used as point prompts
    selected_bvh_ids = np.int32([9, 59, 3, 6, 0, 60, 62])  # TODO: change it
    person1_kps = _select_visible_kps(init_kps["person1"][selected_bvh_ids], fake_mask, channel=0)
    person2_kps = _select_visible_kps(init_kps["person2"][selected_bvh_ids], fake_mask, channel=1)
    obj_kps = None
    if init_kps["obj"] is not None:
        obj_kps = _select_visible_kps(init_kps["obj"], fake_mask, channel=2)

    kps = {
        "person1": np.int32(person1_kps),
        "person2": np.int32(person2_kps),
        "obj": np.int32(obj_kps) if obj_kps is not None else None,
    }
    return kps


def vis_single_video(data_dir, cam1_dir, cam2_dir, cam3_dir, cam4_dir, predictor, cfg, object_data, paired_frames, device):
    """Generate and visualize per-frame 2D masks for one recorded video.

    Per aligned frame tuple, the pipeline is:
      (1) project VTS joints / object sample points into each of the 4 cameras,
      (2) render a color-coded "fake mask" from the fitted SMPL-X meshes
          (+ object) per camera,
      (3) keep only keypoints that land on their own color in the fake mask,
      (4) draw the surviving keypoints,
      (5) run SAM with the keypoints as point prompts,
      (6) blend the resulting masks over the RGB frames,
    and write the four camera views as a 2x2 mosaic video into data_dir.

    Args:
        data_dir: video directory with VTS_data.npz, SMPLX_fitting/, mp4 files.
        cam1_dir..cam4_dir: calibration dirs with intrinsic.txt / camera2world.txt.
        predictor: initialized SamPredictor.
        cfg: dict with "vis_person1"/"vis_person2"/"vis_obj"; both person flags
            must be True or the video is skipped.
        object_data: dict with "model_path"/"mesh"/"obj2world", or None.
        paired_frames: list of aligned index tuples
            (rgb1, rgb2, rgb3, rgb4, person1, person2).
        device: torch device string used for SMPL-X and rendering.

    Side effect: writes vis_processed_2Dmask_human_only.mp4 into data_dir.
    """
    # this function currently requires both people to be visualized
    if (not cfg["vis_person1"]) or (not cfg["vis_person2"]):
        print("[prepare 2D mask] error: not support for this cfg, skip this video!!!")
        return
    
    # CAD model: sample a sparse point set on the object surface to serve as
    # initial SAM point prompts (only when the object mask is requested)
    obj_p = None
    if cfg["vis_obj"]:
        obj_mesh = o3d.io.read_triangle_mesh(object_data["model_path"])
        obj_p = np.float32(obj_mesh.sample_points_poisson_disk(30).points)  # initial object point prompt

    # cam params: per-camera intrinsics and camera->world poses
    cam1_intrinsic, _ = txt2intrinsic(join(cam1_dir, "intrinsic.txt"))
    cam1_pose = np.loadtxt(join(cam1_dir, "camera2world.txt"))
    cam2_intrinsic, _ = txt2intrinsic(join(cam2_dir, "intrinsic.txt"))
    cam2_pose = np.loadtxt(join(cam2_dir, "camera2world.txt"))
    cam3_intrinsic, _ = txt2intrinsic(join(cam3_dir, "intrinsic.txt"))
    cam3_pose = np.loadtxt(join(cam3_dir, "camera2world.txt"))
    cam4_intrinsic, _ = txt2intrinsic(join(cam4_dir, "intrinsic.txt"))
    cam4_pose = np.loadtxt(join(cam4_dir, "camera2world.txt"))

    # get data: motion-capture joints, fitted SMPL-X params, object poses
    assert isdir(join(data_dir, "SMPLX_fitting"))
    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()
    person1_list, person1_SMPLX_params_list, person2_list, person2_SMPLX_params_list, rigid_pose_list = None, None, None, None, None
    if "/joints" in VTS_data:
        person1_list = VTS_data["/joints"]  # VTS person1 data
        person1_SMPLX_params_list = read_SMPLX_params(join(data_dir, "SMPLX_fitting", "person_1"), M=50, N=len(paired_frames), device=device)  # optimized SMPLX params
    if "/joints2" in VTS_data:
        person2_list = VTS_data["/joints2"]  # VTS person2 data
        person2_SMPLX_params_list = read_SMPLX_params(join(data_dir, "SMPLX_fitting", "person_2"), M=50, N=len(paired_frames), device=device)  # optimized SMPLX params
    if not object_data is None:
        rigid_pose_list = object_data["obj2world"]  # VTS aligned object pose
    
    # decode all four camera streams into frame lists (held in memory)
    rgb1_imgs = mp42imgs(join(data_dir, "_d455_camera1_color_image_raw.mp4"))
    rgb2_imgs = mp42imgs(join(data_dir, "_d455_camera2_color_image_raw.mp4"))
    rgb3_imgs = mp42imgs(join(data_dir, "_d455_camera3_color_image_raw.mp4"))
    rgb4_imgs = mp42imgs(join(data_dir, "_d455_camera4_color_image_raw.mp4"))
    
    # init pytorch3d renderer: one fixed-camera wrapper per view; FAKE_COLOR_LIST
    # assigns the red/green/blue entity colors that filter_kps relies on
    pyt3d_wrapper1 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam1_intrinsic, extrin=np.linalg.inv(cam1_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper2 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam2_intrinsic, extrin=np.linalg.inv(cam2_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper3 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam3_intrinsic, extrin=np.linalg.inv(cam3_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper4 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam4_intrinsic, extrin=np.linalg.inv(cam4_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    
    # visualization: 2x2 mosaic of the 4 cameras at 10 fps
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    W = 1280 * 2
    H = 720 * 2
    vw = cv2.VideoWriter(join(data_dir, "vis_processed_2Dmask_human_only.mp4"), fourcc, 10, (W, H))
    
    for paired_frame_idx in range(len(paired_frames)):
        print("processing paired_frame", paired_frame_idx)
        # one aligned index per stream: 4 RGB cameras + 2 mocap subjects
        paired_frame = paired_frames[paired_frame_idx]
        rgb1_idx = paired_frame[0]
        rgb2_idx = paired_frame[1]
        rgb3_idx = paired_frame[2]
        rgb4_idx = paired_frame[3]
        person1_idx = paired_frame[4]
        person2_idx = paired_frame[5]
        
        # get data for this frame
        rgb1 = rgb1_imgs[rgb1_idx]
        rgb2 = rgb2_imgs[rgb2_idx]
        rgb3 = rgb3_imgs[rgb3_idx]
        rgb4 = rgb4_imgs[rgb4_idx]
        person1_globalpos, _ = bvh2joint(person1_list[person1_idx], end_link_trans=default_end_link_trans(), return_local_rot=True)
        person1_SMPLX_params = person1_SMPLX_params_list[paired_frame_idx]
        person2_globalpos, _ = bvh2joint(person2_list[person2_idx], end_link_trans=default_end_link_trans(), return_local_rot=True)
        person2_SMPLX_params = person2_SMPLX_params_list[paired_frame_idx]
        obj_mesh = object_data["mesh"] if not object_data is None else None
        obj2world = rigid_pose_list[paired_frame_idx] if not object_data is None else None
        
        # (1) compute initial point prompts, shape = (N, 2)
        init_kps1 = compute_kps(person1_globalpos, person2_globalpos, obj2world, obj_p, cam1_intrinsic, np.linalg.inv(cam1_pose))
        init_kps2 = compute_kps(person1_globalpos, person2_globalpos, obj2world, obj_p, cam2_intrinsic, np.linalg.inv(cam2_pose))
        init_kps3 = compute_kps(person1_globalpos, person2_globalpos, obj2world, obj_p, cam3_intrinsic, np.linalg.inv(cam3_pose))
        init_kps4 = compute_kps(person1_globalpos, person2_globalpos, obj2world, obj_p, cam4_intrinsic, np.linalg.inv(cam4_pose))
        
        # (2) render SMPL&Objmesh based 2D mask
        # NOTE(review): the SMPL-X model is re-created on every frame from a
        # hard-coded model path; presumably num_pca_comps is constant across
        # frames, in which case this could be hoisted out of the loop — confirm
        num_pca_comps = person1_SMPLX_params["left_hand_pose"].shape[1]
        smplx_model = smplx.create("/share/human_model/models", model_type="smplx", gender="neutral", use_face_contour=False, num_betas=10, num_expression_coeffs=10, ext="npz", use_pca=True, num_pca_comps=num_pca_comps, flat_hand_mean=True)
        smplx_model.to(device)
        fake_mask1 = render_fake_mask(pyt3d_wrapper1, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask2 = render_fake_mask(pyt3d_wrapper2, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask3 = render_fake_mask(pyt3d_wrapper3, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask4 = render_fake_mask(pyt3d_wrapper4, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)

        # (3) filter point prompts: keep only keypoints visible in the fake mask
        kps1 = filter_kps(init_kps1, fake_mask1)
        kps2 = filter_kps(init_kps2, fake_mask2)
        kps3 = filter_kps(init_kps3, fake_mask3)
        kps4 = filter_kps(init_kps4, fake_mask4)

        # (4) render 2D kps
        img1 = render_kps(rgb1, kps1)
        img2 = render_kps(rgb2, kps2)
        img3 = render_kps(rgb3, kps3)
        img4 = render_kps(rgb4, kps4)
        
        # (5) compute 2D mask, shape = (H, W), dtype = uint8
        mask1 = compute_2Dmask_naive2(rgb1, kps1, predictor)
        mask2 = compute_2Dmask_naive2(rgb2, kps2, predictor)
        mask3 = compute_2Dmask_naive2(rgb3, kps3, predictor)
        mask4 = compute_2Dmask_naive2(rgb4, kps4, predictor)

        # (6) render 2D mask
        img1 = render_2Dmask(img1, mask1)
        img2 = render_2Dmask(img2, mask2)
        img3 = render_2Dmask(img3, mask3)
        img4 = render_2Dmask(img4, mask4)

        # save: tile the 4 views into one 2560x1440 mosaic frame
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:720, :1280] = img1
        img[:720, 1280:] = img2
        img[720:, :1280] = img3
        img[720:, 1280:] = img4
        vw.write(img)
        
    vw.release()


if __name__ == "__main__":

    ############################################################################################################
    obj_dataset_dir = "/share/datasets/HHO_object_dataset_final"
    date_dir = "/share/datasets/HHO_dataset/data/20230724"
    camera_calib_dir = "/home/liuyun/HHO-dataset/data_processing/camera_info"
    cfg = {
        "vis_person1": True,
        "vis_person2": True,
        "vis_obj": False,
    }
    device = "cuda:0"
    ############################################################################################################
    
    # camera info
    cam1_dir = join(camera_calib_dir, "d455_1")
    cam2_dir = join(camera_calib_dir, "d455_2")
    cam3_dir = join(camera_calib_dir, "d455_3")
    cam4_dir = join(camera_calib_dir, "d455_4")

    # init Segment-anything model
    sam = sam_model_registry["vit_h"](checkpoint="/home/liuyun/codebases/segment-anything/checkpoints/sam_vit_h_4b8939.pth")
    sam.to(device)
    predictor = SamPredictor(sam)

    for video_name in os.listdir(date_dir):
        
        if (not video_name == "001") and (not video_name == "024") and (not video_name == "012") and (not video_name == "038") and (not video_name == "044") and (not video_name == "016"):
            continue

        data_dir = join(date_dir, video_name)
        
        video_cfg = cfg.copy()
        
        assert isfile(join(data_dir, "aligned_frame_ids.txt"))
        paired_frames = txt_to_paried_frameids(join(data_dir, "aligned_frame_ids.txt"))
        
        obj_name, obj_model_path = get_obj_info(data_dir, obj_dataset_dir)
        
        object_data = None
        if not isfile(join(data_dir, "aligned_objposes.npy")):
            video_cfg["vis_obj"] = False
        else:
            assert isfile(obj_model_path)
            obj2world = np.load(join(data_dir, "aligned_objposes.npy"))
            object_data = {
                "model_path": obj_model_path,
                "mesh": trimesh.load_mesh(obj_model_path),
                "obj2world": obj2world,
            }

        print("data_dir =", data_dir)
        
        vis_single_video(data_dir, cam1_dir, cam2_dir, cam3_dir, cam4_dir, predictor, video_cfg, object_data, paired_frames, device=device)
