# first: run /home/liuyun/codebases/Tracking-Anything-with-DEVA

import sys
sys.path.append("../..")
import os
from os.path import join, isfile, isdir
import numpy as np
import pickle
import cv2
from transforms3d.quaternions import quat2mat
import trimesh
from tqdm import tqdm
import torch
from pathlib import Path
from pytorch3d.renderer import AmbientLights
from data_processing.smplx import smplx
from data_processing.utils.VTS_object import get_obj_info
from data_processing.utils.time_align import txt_to_paried_frameids
from data_processing.prepare_2Dmask.utils.colors import FAKE_COLOR_LIST
from data_processing.utils.pyt3d_wrapper import Pyt3DWrapper
from data_processing.utils.visualization import render_HHO
from data_recording.multi_camera.utils.bvh2joint import bvh2joint, default_end_link_trans
from data_recording.multi_camera.utils.txt_parser import txt2intrinsic, txt2timestamps
from data_recording.multi_camera.utils.video_parser import mp42imgs
import open3d as o3d
import imageio.v3 as iio
import time
import argparse
import json


def IoM1(m1, m2):
    """Intersection area of m1 and m2, normalized by the area of m1.

    The denominator is clamped to 1 so an empty m1 yields 0 instead of
    a division-by-zero.
    """
    intersection = np.sum(m1 & m2)
    denom = max(np.sum(m1), 1)
    return intersection / denom


def IoU(m1, m2):
    """Intersection-over-union of two binary masks.

    The union is clamped to 1 so two empty masks yield 0 instead of a
    division-by-zero.
    """
    intersection = np.sum(m1 & m2)
    union = max(np.sum(m1 | m2), 1)
    return intersection / union


def read_imgs(path):
    """Load every image file in *path*, sorted by filename, as numpy arrays."""
    return [np.asarray(iio.imread(f)) for f in sorted(Path(path).iterdir())]


def read_intrinsic(path):
    """Read a 3x3 camera intrinsic matrix from a JSON file.

    The JSON stores the 9 values of "intrinsic_matrix" column-major,
    hence the transpose after the row-major reshape.
    """
    with open(path, "r") as f:
        data = json.load(f)
    K = np.asarray(data["intrinsic_matrix"]).reshape((3, 3))
    return K.T


def _best_iou_match(candidates, target):
    """Pick the candidate mask with the highest IoU against `target`.

    Returns a dict {"mask": ndarray|None, "idx": int|None, "IoU": float};
    mask/idx stay None when no candidate overlaps target at all.
    """
    best = {"mask": None, "idx": None, "IoU": 0}
    for i, m in enumerate(candidates):
        iou = IoU(m, target)
        if iou > best["IoU"]:
            best = {"mask": m, "idx": i, "IoU": iou}
    return best


def compute_2Dmask_select_precomputed_mask(seq_precomputed_2Dmask_info, camera_idx, frame_idx, fake_mask):
    """
    Select, for each of person1/person2/object, the precomputed instance mask
    that best matches the rendered fake-color mask, and merge the selections
    into one label map.

    return an uint8 numpy array of shape (H, W):
        0 = background, 1 = person1, 2 = person2, 3 = object
        (an all-ones array marks a missing precomputed mask file)
    """
    # The per-channel thresholds below assume this exact color assignment.
    assert FAKE_COLOR_LIST == [
        [1.0, 0.0, 0.0],  # person1
        [0.0, 1.0, 0.0],  # person2
        [0.0, 0.0, 1.0],  # object
    ]

    H, W = fake_mask.shape[:2]
    precomputed_mask_path = join(seq_precomputed_2Dmask_info["root_dir"], seq_precomputed_2Dmask_info["camera_names"][camera_idx], seq_precomputed_2Dmask_info["date"], seq_precomputed_2Dmask_info["seq_name"], "raw_masks", str(frame_idx).zfill(5) + ".npz")
    if not isfile(precomputed_mask_path):
        # Data is currently incomplete, so flag this case with an all-ones mask.
        return np.ones((H, W)).astype(np.uint8)

    # Split the precomputed instance masks into person vs. object candidates,
    # resized (nearest-neighbor) to the fake-mask resolution.
    precomputed_masks = np.load(precomputed_mask_path, allow_pickle=True)["data"]
    person_masks = []
    obj_masks = []
    for m in precomputed_masks:
        resized_m = cv2.resize(m["mask"].astype(np.uint8), (W, H), interpolation=cv2.INTER_NEAREST)
        if m["class_id"] == 0:  # person
            person_masks.append(resized_m)
        else:  # object
            obj_masks.append(resized_m)

    # Binary target masks derived from the rendered fake-color channels.
    person1_target = (fake_mask[..., 0] > 10.0) & (fake_mask[..., 1] < 0.2) & (fake_mask[..., 2] < 0.2)
    person2_target = (fake_mask[..., 0] < 0.2) & (fake_mask[..., 1] > 10.0) & (fake_mask[..., 2] < 0.2)
    object_target = (fake_mask[..., 0] < 0.2) & (fake_mask[..., 1] < 0.2) & (fake_mask[..., 2] > 10.0)

    optimal_mask = {
        "person1": _best_iou_match(person_masks, person1_target),
        "person2": _best_iou_match(person_masks, person2_target),
        "object": _best_iou_match(obj_masks, object_target),
    }

    # Deduplicate: ideally the two persons should be assigned by optimal
    # bipartite matching, but in practice if both picked the same mask one of
    # them must be heavily occluded, so simply drop the weaker match in this view.
    p1, p2 = optimal_mask["person1"], optimal_mask["person2"]
    if (p1["idx"] is not None) and (p2["idx"] is not None) and (p1["idx"] == p2["idx"]):
        if p1["IoU"] > p2["IoU"]:
            optimal_mask["person2"] = {"mask": None, "idx": None, "IoU": 0}
        else:
            optimal_mask["person1"] = {"mask": None, "idx": None, "IoU": 0}

    # Merge into a single label map: 1 = person1, 2 = person2, 3 = object.
    mask = np.zeros((H, W)).astype(np.uint8)
    for i, key in enumerate(["person1", "person2", "object"]):
        if optimal_mask[key]["mask"] is not None:
            mask[optimal_mask[key]["mask"] > 0] = i + 1

    return mask


def render_2Dmask(img, mask):
    """Blend a 4-class label map over an image at 50% opacity.

    Labels: 0 = background (black), 1 = person1, 2 = person2, 3 = object.
    Returns a uint8 image of the same shape as `img`.
    """
    palette = (
        (0, 0, 0),  # background
        (255, 255, 0),  # person1
        (128, 0, 128),  # person2
        (255, 0, 0),  # object
    )

    overlay = np.zeros(img.shape).astype(np.uint8)
    for label, color in enumerate(palette):
        overlay[mask == label] = color

    blended = img.astype(np.float32) * 0.5 + overlay.astype(np.float32) * 0.5
    return blended.clip(0, 255).astype(np.uint8)


def read_SMPLX_params(data_dir, M, N, device):
    """Load per-frame SMPL-X parameters stored in chunks of M frames.

    Each chunk file "{i}to{j}.npz" holds a pickled dict of batched tensors
    under the "results" key; the return value is a list of N single-frame
    parameter dicts (each tensor sliced to batch size 1) moved to `device`.
    """
    keys = ("betas", "body_pose", "transl", "global_orient", "left_hand_pose", "right_hand_pose")
    params = []
    for start in range(0, N, M):
        end = min(start + M, N)
        chunk_path = join(data_dir, "{}to{}.npz".format(str(start), str(end - 1)))
        chunk = np.load(chunk_path, allow_pickle=True)["results"].item()
        for j in range(end - start):
            params.append({k: chunk[k][j:j + 1].to(device) for k in keys})
    return params


def render_fake_mask(pyt3d_wrapper, smplx_model, person1_SMPLX_params, person2_SMPLX_params, object_mesh, obj2world):
    """Render both persons (and the object, when a pose is given) in flat
    fake colors via render_HHO and return the rendered image."""
    if obj2world is None:
        object_entry = None
    else:
        object_entry = {
            "mesh": object_mesh,
            "obj2world": obj2world,
        }
    data = {
        "person1": person1_SMPLX_params,
        "person2": person2_SMPLX_params,
        "object": object_entry,
    }
    return render_HHO(pyt3d_wrapper, smplx_model, data, rgb_img=None, frame_idx=None, suffix="", save=True)


def vis_single_video(dataset_dir, data_id, intrinsic_dirs, extrinsic_dirs, seq_precomputed_2Dmask_info, save_dir, cfg, object_data, paired_frames, device):
    """Compute and visualize per-camera 2D masks for one captured sequence.

    For every 3rd paired frame: render a fake-colored mask from the fitted
    SMPL-X bodies (plus the object, when available), match it against the
    precomputed instance masks, and write three debug videos plus the final
    mask dict (pickled) into save_dir.

    NOTE(review): the frame loop assumes both persons have SMPL-X fittings.
    If "/joints" or "/joints2" is absent from VTS_data the corresponding
    params list stays None and indexing it will raise — confirm upstream
    data guarantees both are present.
    """
    if (not cfg["vis_person1"]) or (not cfg["vis_person2"]):
        print("[prepare 2D mask] error: not support for this cfg, skip this video!!!")
        return

    data_dir = join(dataset_dir, "VTS", data_id)

    # cam params
    camera_intrinsics = [read_intrinsic(join(p, "intrinsic.json")) for p in intrinsic_dirs]
    camera_poses = [np.loadtxt(join(p, "camera2world.txt")) for p in extrinsic_dirs]

    # get data
    print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "] start loading data")
    assert isdir(join(data_dir, "SMPLX_fitting"))
    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()
    person1_list, person1_SMPLX_params_list, person2_list, person2_SMPLX_params_list, rigid_pose_list = None, None, None, None, None
    if "/joints" in VTS_data:
        # person1_list = VTS_data["/joints"]  # VTS person1 data
        person1_SMPLX_params_list = read_SMPLX_params(join(data_dir, "SMPLX_fitting", "person_1"), M=50, N=len(paired_frames), device=device)  # optimized SMPLX params
    if "/joints2" in VTS_data:
        person2_list = VTS_data["/joints2"]  # VTS person2 data
        person2_SMPLX_params_list = read_SMPLX_params(join(data_dir, "SMPLX_fitting", "person_2"), M=50, N=len(paired_frames), device=device)  # optimized SMPLX params
    if object_data is not None:
        rigid_pose_list = object_data["obj2world"]  # VTS aligned object pose

    azure2_imgs = read_imgs(join(dataset_dir, "azure2", data_id, "vis", "color"))
    azure3_imgs = read_imgs(join(dataset_dir, "azure3", data_id, "vis", "color"))
    azure4_imgs = read_imgs(join(dataset_dir, "azure4", data_id, "vis", "color"))
    azure5_imgs = read_imgs(join(dataset_dir, "azure5", data_id, "vis", "color"))

    # init pytorch3d renderer (one fixed-camera wrapper per azure camera)
    pyt3d_wrapper2 = Pyt3DWrapper(image_size=(1920, 1080), use_fixed_cameras=True, intrin=camera_intrinsics[0], extrin=np.linalg.inv(camera_poses[0]), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper3 = Pyt3DWrapper(image_size=(1920, 1080), use_fixed_cameras=True, intrin=camera_intrinsics[1], extrin=np.linalg.inv(camera_poses[1]), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper4 = Pyt3DWrapper(image_size=(1920, 1080), use_fixed_cameras=True, intrin=camera_intrinsics[2], extrin=np.linalg.inv(camera_poses[2]), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper5 = Pyt3DWrapper(image_size=(1920, 1080), use_fixed_cameras=True, intrin=camera_intrinsics[3], extrin=np.linalg.inv(camera_poses[3]), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))

    # visualization: a 2x2 grid of the four camera views per frame
    os.makedirs(save_dir, exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    W = 1920 * 2
    H = 1080 * 2
    vw1 = cv2.VideoWriter(join(save_dir, "vis_processed_2Dmask.mp4"), fourcc, 3, (W, H))
    vw2 = cv2.VideoWriter(join(save_dir, "vis_processed_2Dfake_mask.mp4"), fourcc, 3, (W, H))
    vw3 = cv2.VideoWriter(join(save_dir, "vis_processed_2Doverlaid_mask.mp4"), fourcc, 3, (W, H))

    final_mask = {
        "azure2": [],
        "azure3": [],
        "azure4": [],
        "azure5": [],
    }

    # The SMPL-X model depends only on num_pca_comps, so cache it per value
    # instead of reloading the model files from disk on every frame.
    smplx_models = {}

    for paired_frame_idx in range(0, len(paired_frames), 3):
        print("[",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"]processing paired_frame", paired_frame_idx)
        paired_frame = paired_frames[paired_frame_idx]

        print(len(azure2_imgs), len(azure3_imgs), len(azure4_imgs), len(azure5_imgs), paired_frame)

        # get data
        azure2 = azure2_imgs[paired_frame[0]]
        azure3 = azure3_imgs[paired_frame[1]]
        azure4 = azure4_imgs[paired_frame[2]]
        azure5 = azure5_imgs[paired_frame[3]]
        person1_SMPLX_params = person1_SMPLX_params_list[paired_frame_idx]
        person2_SMPLX_params = person2_SMPLX_params_list[paired_frame_idx]
        obj_mesh = object_data["mesh"] if object_data is not None else None
        obj2world = rigid_pose_list[paired_frame_idx] if object_data is not None else None

        # (1) render SMPL&Objmesh based 2D mask
        num_pca_comps = person1_SMPLX_params["left_hand_pose"].shape[1]
        if num_pca_comps not in smplx_models:
            model = smplx.create("/share/human_model/models", model_type="smplx", gender="neutral", use_face_contour=False, num_betas=10, num_expression_coeffs=10, ext="npz", use_pca=True, num_pca_comps=num_pca_comps, flat_hand_mean=True)
            model.to(device)
            smplx_models[num_pca_comps] = model
        smplx_model = smplx_models[num_pca_comps]
        fake_mask2 = render_fake_mask(pyt3d_wrapper2, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask3 = render_fake_mask(pyt3d_wrapper3, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask4 = render_fake_mask(pyt3d_wrapper4, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask5 = render_fake_mask(pyt3d_wrapper5, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)

        # (2) compute 2D mask, shape = (H, W), dtype = uint8
        print("[",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"]start computing 2D mask")
        mask2 = compute_2Dmask_select_precomputed_mask(seq_precomputed_2Dmask_info, 0, paired_frame[0], fake_mask2)
        mask3 = compute_2Dmask_select_precomputed_mask(seq_precomputed_2Dmask_info, 1, paired_frame[1], fake_mask3)
        mask4 = compute_2Dmask_select_precomputed_mask(seq_precomputed_2Dmask_info, 2, paired_frame[2], fake_mask4)
        mask5 = compute_2Dmask_select_precomputed_mask(seq_precomputed_2Dmask_info, 3, paired_frame[3], fake_mask5)
        final_mask["azure2"].append(mask2)
        final_mask["azure3"].append(mask3)
        final_mask["azure4"].append(mask4)
        final_mask["azure5"].append(mask5)

        print("start rendering",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        # (3) render 2D mask
        img2 = azure2[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img3 = azure3[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img4 = azure4[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img5 = azure5[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img2 = render_2Dmask(img2, mask2)
        img3 = render_2Dmask(img3, mask3)
        img4 = render_2Dmask(img4, mask4)
        img5 = render_2Dmask(img5, mask5)

        # save: overlay video
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:1080, :1920] = img2
        img[:1080, 1920:] = img3
        img[1080:, :1920] = img4
        img[1080:, 1920:] = img5
        vw1.write(img)

        # save: raw fake-mask video
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:1080, :1920] = fake_mask2
        img[:1080, 1920:] = fake_mask3
        img[1080:, :1920] = fake_mask4
        img[1080:, 1920:] = fake_mask5
        vw2.write(img)

        # save: fake mask blended over the overlay
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:1080, :1920] = fake_mask2 / 2 + img2 / 2
        img[:1080, 1920:] = fake_mask3 / 2 + img3 / 2
        img[1080:, :1920] = fake_mask4 / 2 + img4 / 2
        img[1080:, 1920:] = fake_mask5 / 2 + img5 / 2
        vw3.write(img)

    vw1.release()
    vw2.release()
    vw3.release()  # bug fix: vw3 was never released, which can truncate/corrupt the overlaid-mask video

    pickle.dump(final_mask, open(join(save_dir, "final_2Dmask.pkl"), "wb"))


if __name__ == "__main__":
    import traceback

    ############################################################################################################
    # Hard-coded dataset layout and processing config.
    dataset_dir = "/share/datasets/hhodataset/"
    obj_dataset_dir = "/data3/datasets/HHO_object_dataset_final"
    precomputed_2Dmask_dir = "/data2/datasets/hhodataset/2Dmask_raw"
    save_root = "/data2/datasets/hhodataset/2Dmask"
    cfg = {
        "vis_person1": True,
        "vis_person2": True,
        "vis_obj": True,
    }
    device = "cuda:0"
    
    parser = argparse.ArgumentParser()
    parser.add_argument("--clip_name", type=str)
    args = parser.parse_args()
    ############################################################################################################
    
    vts_dir = join(dataset_dir, "VTS")
    azure_extrinsic_dir = join(dataset_dir, "extrinsic")
    # camera info: extrinsics are per-camera, per-calibration-date
    azure2_extrinsic_dir = join(azure_extrinsic_dir, "azure2", "20231101")
    azure3_extrinsic_dir = join(azure_extrinsic_dir, "azure3", "20230928")
    azure4_extrinsic_dir = join(azure_extrinsic_dir, "azure4", "20230926")
    azure5_extrinsic_dir = join(azure_extrinsic_dir, "azure5", "20231020")

    azure_intrinsic_dir = join(dataset_dir, "intrinsic")
    # camera info
    azure2_intrinsic_dir = join(azure_intrinsic_dir, "azure2")
    azure3_intrinsic_dir = join(azure_intrinsic_dir, "azure3")
    azure4_intrinsic_dir = join(azure_intrinsic_dir, "azure4")
    azure5_intrinsic_dir = join(azure_intrinsic_dir, "azure5")
    
    # for clip_name in os.listdir(vts_dir):
    for clip_name in [args.clip_name]:
        date_dir = join(vts_dir, clip_name)
        if "tst" in date_dir or not os.path.isdir(date_dir):
            continue
        for video_name in os.listdir(date_dir):
            data_dir = join(date_dir, video_name)
            data_id = join(clip_name, video_name)
            
            print("[start processing]", data_dir)
            video_cfg = cfg.copy()
                
            if not isfile(join(data_dir, "aligned_frame_ids.txt")):
                print(f"skipping {data_id} because no aligned_frame_ids.txt ...")
                continue
            paired_frames = txt_to_paried_frameids(join(data_dir, "aligned_frame_ids.txt"))
                
            obj_name, obj_model_path = get_obj_info(data_dir, obj_dataset_dir)
                
            # Object data is optional: without aligned poses we just skip
            # object visualization; with poses we also need the mesh file.
            object_data = None
            if not isfile(join(data_dir, "aligned_objposes.npy")):
                video_cfg["vis_obj"] = False
            else:
                if not isfile(obj_model_path):
                    print(f"skipping {data_id} because no object model at {obj_model_path} ...")
                    continue
                obj2world = np.load(join(data_dir, "aligned_objposes.npy"))
                object_data = {
                    "model_path": obj_model_path,
                    "mesh": trimesh.load_mesh(obj_model_path),
                    "obj2world": obj2world,
                }

            print("data_dir =", data_dir)
            
            seq_precomputed_2Dmask_info = {
                "root_dir": precomputed_2Dmask_dir,
                "camera_names": ["azure2", "azure3", "azure4", "azure5"],
                "date": clip_name,
                "seq_name": video_name,
            }
            intrinsic_dirs = [azure2_intrinsic_dir, azure3_intrinsic_dir, azure4_intrinsic_dir, azure5_intrinsic_dir]
            extrinsic_dirs = [azure2_extrinsic_dir, azure3_extrinsic_dir, azure4_extrinsic_dir, azure5_extrinsic_dir]
            save_dir = join(save_root, clip_name, video_name)
            
            # Skip sequences that already have a final mask on disk.
            if isfile(join(save_dir, "final_2Dmask.pkl")):
                print("[skip] 2D mask of {} has been prepared before, skipping !!!".format(data_id))
                continue
            
            try:
                vis_single_video(dataset_dir, data_id, intrinsic_dirs, extrinsic_dirs, seq_precomputed_2Dmask_info, save_dir, video_cfg, object_data, paired_frames, device=device)
            except Exception:
                # bug fix: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit and hid the traceback.
                traceback.print_exc()
                print("[error] error in {} !!!".format(data_id))