import sys
sys.path.append("../..")
import os
from os.path import join, isfile, isdir
import numpy as np
import pickle
import cv2
from transforms3d.quaternions import quat2mat
import trimesh
from tqdm import tqdm
import torch
from pytorch3d.renderer import AmbientLights
from data_processing.smplx import smplx
from segment_anything import SamPredictor, SamAutomaticMaskGenerator, sam_model_registry
from data_processing.utils.VTS_object import get_obj_info
from data_processing.utils.time_align import txt_to_paried_frameids
from data_processing.prepare_2Dmask.utils.colors import FAKE_COLOR_LIST
from data_processing.utils.pyt3d_wrapper import Pyt3DWrapper
from data_processing.utils.visualization import render_HHO
from data_recording.multi_camera.utils.bvh2joint import bvh2joint, default_end_link_trans
from data_recording.multi_camera.utils.txt_parser import txt2intrinsic, txt2timestamps
from data_recording.multi_camera.utils.video_parser import mp42imgs
import open3d as o3d
import time
import argparse


def IoM1(m1, m2):
    """Fraction of mask m1 covered by m2: |m1 & m2| / |m1| (denominator clamped to 1)."""
    intersection = np.sum(m1 & m2)
    area_m1 = np.sum(m1)
    return intersection / max(area_m1, 1)


def IoU(m1, m2):
    """Intersection over union of two boolean masks (denominator clamped to 1)."""
    intersection = np.sum(m1 & m2)
    union = np.sum(m1 | m2)
    return intersection / max(union, 1)


def compute_2Dmask_segment_everything(img, fake_mask, predictor, threshould=0.5):
    """
    Label every SAM "segment everything" patch as person1 (1), person2 (2),
    object (3) or background (0) by matching it against the rendered
    fake-color mask, and return the combined label map.

    Args:
        img: (H, W, 3) RGB image fed to the SAM automatic mask generator.
        fake_mask: (H, W, 3) fake-color render where channels 0/1/2 encode
            person1/person2/object respectively (see FAKE_COLOR_LIST).
        predictor: object with a generate(img) method returning a list of
            dicts with a boolean "segmentation" array (SamAutomaticMaskGenerator).
        threshould: minimum IoM1 (patch coverage by the fake mask) for a SAM
            patch to be assigned the label.  (Name kept for caller compat.)

    Returns:
        (H, W) uint8 array with values in {0, 1, 2, 3}.
    """
    # The channel -> label mapping below is only valid for this exact palette.
    assert FAKE_COLOR_LIST == [
        [1.0, 0.0, 0.0],  # person1
        [0.0, 1.0, 0.0],  # person2
        [0.0, 0.0, 1.0],  # object
    ]

    mask = np.zeros(img.shape[:2]).astype(np.uint8)

    # SAM proposes class-agnostic patches; each one is labeled by overlap below.
    masks = predictor.generate(img)

    # One pass per (label, dominant channel); later labels overwrite earlier
    # ones on overlap, matching the original person1 -> person2 -> object order.
    # NOTE(review): the > 10.0 threshold implies fake_mask values are not in
    # [0, 1] — presumably a 0-255 render; confirm against render_HHO's output.
    valid_counts = []
    for label, channel in ((1, 0), (2, 1), (3, 2)):
        fm = np.ones(img.shape[:2], dtype=bool)
        for c in range(3):
            fm &= (fake_mask[..., c] > 10.0) if c == channel else (fake_mask[..., c] < 0.2)
        valid = 0
        for m in masks:
            if IoM1(m["segmentation"], fm) > threshould:
                valid += 1
                mask[m["segmentation"]] = label
        valid_counts.append(valid)

    print("number of mask patch:", len(masks))
    print("number of valid mask patch:", valid_counts[0], valid_counts[1], valid_counts[2])

    return mask


def render_2Dmask(img, mask):
    """Overlay a 4-class label mask on img as a 50/50 color blend.

    Labels: 0 background (black), 1 person1 (yellow), 2 person2 (purple),
    3 object (red).  Returns a uint8 image of the same shape as img.
    """
    palette = (
        (0, 0, 0),      # background
        (255, 255, 0),  # person1
        (128, 0, 128),  # person2
        (255, 0, 0),    # object
    )

    overlay = np.zeros(img.shape).astype(np.uint8)
    for label, color in enumerate(palette):
        overlay[mask == label] = color

    blended = img.astype(np.float32) * 0.5 + overlay.astype(np.float32) * 0.5
    return blended.clip(0, 255).astype(np.uint8)


def read_SMPLX_params(data_dir, M, N, device):
    """Load per-frame SMPL-X parameters saved in chunked npz files.

    Fitting results live in files named "<start>to<end>.npz" under data_dir,
    each holding a pickled dict of tensors stacked over up to M frames (the
    last chunk may be shorter).  Returns a list of N per-frame dicts, each
    tensor sliced to a leading batch dimension of 1 and moved to `device`.
    """
    keys = (
        "betas", "body_pose", "transl",
        "global_orient", "left_hand_pose", "right_hand_pose",
    )
    per_frame_params = []
    for start in range(0, N, M):
        end = min(start + M, N)
        chunk_path = join(data_dir, "{}to{}.npz".format(str(start), str(end - 1)))
        # allow_pickle: the npz stores a python dict of torch tensors.
        chunk = np.load(chunk_path, allow_pickle=True)["results"].item()
        for offset in range(end - start):
            per_frame_params.append(
                {k: chunk[k][offset:offset + 1].to(device) for k in keys}
            )
    return per_frame_params


def render_fake_mask(pyt3d_wrapper, smplx_model, person1_SMPLX_params, person2_SMPLX_params, object_mesh, obj2world):
    """Render the two bodies (and optional posed object) in flat fake colors.

    The returned image encodes person1/person2/object in separate color
    channels (via the wrapper's FAKE_COLOR_LIST) for later SAM patch matching.
    """
    object_entry = None
    if obj2world is not None:
        object_entry = {
            "mesh": object_mesh,
            "obj2world": obj2world,
        }
    scene = {
        "person1": person1_SMPLX_params,
        "person2": person2_SMPLX_params,
        "object": object_entry,
    }
    return render_HHO(pyt3d_wrapper, smplx_model, scene, rgb_img=None, frame_idx=None, suffix="", save=False)


def vis_single_video(data_dir, cam1_dir, cam2_dir, cam3_dir, cam4_dir, predictor, cfg, object_data, paired_frames, device):
    """
    Compute and visualize 2D masks (person1/person2/object) for one clip
    captured by four D455 cameras.

    For each sampled paired frame: render a fake-color mask from the fitted
    SMPL-X bodies (plus optional object mesh), run SAM on each RGB view, label
    SAM patches by overlap with the fake mask, then write a 2x2-grid overlay
    video (vis_processed_2Dmask.mp4) and dump all masks to final_2Dmask.pkl.

    Args:
        data_dir: clip directory with SMPLX_fitting/, VTS_data.npz and the
            four *_color_image_raw.mp4 streams.
        cam1_dir..cam4_dir: per-camera calibration dirs containing
            intrinsic.txt and camera2world.txt.
        predictor: SAM automatic mask generator (has .generate(img)).
        cfg: dict with "vis_person1"/"vis_person2"/"vis_obj"; both person
            flags must be True — other configurations are rejected below.
        object_data: dict with "mesh" and "obj2world" arrays, or None.
        paired_frames: list of 4-tuples of time-aligned per-camera frame ids.
        device: torch device string for SMPL-X and pytorch3d rendering.
    """
    # Only the two-person setting is supported; bail out otherwise.
    if (not cfg["vis_person1"]) or (not cfg["vis_person2"]):
        print("[prepare 2D mask] error: not support for this cfg, skip this video!!!")
        return

    # cam params (world-from-camera poses on disk; inverted below for rendering)
    cam1_intrinsic, _ = txt2intrinsic(join(cam1_dir, "intrinsic.txt"))
    cam1_pose = np.loadtxt(join(cam1_dir, "camera2world.txt"))
    cam2_intrinsic, _ = txt2intrinsic(join(cam2_dir, "intrinsic.txt"))
    cam2_pose = np.loadtxt(join(cam2_dir, "camera2world.txt"))
    cam3_intrinsic, _ = txt2intrinsic(join(cam3_dir, "intrinsic.txt"))
    cam3_pose = np.loadtxt(join(cam3_dir, "camera2world.txt"))
    cam4_intrinsic, _ = txt2intrinsic(join(cam4_dir, "intrinsic.txt"))
    cam4_pose = np.loadtxt(join(cam4_dir, "camera2world.txt"))

    # get data
    print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "] start loading data")
    assert isdir(join(data_dir, "SMPLX_fitting"))
    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()
    person1_list, person1_SMPLX_params_list, person2_list, person2_SMPLX_params_list, rigid_pose_list = None, None, None, None, None
    # "/joints" and "/joints2" are the VTS mocap streams for person1/person2.
    if "/joints" in VTS_data:
        person1_list = VTS_data["/joints"]  # VTS person1 data
        # SMPL-X fits are chunked 50 frames per npz file (M=50).
        person1_SMPLX_params_list = read_SMPLX_params(join(data_dir, "SMPLX_fitting", "person_1"), M=50, N=len(paired_frames), device=device)  # optimized SMPLX params
    if "/joints2" in VTS_data:
        person2_list = VTS_data["/joints2"]  # VTS person2 data
        person2_SMPLX_params_list = read_SMPLX_params(join(data_dir, "SMPLX_fitting", "person_2"), M=50, N=len(paired_frames), device=device)  # optimized SMPLX params
    if not object_data is None:
        rigid_pose_list = object_data["obj2world"]  # VTS aligned object pose
    
    # Decode all four color streams fully into memory.
    rgb1_imgs = mp42imgs(join(data_dir, "_d455_camera1_color_image_raw.mp4"))
    rgb2_imgs = mp42imgs(join(data_dir, "_d455_camera2_color_image_raw.mp4"))
    rgb3_imgs = mp42imgs(join(data_dir, "_d455_camera3_color_image_raw.mp4"))
    rgb4_imgs = mp42imgs(join(data_dir, "_d455_camera4_color_image_raw.mp4"))
    
    # init pytorch3d renderer (one fixed-camera wrapper per view, 1280x720,
    # flat ambient light so the fake colors stay unshaded)
    pyt3d_wrapper1 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam1_intrinsic, extrin=np.linalg.inv(cam1_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper2 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam2_intrinsic, extrin=np.linalg.inv(cam2_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper3 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam3_intrinsic, extrin=np.linalg.inv(cam3_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    pyt3d_wrapper4 = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=True, intrin=cam4_intrinsic, extrin=np.linalg.inv(cam4_pose), device=device, colors=FAKE_COLOR_LIST, lights=AmbientLights(ambient_color=((0.5, 0.5, 0.5),), device=device))
    
    # visualization: 2x2 grid of the four views at 3 fps
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    W = 1280 * 2
    H = 720 * 2
    vw = cv2.VideoWriter(join(data_dir, "vis_processed_2Dmask.mp4"), fourcc, 3, (W, H))
    
    final_mask = {
        "rgb1": [],
        "rgb2": [],
        "rgb3": [],
        "rgb4": [],
    }
    
    for paired_frame_idx in range(0, len(paired_frames), 15): # sample one frame every 10 frames (debug) -- NOTE(review): stride is actually 15
        print("[",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"]processing paired_frame", paired_frame_idx)
        paired_frame = paired_frames[paired_frame_idx]
        rgb1_idx = paired_frame[0]
        rgb2_idx = paired_frame[1]
        rgb3_idx = paired_frame[2]
        rgb4_idx = paired_frame[3]
        
        # get data
        rgb1 = rgb1_imgs[rgb1_idx]
        rgb2 = rgb2_imgs[rgb2_idx]
        rgb3 = rgb3_imgs[rgb3_idx]
        rgb4 = rgb4_imgs[rgb4_idx]
        person1_SMPLX_params = person1_SMPLX_params_list[paired_frame_idx]
        person2_SMPLX_params = person2_SMPLX_params_list[paired_frame_idx]
        obj_mesh = object_data["mesh"] if not object_data is None else None
        obj2world = rigid_pose_list[paired_frame_idx] if not object_data is None else None
        
        # (1) render SMPL&Objmesh based 2D mask
        # NOTE(review): the SMPL-X model is rebuilt every iteration; if
        # num_pca_comps is constant across frames this could be hoisted out
        # of the loop — confirm before changing.
        num_pca_comps = person1_SMPLX_params["left_hand_pose"].shape[1]
        smplx_model = smplx.create("/share/human_model/models", model_type="smplx", gender="neutral", use_face_contour=False, num_betas=10, num_expression_coeffs=10, ext="npz", use_pca=True, num_pca_comps=num_pca_comps, flat_hand_mean=True)
        smplx_model.to(device)
        fake_mask1 = render_fake_mask(pyt3d_wrapper1, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask2 = render_fake_mask(pyt3d_wrapper2, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask3 = render_fake_mask(pyt3d_wrapper3, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        fake_mask4 = render_fake_mask(pyt3d_wrapper4, smplx_model, person1_SMPLX_params, person2_SMPLX_params, obj_mesh, obj2world)
        
        # (2) compute 2D mask, shape = (H, W), dtype = uint8
        print("[",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"]start computing 2D mask")
        mask1 = compute_2Dmask_segment_everything(rgb1, fake_mask1, predictor)
        mask2 = compute_2Dmask_segment_everything(rgb2, fake_mask2, predictor)
        mask3 = compute_2Dmask_segment_everything(rgb3, fake_mask3, predictor)
        mask4 = compute_2Dmask_segment_everything(rgb4, fake_mask4, predictor)
        final_mask["rgb1"].append(mask1)
        final_mask["rgb2"].append(mask2)
        final_mask["rgb3"].append(mask3)
        final_mask["rgb4"].append(mask4)

        print("start rendering",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        # (3) render 2D mask
        img1 = rgb1[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img2 = rgb2[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img3 = rgb3[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img4 = rgb4[:, :, ::-1].astype(np.uint8)  # rgb2bgr
        img1 = render_2Dmask(img1, mask1)
        img2 = render_2Dmask(img2, mask2)
        img3 = render_2Dmask(img3, mask3)
        img4 = render_2Dmask(img4, mask4)

        # save (tile the four annotated views into one 2x2 frame)
        img = np.zeros((H, W, 3)).astype(np.uint8)
        img[:720, :1280] = img1
        img[:720, 1280:] = img2
        img[720:, :1280] = img3
        img[720:, 1280:] = img4
        vw.write(img)
        
    vw.release()
    
    # Persist all per-view label maps for downstream processing.
    pickle.dump(final_mask, open(join(data_dir, "final_2Dmask.pkl"), "wb"))


if __name__ == "__main__":

    ############################################################################################################
    # Hard-coded dataset locations and defaults; clip_name can be overridden
    # via --clip_name below.
    obj_dataset_dir = "/share/datasets/HHO_object_dataset_final"
    # date_dir = "/share/datasets/HHO_dataset/data/20230724"
    # camera_calib_dir = "/home/liuyun/HHO-dataset/data_processing/camera_info"
    dataset_dir = "/share/datasets/HHO_dataset/"
    clip_name = "20230805_1"
    cfg = {
        "vis_person1": True,
        "vis_person2": True,
        "vis_obj": True,
    }
    device = "cuda:0"
    ############################################################################################################
    
    parser = argparse.ArgumentParser()
    parser.add_argument("--clip_name", type=str)
    args = parser.parse_args()
    if not args.clip_name is None:
        clip_name = args.clip_name
    print("clip_name =", clip_name)
    
    date_dir = join(dataset_dir, "data", clip_name)
    camera_calib_dir = join(dataset_dir, "rawdata", clip_name, "param")
    # camera info (one calibration dir per D455 camera)
    cam1_dir = join(camera_calib_dir, "d455_1")
    cam2_dir = join(camera_calib_dir, "d455_2")
    cam3_dir = join(camera_calib_dir, "d455_3")
    cam4_dir = join(camera_calib_dir, "d455_4")
    # /share/datasets/HHO_dataset/rawdata/20230805_2/param/d455_2
    # join(dataset_dir, "rawdata", clip_name, "param", camera_name, "intrinsic.txt")

    # init Segment-anything model (ViT-H checkpoint, automatic mask generation)
    sam = sam_model_registry["vit_h"](checkpoint="/home/liuyun/codebases/segment-anything/checkpoints/sam_vit_h_4b8939.pth")
    sam.to(device)
    predictor = SamAutomaticMaskGenerator(sam)

    # Process every video directory under the clip; failures in one video must
    # not abort the whole batch (best-effort loop, see except below).
    for video_name in os.listdir(date_dir):
        try:
            
            # if (not video_name == "001") and (not video_name == "024") and (not video_name == "012") and (not video_name == "038") and (not video_name == "044") and (not video_name == "016"):
            #     continue

            data_dir = join(date_dir, video_name)
            # if exists, skip (final_2Dmask.pkl marks a finished video)
            if isfile(join(data_dir, "final_2Dmask.pkl")):
                print("[prepare 2D mask] skip:", data_dir)
                continue
            print("[start processing]", data_dir)
            video_cfg = cfg.copy()
            
            assert isfile(join(data_dir, "aligned_frame_ids.txt"))
            paired_frames = txt_to_paried_frameids(join(data_dir, "aligned_frame_ids.txt"))
            
            obj_name, obj_model_path = get_obj_info(data_dir, obj_dataset_dir)
            
            # Object pose/mesh are optional: without aligned_objposes.npy the
            # video is processed with persons only.
            object_data = None
            if not isfile(join(data_dir, "aligned_objposes.npy")):
                video_cfg["vis_obj"] = False
            else:
                assert isfile(obj_model_path)
                obj2world = np.load(join(data_dir, "aligned_objposes.npy"))
                object_data = {
                    "model_path": obj_model_path,
                    "mesh": trimesh.load_mesh(obj_model_path),
                    "obj2world": obj2world,
                }

            print("data_dir =", data_dir)
            
            vis_single_video(data_dir, cam1_dir, cam2_dir, cam3_dir, cam4_dir, predictor, video_cfg, object_data, paired_frames, device=device)
        except Exception as e:
            # Deliberate broad catch: log the failing video and keep going.
            print("[prepare 2D mask] error:", e)
            continue