import os
from os.path import join, isdir, isfile
import numpy as np
import cv2
import pickle
from utils.txt2intrinsic import txt2intrinsic
from utils.time_align import txt_to_paried_frameids
import open3d as o3d


def _masked_world_pcd(rgb, depth_np_all, mask, mask_id, intrinsic, cam_pose):
    """Back-project pixels where ``mask == mask_id`` into a world-space point cloud.

    Args:
        rgb: o3d.geometry.Image, color frame (RGB order).
        depth_np_all: np.uint16 array with the full depth frame.
        mask: integer label mask aligned with the depth frame (1=person1, 2=person2, 3=obj).
        mask_id: label value to keep; all other pixels get depth 0 and are dropped.
        intrinsic: o3d.camera.PinholeCameraIntrinsic for this camera.
        cam_pose: (4, 4) camera-to-world transform.

    Returns:
        o3d.geometry.PointCloud with points expressed in world coordinates.
    """
    depth_np = depth_np_all.copy()
    depth_np[mask != mask_id] = 0  # zero depth => pixel excluded from the point cloud
    depth = o3d.geometry.Image(depth_np)
    rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        rgb, depth, convert_rgb_to_intensity=False, depth_trunc=10.0)
    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic)
    # Transform camera-frame points into the world frame: p_w = R @ p_c + t.
    points = np.float32(pcd.points)
    points = points @ cam_pose[:3, :3].T + cam_pose[:3, 3]
    pcd.points = o3d.utility.Vector3dVector(points)
    return pcd


def visualize_world_rgbd(rgb_video_path, depth_dir, intrinsic_mat, cam_pose, frames=None, mask_path=None, img_name=None):
    """Build per-frame world-space point clouds for person1 / person2 / obj.

    Reads the selected ``frames`` from the RGB video and the matching
    ``<frame_id>.png`` depth images from ``depth_dir``, masks them with the
    per-frame 2D segmentation stored in ``mask_path`` under key ``img_name``,
    and back-projects each category into world coordinates.

    Args:
        rgb_video_path: path to the color video (mp4).
        depth_dir: directory of decoded depth PNGs named ``<frame_id>.png``.
        intrinsic_mat: (3, 3) camera intrinsic matrix (1280x720 assumed).
        cam_pose: (4, 4) camera-to-world transform.
        frames: ascending list of frame ids to extract; defaults to empty.
        mask_path: pickle file mapping image names to per-frame label masks. Required.
        img_name: key into the mask pickle (e.g. "rgb1").

    Returns:
        dict with keys "person1", "person2", "obj", each a list of
        o3d.geometry.PointCloud (one per selected frame).

    Raises:
        ValueError: if ``mask_path`` is None.
    """
    if frames is None:
        frames = []
    if mask_path is None:
        raise ValueError("mask_path is required")

    cap = cv2.VideoCapture(rgb_video_path)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
    try:
        rgb_list = []
        idx = 0
        frame_cnt = -1
        while True:
            frame_cnt += 1
            suc, img = cap.read()
            if not suc:
                break
            # `frames` is assumed sorted ascending, so a single cursor suffices.
            if (idx < len(frames)) and (frame_cnt == frames[idx]):
                img = img[:, :, ::-1].astype(np.uint8)  # BGR -> RGB
                rgb_list.append(o3d.geometry.Image(img))
                idx += 1
    finally:
        cap.release()  # release the capture even if decoding fails
    assert len(rgb_list) > 0

    depth_list = []
    for fid in frames:
        depth_list.append(o3d.io.read_image(join(depth_dir, str(fid) + ".png")))
    assert len(depth_list) > 0

    mask_list = pickle.load(open(mask_path, "rb"))[img_name]

    # Intrinsic is identical for every frame/category; build it once.
    intrinsic = o3d.camera.PinholeCameraIntrinsic(
        1280, 720,
        intrinsic_mat[0, 0], intrinsic_mat[1, 1],
        intrinsic_mat[0, 2], intrinsic_mat[1, 2])

    # Label value in the 2D mask for each output category.
    mask_ids = {"person1": 1, "person2": 2, "obj": 3}
    pcds = {key: [] for key in mask_ids}

    for (rgb, depth, mask) in zip(rgb_list, depth_list, mask_list):
        depth_np_all = np.uint16(depth)
        for key, mask_id in mask_ids.items():
            pcds[key].append(_masked_world_pcd(rgb, depth_np_all, mask, mask_id, intrinsic, cam_pose))

    return pcds


def _merge_pcd_lists(dst_pcds, src_pcds):
    """Concatenate points/colors of ``src_pcds[i]`` onto ``dst_pcds[i]``, in place.

    Only the overlapping prefix is merged: ``zip`` stops at the shorter list.
    (The original index-based loop used ``i > len(src_pcds)`` as its guard,
    which let ``i == len(src_pcds)`` through and raised IndexError whenever
    the incoming list was shorter — fixed here.)
    """
    for dst, src in zip(dst_pcds, src_pcds):
        p1 = np.float32(dst.points)
        c1 = np.float32(dst.colors)
        p2 = np.float32(src.points)
        c2 = np.float32(src.colors)
        dst.points = o3d.utility.Vector3dVector(np.concatenate((p1, p2), axis=0))
        dst.colors = o3d.utility.Vector3dVector(np.concatenate((c1, c2), axis=0))


def merge_pcds(person1_pcds, person2_pcds, obj_pcds, sv_pcds):
    """Merge a new single-view result ``sv_pcds`` into the accumulated clouds.

    On the first call (accumulators are None) the single-view lists are
    adopted as-is; afterwards each frame's points/colors are concatenated
    onto the existing per-frame clouds (mutating them in place).

    Args:
        person1_pcds, person2_pcds, obj_pcds: accumulated per-frame
            PointCloud lists, or None on the first view.
        sv_pcds: dict with keys "person1", "person2", "obj" from
            ``visualize_world_rgbd``.

    Returns:
        The three (possibly newly adopted) accumulated lists.
    """
    if person1_pcds is None:
        return sv_pcds["person1"], sv_pcds["person2"], sv_pcds["obj"]

    _merge_pcd_lists(person1_pcds, sv_pcds["person1"])
    _merge_pcd_lists(person2_pcds, sv_pcds["person2"])
    _merge_pcd_lists(obj_pcds, sv_pcds["obj"])
    return person1_pcds, person2_pcds, obj_pcds


if __name__ == "__main__":

    ################################################################################################
    clip_dir = "/share/datasets/HHO_dataset/data/20230724"
    camera_calib_dir = "/home/liuyun/HHO-dataset/data_processing/camera_info"
    # final_2Dmask.pkl stored one mask every 10 frames; sample the point
    # clouds at the same rate so frames line up with the masks.
    sampling_rate = 10
    ################################################################################################

    # Only these clips are processed.
    selected_videos = {"001", "012", "016", "024", "028", "038", "040", "044"}

    for video_name in os.listdir(clip_dir):

        if video_name not in selected_videos:
            continue

        print("processing {} ...".format(video_name))

        data_dir = join(clip_dir, video_name)
        assert isfile(join(data_dir, "aligned_frame_ids.txt"))
        # One tuple of aligned frame ids per sampled time step, one entry per camera.
        paired_frames = txt_to_paried_frameids(join(data_dir, "aligned_frame_ids.txt"))[::sampling_rate]
        mask_path = join(data_dir, "final_2Dmask.pkl")
        assert isfile(mask_path)
        save_dir = join(clip_dir, video_name, "HHO_mv_pcd")
        os.makedirs(save_dir, exist_ok=True)

        person1_pcds, person2_pcds, obj_pcds = None, None, None

        # The four D455 cameras are processed identically: camera k loads its
        # own calibration, decodes its depth video once, builds masked
        # per-frame point clouds and merges them into the accumulators.
        for cam_idx in range(4):
            cam_id = cam_idx + 1
            print("-- processing rgb{} ...".format(cam_id))
            label = "_d455_camera{}".format(cam_id)
            calib_dir = join(camera_calib_dir, "d455_{}".format(cam_id))
            intrinsic_mat, _ = txt2intrinsic(join(calib_dir, "intrinsic.txt"))
            cam_pose = np.loadtxt(join(calib_dir, "camera2world.txt"))
            decode_dir = join(data_dir, "decode" + label)
            if not isdir(decode_dir):
                # Decode the aligned depth video into 0-indexed per-frame PNGs at 15 fps.
                os.makedirs(decode_dir, exist_ok=True)
                os.system("ffmpeg -i {} -f image2 -start_number 0 -vf fps=fps=15 -qscale:v 2 {}/%d.png -loglevel quiet".format(join(data_dir, label + "_aligned_depth_to_color_image_raw.avi"), decode_dir))
            sv_pcds = visualize_world_rgbd(join(data_dir, label + "_color_image_raw.mp4"), decode_dir, intrinsic_mat, cam_pose, frames=[pf[cam_idx] for pf in paired_frames], mask_path=mask_path, img_name="rgb{}".format(cam_id))
            person1_pcds, person2_pcds, obj_pcds = merge_pcds(person1_pcds, person2_pcds, obj_pcds, sv_pcds)

        # Save one .ply per category per sampled frame; the zero-padded file
        # index steps by sampling_rate so it matches the pre-sampling frame position.
        idx = 0
        for (pcd1, pcd2, pcd3) in zip(person1_pcds, person2_pcds, obj_pcds):
            o3d.io.write_point_cloud(join(save_dir, str(idx).zfill(4) + "_person1.ply"), pcd1)
            o3d.io.write_point_cloud(join(save_dir, str(idx).zfill(4) + "_person2.ply"), pcd2)
            o3d.io.write_point_cloud(join(save_dir, str(idx).zfill(4) + "_obj.ply"), pcd3)
            idx += sampling_rate