import os
from os.path import join, isfile
import numpy as np
import cv2
import ffmpeg
from pprint import pprint
from transforms3d.quaternions import quat2mat
from utils.bvh2joint import bvh2joint, default_end_link_trans
from utils.txt_parser import txt2intrinsic, txt2timestamps
from utils.video_parser import mp42imgs
import open3d as o3d
import subprocess
import re


def get_obj_name_correspondance():
    """Map an English object-category name to the (Chinese) directory name
    used in the object CAD dataset layout."""
    return {
        "chair": "椅子",
        "desk": "桌子",
        "box": "箱子",
        "board": "板子",
        "bucket": "bucket",
        "stick": "stick",
    }


def get_obj_info(data_dir, obj_dataset_dir):
    """Identify the manipulated object of a sequence and locate its CAD model.

    The object is taken to be the rigid-body label that occurs most often in
    the sequence's VTS labels; the ego helper rigid body "action1" is
    explicitly excluded from the vote.

    Args:
        data_dir: sequence directory containing ``VTS_data.npz``.
        obj_dataset_dir: root directory of the object CAD dataset.

    Returns:
        (obj_name, obj_model_path): the winning label, e.g. "chair001", and
        the path to its ``*_m.obj`` mesh.
    """
    VTS_path = join(data_dir, "VTS_data.npz")
    assert isfile(VTS_path)
    VTS_data = np.load(VTS_path, allow_pickle=True)["data"].item()
    labels = VTS_data["/labels"]

    # vote: count how often each device label appears across all frames
    N_obj = {}
    for frame_labels in labels:
        for device_name in frame_labels:
            N_obj[device_name] = N_obj.get(device_name, 0) + 1
    # "action1" is the ego-camera helper rigid body, never the object itself
    N_obj["action1"] = 0
    # ties are broken by first insertion order, as in a strict-greater scan
    obj_name = max(N_obj, key=N_obj.get)

    # obj_name is "<category><3-digit id>", e.g. "chair001"
    corr = get_obj_name_correspondance()
    category, obj_id = obj_name[:-3], int(obj_name[-3:])

    def _model_path(idx_str):
        # <root>/<cn category>/<cn category><id>/<category><idx_str>_m.obj
        return join(
            obj_dataset_dir,
            corr[category],
            corr[category] + str(obj_id),
            category + idx_str + "_m.obj",
        )

    # some meshes use the bare id ("chair1_m.obj"), others the zero-padded
    # form ("chair001_m.obj"); try the bare one first
    obj_model_path = _model_path(str(obj_id))
    if not isfile(obj_model_path):
        obj_model_path = _model_path(str(obj_id).zfill(3))
    return obj_name, obj_model_path


def _project_to_pixels(points_world, camera_intrinsic, camera_extrinsic):
    """Project (N, 3) world-space points to (N, 2) integer pixel coordinates.

    world -> camera (4x4 extrinsic) -> image (3x3 intrinsic plus perspective
    divide), then truncated to int32.
    """
    p = np.concatenate(
        (points_world, np.ones((points_world.shape[0], 1))), axis=-1
    )  # (N, 4) homogeneous, world space
    p = (p @ camera_extrinsic.transpose(1, 0))[:, :3]  # (N, 3) camera space
    uv = p @ camera_intrinsic.transpose(1, 0)
    uv = uv[:, :2] / uv[:, 2:]  # perspective divide -> image space
    return uv.astype(np.int32)


def render(
    img, joint1_data, joint2_data, obj2world, obj_p, camera_intrinsic, camera_extrinsic
):
    """Overlay skeleton joints and object sample points onto one RGB frame.

    Args:
        img: HxWx3 RGB frame (converted to BGR for OpenCV drawing).
        joint1_data: (J, 3) world-space joints of person1, or None to skip.
        joint2_data: (J, 3) world-space joints of person2, or None to skip.
        obj2world: 4x4 object-to-world transform, or None to skip the object.
        obj_p: (P, 3) object-space sample points (used only when obj2world
            is given).
        camera_intrinsic: 3x3 pinhole intrinsics.
        camera_extrinsic: 4x4 world-to-camera transform.

    Returns:
        BGR uint8 image with person1 (red), person2 (green) and the object
        (yellow) drawn as filled circles.
    """
    # project every enabled entity into pixel coordinates
    joint1_pixels = None
    if joint1_data is not None:
        joint1_pixels = _project_to_pixels(
            joint1_data, camera_intrinsic, camera_extrinsic
        )
    joint2_pixels = None
    if joint2_data is not None:
        joint2_pixels = _project_to_pixels(
            joint2_data, camera_intrinsic, camera_extrinsic
        )
    obj_pixels = None
    if obj2world is not None:
        p = np.concatenate((obj_p, np.ones((obj_p.shape[0], 1))), axis=-1)
        # object space -> world space; obj2world's last row is [0,0,0,1], so
        # dropping to 3 columns and re-homogenizing in the helper is exact
        world_p = (p @ obj2world.T)[:, :3]
        obj_pixels = _project_to_pixels(world_p, camera_intrinsic, camera_extrinsic)

    # draw; astype() copies, so the caller's image is not mutated
    img = img[:, :, ::-1].astype(np.uint8)  # rgb2bgr
    if joint1_pixels is not None:
        for px in joint1_pixels:
            cv2.circle(img, tuple(px), 5, (0, 0, 255), -1)  # person1: red
    if joint2_pixels is not None:
        for px in joint2_pixels:
            cv2.circle(img, tuple(px), 5, (0, 255, 0), -1)  # person2: green
    if obj_pixels is not None:
        for px in obj_pixels:
            cv2.circle(img, tuple(px), 5, (0, 255, 255), -1)  # object: yellow
    return img


def vis_single_video(
    data_dir, egocam_dir, egocam_name, obj_name, obj_model_path, cfg, VTS_add_time
):
    """Align the VTS streams (mocap joints, rigid bodies, PWR tracker) with
    the ego RGB frames, save the aligned ego-camera trace to ``trace.npz``
    and render an overlay video ``vis_ego_HHO_pose.mp4`` in data_dir.

    Args:
        data_dir: sequence directory holding VTS_data.npz and the ego video.
        egocam_dir: calibration directory (intrinsic.txt and the
            camera2tracker_*.txt extrinsics).
        egocam_name: basename of the ego video / timestamp files.
        obj_name: rigid-body label of the object (used only if vis_obj).
        obj_model_path: CAD model path of the object (used only if vis_obj).
        cfg: dict of bool flags: vis_obj / vis_person1 / vis_person2 /
            use_my_rigid.
        VTS_add_time: offset (presumably ns -- TODO confirm) added to every
            VTS timestamp to bring it onto the RGB clock.
    """
    # CAD model: sample 500 surface points used for the overlay
    obj_p = None
    if cfg["vis_obj"]:
        obj_mesh = o3d.io.read_triangle_mesh(obj_model_path)
        obj_p = np.float32(obj_mesh.sample_points_poisson_disk(500).points)

    # camera intrinsics and the three candidate camera->rig extrinsics
    cam_intrinsic, _ = txt2intrinsic(join(egocam_dir, "intrinsic.txt"))
    cam_to_tracker = np.loadtxt(join(egocam_dir, "camera2tracker_pwr.txt"))
    cam_to_VTS_k_rigid = np.loadtxt(join(egocam_dir, "camera2tracker_k_rigid.txt"))
    cam_to_VTS_my_rigid = np.loadtxt(join(egocam_dir, "camera2tracker_my_rigid.txt"))

    # get data
    VTS_data = np.load(join(data_dir, "VTS_data.npz"), allow_pickle=True)["data"].item()

    # PWR ego head-tracker poses, timestamps shifted onto the RGB clock
    assert ("/pwr" in VTS_data) and ("/VTS_rigid" in VTS_data)
    ego_tracker_pose_list = []
    ego_tracker_timestamps = []
    for poses, labels, ts in zip(
        VTS_data["/pwr"], VTS_data["/pwr_ids"], VTS_data["pwr_timestamp"]
    ):
        idx = None
        for i, label in enumerate(labels):
            # if label == "116":  # PWR head ID for person1
            if label == "111":  # PWR head ID for person2
                idx = i
                break
        if idx is None:
            continue
        ego_tracker_pose_list.append(poses[idx])
        ego_tracker_timestamps.append(ts + VTS_add_time)
    N_ego_tracker = len(ego_tracker_timestamps)

    # VTS k-rigid ego poses (candidate rigid bodies near the head tracker)
    ego_VTS_rigid_pose_list = VTS_data["/VTS_rigid"]
    N_ego_VTS_rigid = len(VTS_data["VTS_rigid_timestamp"])
    ego_VTS_rigid_timestamps = [
        x + VTS_add_time for x in VTS_data["VTS_rigid_timestamp"]
    ]

    # my-rigid ego poses: the custom "action1" rigid body
    ego_my_rigid_pose_list = []
    ego_my_rigid_timestamps = []
    N_ego_my_rigid = 0
    if cfg["use_my_rigid"]:
        for poses, labels_t, ts in zip(
            VTS_data["/rigid"], VTS_data["/labels"], VTS_data["rigid_timestamp"]
        ):
            idx = None
            for i, label in enumerate(labels_t):
                if label == "action1":
                    idx = i
                    break
            if idx is None:
                continue
            ego_my_rigid_pose_list.append(poses[idx])
            ego_my_rigid_timestamps.append(ts + VTS_add_time)
        N_ego_my_rigid = len(ego_my_rigid_timestamps)

    if cfg["vis_obj"]:
        rigid_pose_list = VTS_data["/rigid"]
        rigid_timestamps = [x + VTS_add_time for x in VTS_data["rigid_timestamp"]]
        labels = VTS_data["/labels"]
        N_rigid = len(rigid_timestamps)
    if cfg["vis_person1"]:
        person1_list = VTS_data["/joints"]
        person1_timestamps = [x + VTS_add_time for x in VTS_data["person1_timestamp"]]
        N_person1 = len(person1_timestamps)
    if cfg["vis_person2"]:
        person2_list = VTS_data["/joints2"]
        person2_timestamps = [x + VTS_add_time for x in VTS_data["person2_timestamp"]]
        N_person2 = len(person2_timestamps)
    rgb_imgs = mp42imgs(join(data_dir, egocam_name + ".mp4"))
    rgb_timestamps = txt2timestamps(join(data_dir, egocam_name + "_timestamp.txt"))
    N_rgb = len(rgb_timestamps)

    # align data: for each rgb frame advance every stream pointer to the
    # sample nearest in time (all timestamp lists are monotonic)
    threshold = 40000000  # 40 ms in ns; frames without a match are dropped
    data_list = []
    (
        p_ego_tracker,
        p_ego_VTS_rigid,
        p_rigid,
        p_person1,
        p_person2,
        p_rgb2,
        p_ego_my_rigid,
    ) = (0, 0, 0, 0, 0, 0, 0)
    print(N_rgb)

    # npz used in trace_obj
    trace_data_list = {
        "action1": {"pose": [], "time": []},
        "pwr": {"pose": [], "time": []},
    }

    for rgb_idx in range(N_rgb):
        t = rgb_timestamps[rgb_idx]
        # advance a pointer while the next sample is at least as close to t
        while (p_ego_tracker + 1 < N_ego_tracker) and (
            abs(t - ego_tracker_timestamps[p_ego_tracker + 1])
            <= abs(t - ego_tracker_timestamps[p_ego_tracker])
        ):
            p_ego_tracker += 1
        while (p_ego_VTS_rigid + 1 < N_ego_VTS_rigid) and (
            abs(t - ego_VTS_rigid_timestamps[p_ego_VTS_rigid + 1])
            <= abs(t - ego_VTS_rigid_timestamps[p_ego_VTS_rigid])
        ):
            p_ego_VTS_rigid += 1
        if cfg["vis_obj"]:
            while (p_rigid + 1 < N_rigid) and (
                abs(t - rigid_timestamps[p_rigid + 1])
                <= abs(t - rigid_timestamps[p_rigid])
            ):
                p_rigid += 1
        if cfg["vis_person1"]:
            while (p_person1 + 1 < N_person1) and (
                abs(t - person1_timestamps[p_person1 + 1])
                <= abs(t - person1_timestamps[p_person1])
            ):
                p_person1 += 1
        if cfg["vis_person2"]:
            while (p_person2 + 1 < N_person2) and (
                abs(t - person2_timestamps[p_person2 + 1])
                <= abs(t - person2_timestamps[p_person2])
            ):
                p_person2 += 1
        if cfg["use_my_rigid"]:
            while (p_ego_my_rigid + 1 < N_ego_my_rigid) and (
                abs(t - ego_my_rigid_timestamps[p_ego_my_rigid + 1])
                <= abs(t - ego_my_rigid_timestamps[p_ego_my_rigid])
            ):
                p_ego_my_rigid += 1

        # drop the frame if any enabled stream has no sample within 40 ms
        flag = abs(t - ego_tracker_timestamps[p_ego_tracker]) < threshold
        flag &= abs(t - ego_VTS_rigid_timestamps[p_ego_VTS_rigid]) < threshold
        if cfg["vis_obj"]:
            flag &= abs(t - rigid_timestamps[p_rigid]) < threshold
        if cfg["vis_person1"]:
            flag &= abs(t - person1_timestamps[p_person1]) < threshold
        if cfg["vis_person2"]:
            flag &= abs(t - person2_timestamps[p_person2]) < threshold
        if cfg["use_my_rigid"]:
            flag &= abs(t - ego_my_rigid_timestamps[p_ego_my_rigid]) < threshold

        if not flag:
            print("[error in preparing paired data] wrong frame idx =", rgb_idx)
            continue

        # tracker2world = pwr head tracker pose; default camera extrinsic
        tracker2world = np.eye(4)
        tracker2world[:3, 3] = ego_tracker_pose_list[p_ego_tracker]["position"]
        tracker2world[:3, :3] = quat2mat(
            ego_tracker_pose_list[p_ego_tracker]["orientation"]
        )
        cam_extrinsic = np.linalg.inv(tracker2world @ cam_to_tracker)

        # VTS_rigid_to_world = the VTS rigid body nearest the pwr tracker
        VTS_rigid_to_world = None
        nearest_pose = None
        min_dist = 10000
        min_i = 0
        assert len(ego_VTS_rigid_pose_list[p_ego_VTS_rigid]) > 0
        for i, vr_pose in enumerate(ego_VTS_rigid_pose_list[p_ego_VTS_rigid]):
            # BUGFIX: this loop used to rebind `t`, clobbering the rgb
            # timestamp that is stored into data_list below
            pos = vr_pose["position"].reshape(3)
            dist = np.linalg.norm(pos - tracker2world[:3, 3], ord=2)
            if dist < min_dist:
                # BUGFIX: min_i used to be updated unconditionally (always
                # reporting the last index), and the pose appended to the
                # trace was the last iterated one rather than the nearest
                min_dist = dist
                min_i = i
                nearest_pose = vr_pose
                VTS_rigid_to_world = np.eye(4)
                VTS_rigid_to_world[:3, 3] = vr_pose["position"]
                VTS_rigid_to_world[:3, :3] = quat2mat(vr_pose["orientation"])

        print(min_dist * 100, "cm at ", min_i)
        if min_dist * 100 <= 8:
            # within 8 cm: trust the VTS k-rigid pose for the extrinsic
            cam_extrinsic = np.linalg.inv(VTS_rigid_to_world @ cam_to_VTS_k_rigid)
            trace_data_list["pwr"]["pose"].append(nearest_pose)
            trace_data_list["pwr"]["time"].append(ego_VTS_rigid_timestamps[p_ego_VTS_rigid])
        else:
            trace_data_list["pwr"]["pose"].append(ego_tracker_pose_list[p_ego_tracker])
            trace_data_list["pwr"]["time"].append(ego_tracker_timestamps[p_ego_tracker])

        if cfg["use_my_rigid"]:
            # the custom "action1" rigid body overrides the extrinsic again
            ego_my_rigid_pose = ego_my_rigid_pose_list[p_ego_my_rigid]
            my_rigid_to_world = np.eye(4)
            my_rigid_to_world[:3, 3] = ego_my_rigid_pose["position"]
            my_rigid_to_world[:3, :3] = quat2mat(ego_my_rigid_pose["orientation"])
            cam_extrinsic = np.linalg.inv(my_rigid_to_world @ cam_to_VTS_my_rigid)
            trace_data_list["action1"]["pose"].append(ego_my_rigid_pose)
            trace_data_list["action1"]["time"].append(ego_my_rigid_timestamps[p_ego_my_rigid])

        if cfg["vis_obj"]:
            obj2world = None
            rigid_poses = rigid_pose_list[p_rigid]
            device_names = labels[p_rigid]
            obj_label = None
            for i, device_name in enumerate(device_names):
                if device_name == obj_name:
                    obj_label = i
            if obj_label is not None:
                obj2world = np.eye(4)
                obj2world[:3, 3] = rigid_poses[obj_label]["position"]
                obj2world[:3, :3] = quat2mat(rigid_poses[obj_label]["orientation"])

        if cfg["vis_person1"]:
            joint1_globalpos, joint1_localrot = bvh2joint(
                person1_list[p_person1],
                end_link_trans=default_end_link_trans(),
                return_local_rot=True,
            )
        if cfg["vis_person2"]:
            joint2_globalpos, joint2_localrot = bvh2joint(
                person2_list[p_person2],
                end_link_trans=default_end_link_trans(),
                return_local_rot=True,
            )
        try:
            data_list.append(
                {
                    "timestamp": t,
                    "rgb": rgb_imgs[rgb_idx],
                    "cam_extrinsic": cam_extrinsic,
                    "obj2world": obj2world if cfg["vis_obj"] else None,
                    "person1": {
                        "globalpos": joint1_globalpos,
                        "localrot": joint1_localrot,
                    }
                    if cfg["vis_person1"]
                    else {"globalpos": None, "localrot": None},
                    "person2": {
                        "globalpos": joint2_globalpos,
                        "localrot": joint2_localrot,
                    }
                    if cfg["vis_person2"]
                    else {"globalpos": None, "localrot": None},
                }
            )
        except Exception:
            # BUGFIX: was a bare `except` (also swallowed KeyboardInterrupt);
            # the expected failure here is rgb_idx running past len(rgb_imgs)
            print("[error in append data_list] wrong frame idx =", rgb_idx)

    np.savez(join(data_dir, "trace.npz"), data=trace_data_list)

    # visualization: render every aligned frame into an mp4
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    W = 1920
    H = 1080
    vw = cv2.VideoWriter(join(data_dir, "vis_ego_HHO_pose.mp4"), fourcc, 30, (W, H))
    for data in data_list:
        img = render(
            data["rgb"],
            data["person1"]["globalpos"],
            data["person2"]["globalpos"],
            data["obj2world"],
            obj_p,
            cam_intrinsic,
            data["cam_extrinsic"],
        )
        vw.write(img)
    vw.release()


def read_timemap(timemap_path):
    """Load a timemap npz and return a {data value -> timestamp} dict."""
    entries = np.load(timemap_path, allow_pickle=True)["data"]
    return {entry["data"]: entry["timestamp"] for entry in entries}


def prepare_egovideo(data_dir, egocam_videoname, timemap_path, egocam_name):
    """Split the raw ego MP4, decode the LTC timecode in its audio track, and
    write per-frame timestamps.

    Produces in data_dir:
        <egocam_name>.wav            extracted audio (carries the LTC signal)
        <egocam_name>.mp4            video-only remux of the raw file
        <egocam_name>.txt            raw ``ltcdump`` output
        <egocam_name>_timestamp.txt  comma-separated timestamps, one/frame

    Args:
        data_dir: sequence directory holding the raw video.
        egocam_videoname: filename of the raw DJI MP4.
        timemap_path: ltc.txt mapping timecodes to system timestamps.
        egocam_name: basename used for all produced files.

    Returns:
        pos_first: audio sample position (string) of the first decoded
        timecode; the caller uses it to refine the VTS time offset.
    """
    video_path = join(data_dir, egocam_videoname)

    # extract the audio track (it carries the LTC timecode signal)
    audio_args = ["ffmpeg", "-i", video_path, join(data_dir, egocam_name + ".wav")]
    try:
        subprocess.run(audio_args, check=True)
    except subprocess.CalledProcessError as e:
        print("[error] get audio from mp4 failed!", e)

    # print meta
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
    fps = cap.get(cv2.CAP_PROP_FPS)
    W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print("fps: ", fps, "resolution: ", W, "x", H)
    print("total frames:")
    frames_args = [
        "ffprobe",
        "-v",
        "error",
        "-select_streams",
        "v:0",
        "-show_entries",
        "stream=nb_frames",
        "-of",
        "default=nokey=1:noprint_wrappers=1",
        join(data_dir, egocam_videoname),
    ]
    frame_count = 0
    try:
        result = subprocess.run(frames_args, check=True, capture_output=True, text=True)
    except subprocess.CalledProcessError:
        print("[error] get video frames count fail")
    else:
        # BUGFIX: this assignment lived in a `finally` block, so a failed
        # ffprobe raised NameError on `result` instead of being reported
        frame_count = result.stdout
        print(frame_count)

    # remux the video without its audio track
    video_args = [
        "ffmpeg",
        "-i",
        video_path,
        "-an",
        "-c:v",
        "copy",
        "-f",
        "mp4",
        join(data_dir, egocam_name + ".mp4"),
    ]
    try:
        subprocess.run(video_args, check=True)
    except subprocess.CalledProcessError:
        print("[error] get video  fail")

    # decode the LTC timecode from the wav into <egocam_name>.txt
    time_args = ["ltcdump", "-c", "2", "-F", join(data_dir, egocam_name + ".wav")]
    try:
        with open(join(data_dir, egocam_name + ".txt"), "w") as f:
            subprocess.run(time_args, check=True, stdout=f)
    except subprocess.CalledProcessError:
        print("[error] get timestamp fail")

    # parse the ltcdump output: count decoded frames and remember the first
    # valid (timecode, sample position) pair
    timecode_count = 0
    flag_first = True
    timecode_first = ""
    pos_first = ""
    with open(join(data_dir, egocam_name + ".txt"), "r") as f:
        for line in f:
            if line.startswith("#"):
                continue
            if flag_first:
                strings = line.strip().split()
                if not re.match(r"^\d{8}$", strings[0]):
                    continue
                if not re.match(r"^\d{2}:\d{2}:\d{2}:\d{2}", strings[1]):
                    continue
                timecode_first = strings[1]
                pos_first = strings[3]
                flag_first = False
            timecode_count += 1
    # NOTE(review): if no valid timecode line exists, pos_first stays "" and
    # the int() below raises ValueError -- confirm this hard failure is
    # acceptable for unreadable LTC audio.
    SAMPLE_INTERVAL = 1600.0  # audio samples per timecode frame
    timecode_loss_count = int(round(int(pos_first) / SAMPLE_INTERVAL, 0))
    print(
        "\ntimecode frame ",
        timecode_count,
        " video frame ",
        frame_count,
        " first ",
        timecode_first,
        " at ",
        pos_first,
        "timecode loss: ",
        timecode_loss_count,
        "\n",
    )

    # map the decoded timecodes to system timestamps via ltc.txt, starting at
    # the first timecode seen in the video's audio
    ltc_count = 0
    timestamps = []
    with open(timemap_path, "r") as f:
        for line in f:
            if line.startswith("#"):
                continue
            strings = line.strip().split()
            timecode = strings[1]
            if ltc_count == 0 and timecode == timecode_first:
                ltc_count += 1
                timestamps.append(
                    strings[6].replace(".", "")
                )  # TODO count with start time
                continue
            if ltc_count > 0 and ltc_count < timecode_count:
                timestamps.append(
                    strings[6].replace(".", "")
                )  # TODO count with start time
                ltc_count += 1
    if ltc_count != timecode_count:
        print("ltc_count, timecode_count ", ltc_count, " ", timecode_count)
        print("lose timecode in ltc.txt")
        exit(0)

    with open(join(data_dir, egocam_name + "_timestamp.txt"), "w") as f_timestamp:
        f_timestamp.write(",".join(timestamps))
    return pos_first


if __name__ == "__main__":
    ############################################################################################################
    # Hard-coded per-session configuration: paths, sequence name and the VTS
    # clock offset are edited by hand for every recording session.
    obj_dataset_dir = "/home/liuyun/HHO/object_dataset_final"
    data_root_dir = "/home/liuyun/HHO/multi-camera/data"
    # data_dir = "/home/liuyun/HHO/multi-camera/data/20230731/test003"

    seq_name = "test014"  # only handle one video use it

    # Manually-tuned VTS clock offset (presumably ns -- TODO confirm); it is
    # further refined below from the LTC position of the first timecode.
    # VTS_add_time = -600000000  # TODO: change it!!!
    # VTS_add_time = 150000000
    VTS_add_time =    38552698
    # VTS_add_time = 200000000  # TODO: change it!!!
    egocam1_name = "ego1"
    # egocam1_dir = "/media/liuyun/4A21-0000/DCIM/DJI_001"
    date = "2023-08-07"
    # Toggle which overlays to render and which extrinsic source to prefer.
    cfg = {
        "vis_obj": True,
        "vis_person1": True,
        "vis_person2": True,
        "use_my_rigid": True,
    }
    egocam1_dir = "/home/liuyun/HHO/collect_rawdata/camera_calib/Osmo_1"
    ############################################################################################################

    data_dir = join(data_root_dir, date.replace("-", ""))

    for seq in os.listdir(data_dir):
        egocam1_videoname = ""
        if seq != seq_name:
            continue

        # the raw ego video is the first DJI*.MP4 file in the sequence dir
        for name in os.listdir(join(data_dir, seq)):
            if re.match(r"^DJI.*\.MP4$", name):
                egocam1_videoname = name
                break
        if not egocam1_videoname:
            print(seq, " find not ego video, skipped")
            continue
        pos_first = prepare_egovideo(
            join(data_dir, seq),
            egocam1_videoname,
            join(data_dir, "ltc.txt"),
            egocam1_name,
        )

        # Shift the VTS offset by the audio lead-in before the first decoded
        # timecode (29.97 fps video, 1600 audio samples per timecode frame).
        # NOTE(review): VTS_add_time is mutated in place, so if the seq_name
        # filter ever matched several sequences the corrections would
        # accumulate across iterations -- confirm this is intended.
        VTS_add_time -= int(
            1000000000 / 29.97 * int(pos_first) / 1600.0
        )  # 1000000000 = 1s  1600 sample/frame
        print("VTS : ", VTS_add_time)
        obj_name, obj_model_path = None, None
        if cfg["vis_obj"]:
            obj_name, obj_model_path = get_obj_info(
                join(data_dir, seq), obj_dataset_dir
            )

        print("data_dir, obj_name =", join(data_dir, seq), obj_name)

        vis_single_video(
            join(data_dir, seq),
            egocam1_dir,
            egocam1_name,
            obj_name,
            obj_model_path,
            cfg,
            VTS_add_time,
        )
