from typing import List, Dict, Any
import numpy as np
from numpy.typing import NDArray

from hj_dataset_devkit import Scene, Obstacle

from common_utils.math_util import pose_interpolate, genertate_rt_matrix, matrix_to_rt

def box_to_pose(box: Obstacle) -> NDArray:
    """Return the 4x4 RT matrix of an obstacle's box in ego coordinates."""
    center = box.box.center.to_list()
    rotation = box.box.rotation.to_list()
    return genertate_rt_matrix(center, rotation)

def interpolate_obj_anno(scene: Scene, extend_t_us: int = 1000 * 1000) -> Dict[int, List[Obstacle]]:
    """Fill in obstacle annotations for un-annotated frames by interpolation.

    For each object track, frames whose annotation is missing get an
    interpolated obstacle (pose interpolated in world frame, size linearly
    interpolated). Tracks with a single annotation are treated as static in
    the world frame. Interpolation extends past the first/last annotation of
    a track by at most ``extend_t_us``.

    Args:
        scene: Scene whose frames provide ego poses and (sparse) obstacle
            annotations.
        extend_t_us: Extrapolation window in microseconds beyond a track's
            first/last annotation (default: 1 second).

    Returns:
        Mapping of frame timestamp to the list of obstacles for that frame,
        containing both original and interpolated obstacles.
    """

    def interpolate_size(t: int, t1: int, s1: List[float],
                         t2: int, s2: List[float]) -> List[float]:
        # Linear size interpolation at time t; sizes are 3-element
        # (l, w, h) lists — TODO confirm against Obstacle.box.size.
        alpha = (t - t1) / (t2 - t1)
        return [a + (b - a) * alpha for a, b in zip(s1, s2)]

    # Pass 1: collect frames needing interpolation, per-object tracks,
    # and the ego pose of every frame.
    interp_stamps: List[int] = []
    obj_tracks: Dict[Any, Dict[int, Obstacle]] = {}
    pose_map: Dict[int, NDArray] = {}
    frames_anno: Dict[int, List[Obstacle]] = {}
    for frame in scene.frames:
        timestamp = frame.get_timestamp()
        pose_map[timestamp] = frame.ego_pose
        anno = frame.get_obstacles()
        if anno is None:
            interp_stamps.append(timestamp)
            continue
        frames_anno[timestamp] = anno
        for obj in anno:
            obj_tracks.setdefault(obj.id, {})[timestamp] = obj

    # Separate array for vectorized range masks; keeps the typed list intact
    # (the original rebound the List[int] variable to an ndarray).
    interp_arr = np.asarray(interp_stamps, dtype=np.int64)

    # Pass 2: interpolate each track independently.
    for track in obj_tracks.values():
        # Sort explicitly: the pairwise interpolation below requires
        # increasing timestamps (no-op when frames arrive time-ordered).
        track_stamps = sorted(track.keys())
        if len(track_stamps) == 1:
            # Single annotation: assume the object is static in the world
            # frame and extrapolate within +/- extend_t_us.
            ref_t = track_stamps[0]
            nearby = interp_arr[(interp_arr <= ref_t + extend_t_us) &
                                (interp_arr >= ref_t - extend_t_us)]
            ref_obj = track[ref_t]
            # World-frame box pose: ego pose composed with box-in-ego pose.
            obj_pose = pose_map[ref_t] @ box_to_pose(ref_obj)
            for t in nearby:
                t = int(t)  # plain int key, matching the declared return type
                interp_pose = np.linalg.inv(pose_map[t]) @ obj_pose
                box_t, box_r = matrix_to_rt(interp_pose)
                obj = Obstacle(ref_obj.id, ref_obj.category, box_t,
                               ref_obj.box.size.to_list(), box_r, 0, 0)
                frames_anno.setdefault(t, []).append(obj)
            continue
        # Multi-annotation track: interpolate between each consecutive pair,
        # widening the first/last interval by extend_t_us for extrapolation.
        for i in range(len(track_stamps) - 1):
            t1, t2 = track_stamps[i], track_stamps[i + 1]
            lo = t1 - extend_t_us if i == 0 else t1
            hi = t2 + extend_t_us if i == len(track_stamps) - 2 else t2
            interp_t = interp_arr[(interp_arr <= hi) & (interp_arr >= lo)]
            if len(interp_t) == 0:
                continue
            box1, box2 = track[t1], track[t2]
            pose1, pose2 = pose_map[t1], pose_map[t2]
            # Interpolate world-frame box poses at all requested stamps.
            interp_poses = pose_interpolate(interp_t, t1, pose1 @ box_to_pose(box1),
                                            t2, pose2 @ box_to_pose(box2))
            # Distinct index name (was `i`, shadowing the outer loop index).
            for k, t in enumerate(interp_t):
                t = int(t)
                # Invariant: un-annotated stamps never coincide with
                # annotated ones, so t is strictly between/outside t1, t2.
                assert t != t1 and t != t2
                interp_pose = np.linalg.inv(pose_map[t]) @ interp_poses[k]
                box_t, box_r = matrix_to_rt(interp_pose)
                box_s = interpolate_size(t, t1, box1.box.size.to_list(),
                                         t2, box2.box.size.to_list())
                obj = Obstacle(box1.id, box1.category, box_t, box_s, box_r, 0, 0)
                frames_anno.setdefault(t, []).append(obj)
    return frames_anno
