import os
from collections import Counter
from typing import Dict

import numpy as np
import torch
from numpy.typing import NDArray

from common_utils import read_yaml, euler_to_rotation
from components.obj_tracker import TrackObject
from hj_dataset_devkit import HongjingScene, Obstacle, BoundingBox
from model_frame.utils import boxes_iou3d_gpu, write_pickle
from obj_autolabeling import DetTrackRefineLabeler

# YAML config consumed by DetTrackRefineLabeler (see __main__ block).
CONFIG_PATH = 'configs/det_track_refine_labeler.yaml'
# Output directory for per-object pickles; '$OUTPUT_PATH' is presumably
# substituted by the job launcher before this script runs -- TODO confirm.
OUTPUT_PATH = '$OUTPUT_PATH'

# Minimum 3D IoU for a tracked detection to be matched to an annotation.
IOU_THRESH = 0.3
# Per-axis enlargement factor applied to a box when cropping its point cloud.
CROP_SCALER = 1.5
# Frames whose cropped cloud has fewer points than this are skipped.
MIN_CROP_POINTS = 4
# Object sequences shorter than this (after GT filtering) are not saved.
MIN_SEQ_LEN = 2

class ObjRefineSeq:
    """Time-indexed sequence of matched (annotation, inference) pairs for one tracked object."""

    class AnnoInferPair:
        """One frame's matched annotation/inference pair plus its cropped cloud and ego pose."""

        def __init__(self, anno: Obstacle, infer: TrackObject, cloud: NDArray, pose: NDArray) -> None:
            self.anno = anno    # ground-truth obstacle matched to the inference
            self.infer = infer  # tracked detection
            self.cloud = cloud  # point cloud cropped around the inferred box
            self.pose = pose    # ego pose of the frame

        def to_dict(self) -> dict:
            """Serialize the pair to a plain dict (for pickling)."""
            return {
                'anno': self.anno.to_dict(),
                'infer': self.infer.to_dict(),
                'cloud': self.cloud,
                'pose': self.pose,
            }

    def __init__(self) -> None:
        # timestamp -> AnnoInferPair
        self.__seq: Dict[int, 'ObjRefineSeq.AnnoInferPair'] = {}

    @property
    def len(self) -> int:
        """Number of frames currently in the sequence."""
        return len(self.__seq)

    def add_frame(self, stamp: int, anno: Obstacle, infer: TrackObject, crop_pc: NDArray, pose: NDArray) -> None:
        """Record the matched pair observed at `stamp` (overwrites an existing entry)."""
        self.__seq[stamp] = ObjRefineSeq.AnnoInferPair(anno, infer, crop_pc, pose)

    def filter_best_gt(self) -> None:
        """Keep only frames matched to the most frequently matched annotation id.

        No-op on an empty sequence (previously `max()` raised ValueError).
        Ties resolve to the first-seen id, matching the original `max` behavior.
        """
        if not self.__seq:
            return
        counts = Counter(pair.anno.id for pair in self.__seq.values())
        best_id, _ = counts.most_common(1)[0]
        self.__seq = {stamp: pair for stamp, pair in self.__seq.items() if pair.anno.id == best_id}

    def to_pickle(self) -> Dict[int, dict]:
        """Serialize the whole sequence as {timestamp: pair_dict}."""
        return {stamp: pair.to_dict() for stamp, pair in self.__seq.items()}

def cloud_crop_by_box(cloud: NDArray, box: BoundingBox, scaler: float = CROP_SCALER) -> NDArray:
    """Return the points of `cloud` lying inside `box` enlarged by `scaler`.

    Args:
        cloud: (N, >=3) array; columns 0..2 are x, y, z in the cloud frame.
        box: bounding box whose center/rotation/size define the crop region.
        scaler: per-axis enlargement factor for the box half-extents
            (defaults to the module-level CROP_SCALER, preserving old behavior).

    Returns:
        The rows of `cloud` strictly inside the scaled box.
    """
    # A rotation matrix is orthonormal, so its transpose is its inverse --
    # cheaper and exact compared to np.linalg.inv. Assumes as_matrix() yields
    # a proper rotation matrix -- TODO confirm against euler_to_rotation.
    world_to_box = euler_to_rotation(box.rotation.to_list()).as_matrix().T
    # Express the points in the box-local frame.
    local_pc = (world_to_box @ (cloud[:, :3] - np.array(box.center.to_list())).T).T
    half_extent = np.array([box.size.x, box.size.y, box.size.z]) / 2 * scaler
    mask = np.all((local_pc > -half_extent) & (local_pc < half_extent), axis=1)
    return cloud[mask]

def output_scene_refine_data(scene: HongjingScene, det_labeler=None) -> None:
    """Match tracked detections against annotations and dump per-object refine sequences.

    For every frame, tracked detections are matched to annotated obstacles by
    3D IoU; accepted pairs (IoU >= IOU_THRESH and at least MIN_CROP_POINTS
    cropped points) are grouped into per-track sequences, filtered to each
    sequence's dominant ground-truth id, and pickled to OUTPUT_PATH as
    '<scene_name>_<obj_id>.pkl'.

    Args:
        scene: scene to process.
        det_labeler: labeler providing .detect()/.track(); defaults to the
            module-level `labeler` created in the __main__ block (previously
            this global was the only option).
    """
    if det_labeler is None:
        det_labeler = labeler  # fall back to the global set up in __main__
    tracked_det = det_labeler.track(det_labeler.detect(scene, 64))
    obj_seqs: Dict[int, ObjRefineSeq] = {}
    # Per-frame IoU matching between annotations and tracked detections.
    for frame in scene.frames:
        anno_objs = frame.get_obstacles()
        if anno_objs is None or len(anno_objs) == 0:
            continue
        stamp = frame.get_timestamp()
        infer_objs = tracked_det.get(stamp)
        if infer_objs is None or len(infer_objs) == 0:
            continue
        anno_tns = torch.tensor([[obj.box.center.x, obj.box.center.y, obj.box.center.z,
                                  obj.box.size.x, obj.box.size.y, obj.box.size.z,
                                  obj.box.rotation.z] for obj in anno_objs])
        infer_tns = torch.tensor([[obj.bbox.center.x, obj.bbox.center.y, obj.bbox.center.z,
                                   obj.bbox.size.x, obj.bbox.size.y, obj.bbox.size.z,
                                   obj.bbox.rotation.z] for obj in infer_objs])
        # For each inferred box: best IoU over all annotations, and which one.
        iou, idx = boxes_iou3d_gpu(anno_tns.cuda(), infer_tns.cuda()).max(dim=0)
        pose = frame.ego_pose
        for i, infer_obj in enumerate(infer_objs):
            if iou[i] < IOU_THRESH:
                continue
            crop_cloud = cloud_crop_by_box(frame.get_lidar_cloud('true_value'), infer_obj.bbox)
            if crop_cloud.shape[0] < MIN_CROP_POINTS:
                continue  # too few points to be useful for refinement
            if infer_obj.id not in obj_seqs:
                obj_seqs[infer_obj.id] = ObjRefineSeq()
            obj_seqs[infer_obj.id].add_frame(
                stamp, anno_objs[int(idx[i])], infer_obj, crop_cloud, pose
            )
    # Save each sufficiently long sequence, keeping only its dominant GT id.
    for obj_id, seq in obj_seqs.items():
        seq.filter_best_gt()
        if seq.len < MIN_SEQ_LEN:
            continue
        write_pickle(os.path.join(OUTPUT_PATH, f'{scene.meta.name}_{obj_id}.pkl'), seq.to_pickle())

if __name__ == '__main__':
    # `labeler` must stay a module-level global: output_scene_refine_data
    # resolves it by name when no explicit labeler is supplied.
    cfg = read_yaml(CONFIG_PATH)
    labeler = DetTrackRefineLabeler(**cfg['labeler_kwargs'])
    output_scene_refine_data(HongjingScene.from_path('$SCENE_PATH'))
