from typing import List, Tuple, Dict
from tqdm import tqdm
import numpy as np
from numpy.typing import NDArray
from filterpy.kalman import KalmanFilter
from scipy.optimize import linear_sum_assignment
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull

from hj_dataset_devkit import ObstacleCategory

from .utils.immortal_tracker_util import BBox
from components.obj_tracker import ObjTracker, FrameInfo, TrackObject

__all__ = ['ImmortalTracker']

class KalmanFilterMotionModel:
    """Constant-velocity Kalman filter over a 7-dim box observation.

    The filter state has 10 dims: the 7 observed box parameters (taken from
    ``BBox.to_array()[:7]``) followed by the velocities of the first three of
    them.  State index 3 is treated as the yaw angle and is normalized into
    [-pi, pi) throughout.  NOTE(review): the exact parameter layout of
    ``BBox.to_array()`` is defined in immortal_tracker_util — confirm that
    index 3 is indeed the heading.
    """

    def __init__(self, bbox: BBox, time_stamp: int, covariance='default') -> None:
        # the time stamp of last observation
        self.prev_time_stamp = time_stamp
        self.latest_time_stamp = time_stamp
        # define constant velocity model
        self.score = bbox.s
        self.kf = KalmanFilter(dim_x=10, dim_z=7)
        # initialize the observable 7 dims from the first detection;
        # the 3 velocity dims start at filterpy's default of zero
        self.kf.x[:7] = bbox.to_array()[:7].reshape((7, 1))
        # state transition matrix: dims 0-2 are advanced by velocity dims 7-9
        # (this unit-lag F is rebuilt with the real time lag in get_prediction)
        self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0],    # state transition matrix
                              [0,1,0,0,0,0,0,0,1,0],
                              [0,0,1,0,0,0,0,0,0,1],
                              [0,0,0,1,0,0,0,0,0,0],
                              [0,0,0,0,1,0,0,0,0,0],
                              [0,0,0,0,0,1,0,0,0,0],
                              [0,0,0,0,0,0,1,0,0,0],
                              [0,0,0,0,0,0,0,1,0,0],
                              [0,0,0,0,0,0,0,0,1,0],
                              [0,0,0,0,0,0,0,0,0,1]])
        # measurement function: only the first 7 state dims are observed
        self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0],    # measurement function,
                              [0,1,0,0,0,0,0,0,0,0],
                              [0,0,1,0,0,0,0,0,0,0],
                              [0,0,0,1,0,0,0,0,0,0],
                              [0,0,0,0,1,0,0,0,0,0],
                              [0,0,0,0,0,1,0,0,0,0],
                              [0,0,0,0,0,0,1,0,0,0]])
        self.kf.B = np.zeros((10, 1))                   # dummy control transition matrix
        # alternative 11-dim model with angular velocity (kept for reference)
        # self.kf = KalmanFilter(dim_x=11, dim_z=7)
        # self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0,0],      # state transition matrix
        #                       [0,1,0,0,0,0,0,0,1,0,0],
        #                       [0,0,1,0,0,0,0,0,0,1,0],
        #                       [0,0,0,1,0,0,0,0,0,0,1],
        #                       [0,0,0,0,1,0,0,0,0,0,0],
        #                       [0,0,0,0,0,1,0,0,0,0,0],
        #                       [0,0,0,0,0,0,1,0,0,0,0],
        #                       [0,0,0,0,0,0,0,1,0,0,0],
        #                       [0,0,0,0,0,0,0,0,1,0,0],
        #                       [0,0,0,0,0,0,0,0,0,1,0],
        #                       [0,0,0,0,0,0,0,0,0,0,1]])
        # self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0,0],      # measurement function,
        #                       [0,1,0,0,0,0,0,0,0,0,0],
        #                       [0,0,1,0,0,0,0,0,0,0,0],
        #                       [0,0,0,1,0,0,0,0,0,0,0],
        #                       [0,0,0,0,1,0,0,0,0,0,0],
        #                       [0,0,0,0,0,1,0,0,0,0,0],
        #                       [0,0,0,0,0,0,1,0,0,0,0]])
        self.covariance_type = covariance
        if self.covariance_type == 'default':
            # self.kf.R[0:,0:] *= 10.     # measurement uncertainty
            self.kf.P[7:, 7:] *= 1000.  # state uncertainty, give high uncertainty to the unobservable initial velocities, covariance matrix
            self.kf.P *= 10.
            # self.kf.Q[-1,-1] *= 0.01    # process uncertainty
            # self.kf.Q[7:, 7:] *= 0.01
        # per-time-step list of BBox states; the last entry is the current estimate
        self.history = [bbox]

    def predict(self) -> None:
        """Run one KF predict step, then re-normalize yaw into [-pi, pi).

        Uses whatever F matrix is currently installed (i.e. the time-lag F
        set by the latest get_prediction call).
        """
        self.kf.predict()
        if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
        if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2

    def update(self, det_bbox: BBox) -> None:
        """ Updates the state vector with observed bbox.

        Runs a full predict + update cycle.  Before the KF update, the
        predicted yaw is aligned with the measured yaw (flipped by pi when
        the gap is obtuse) so the filter never averages two nearly opposite
        headings.  The last history entry is overwritten with the filtered
        estimate.
        """
        bbox = det_bbox.to_array()[:7]
        # full pipeline of kf, first predict, then update
        self.predict()
        # orientation correction of kf prediction and bbox
        if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
        if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
        new_theta = bbox[3]
        if new_theta >= np.pi: new_theta -= np.pi * 2
        if new_theta < -np.pi: new_theta += np.pi * 2
        bbox[3] = new_theta
        # orientation correction of gap between kf prediction and bbox
        predicted_theta = self.kf.x[3]
        # if the angle of two theta is not acute angle, flip the prediction by pi
        if np.abs(new_theta - predicted_theta) > np.pi / 2.0 and \
            np.abs(new_theta - predicted_theta) < np.pi * 3 / 2.0:
            self.kf.x[3] += np.pi
            if self.kf.x[3] > np.pi: self.kf.x[3] -= np.pi * 2
            if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
        # now the angle is acute: < 90 or > 270, convert the case of > 270 to < 90
        if np.abs(new_theta - self.kf.x[3]) >= np.pi * 3 / 2.0:
            if new_theta > 0: self.kf.x[3] += np.pi * 2
            else: self.kf.x[3] -= np.pi * 2
        self.kf.update(bbox)
        self.prev_time_stamp = self.latest_time_stamp
        if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
        if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
        # decay the score heavily when the detection carries no score
        if det_bbox.s is None:
            self.score = self.score * 0.01
        else:
            self.score = det_bbox.s
        # replace the predicted entry appended by get_prediction with the
        # filtered (posterior) estimate
        cur_bbox = self.kf.x[:7].reshape(-1).tolist()
        cur_bbox = BBox.from_array(cur_bbox + [self.score])
        self.history[-1] = cur_bbox

    def get_prediction(self, time_stamp: int) -> BBox:
        """ Advances the state vector and returns the predicted bounding box estimate.

        Rebuilds F so the velocities integrate over the elapsed time lag
        since the last observation, then reads filterpy's predicted prior
        without committing it to the filter state.  The prediction is also
        appended to `history`.
        """
        time_lag = abs(time_stamp - self.prev_time_stamp)
        self.latest_time_stamp = time_stamp
        self.kf.F = np.array([[1,0,0,0,0,0,0,time_lag,0,0], # state transition matrix
                              [0,1,0,0,0,0,0,0,time_lag,0],
                              [0,0,1,0,0,0,0,0,0,time_lag],
                              [0,0,0,1,0,0,0,0,0,0],
                              [0,0,0,0,1,0,0,0,0,0],
                              [0,0,0,0,0,1,0,0,0,0],
                              [0,0,0,0,0,0,1,0,0,0],
                              [0,0,0,0,0,0,0,1,0,0],
                              [0,0,0,0,0,0,0,0,1,0],
                              [0,0,0,0,0,0,0,0,0,1]])
        # filterpy's get_prediction returns (x_prior, P_prior); take the state
        pred_x = self.kf.get_prediction()[0]
        if pred_x[3] >= np.pi: pred_x[3] -= np.pi * 2
        if pred_x[3] < -np.pi: pred_x[3] += np.pi * 2
        pred_bbox = BBox.from_array(pred_x[:7].reshape(-1))
        self.history.append(pred_bbox)
        return pred_bbox

    def get_state(self) -> BBox:
        """ Returns the current bounding box estimate.
        """
        return self.history[-1]

    def compute_innovation_matrix(self) -> NDArray:
        """ compute the innovation matrix for association with mahalonobis distance

        S = H P H^T + R
        """
        return np.matmul(np.matmul(self.kf.H, self.kf.P), self.kf.H.T) + self.kf.R

    def sync_time_stamp(self, time_stamp: int) -> None:
        # NOTE(review): this writes `self.time_stamp`, which no other method
        # of this class reads (prediction uses prev_/latest_time_stamp) —
        # possibly a dead store; confirm intent.
        self.time_stamp = time_stamp

class HitManager:
    """Hit/miss bookkeeping that drives a tracklet's life cycle.

    A tracklet starts in 'birth', becomes 'alive' once it has collected
    enough hits (or was created early in the sequence), and — unless the
    tracker is configured as 'immortal' — turns 'dead' after too many
    consecutive frames without an association.
    """

    def __init__(self, config: dict, frame_count: int) -> None:
        self.time_since_update = 0
        self.hits = 1           # total hits, counting the initial detection
        self.hit_streak = 1     # consecutive hits, counting the initial detection
        self.age = 0
        self.type = config['tracker']
        self.immortal = (self.type == 'immortal')
        # immortal tracklets never expire, so they carry no maximum age
        if not self.immortal:
            self.max_age = config['max_age_since_update'][self.type]
        self.min_hits = config['min_hits_to_birth'][self.type]
        # tracklets created early in the sequence (or with no hit
        # requirement at all) are considered alive right away
        born_alive = frame_count <= self.min_hits or self.min_hits == 0
        self.state = 'alive' if born_alive else 'birth'
        self.recent_is_associated = True

    def update(self, is_matched: bool, frame_count: int, is_key_frame=True) -> None:
        """Fold one frame's association outcome into the statistics.

        Key frames advance the age / miss counters and may trigger a state
        transition; a successful match resets the miss counter and extends
        the hit streak.
        """
        if is_key_frame:
            self.age += 1
            if self.time_since_update > 0:
                self.hit_streak = 0     # the streak was broken last frame
            self.time_since_update += 1
        if is_matched:
            self.time_since_update = 0
            self.history = []
            self.hits += 1
            self.hit_streak += 1
        if is_key_frame:
            self.state_transition(is_matched, frame_count)

    def state_transition(self, is_associated: bool, frame_count: int) -> None:
        """Move between 'birth' / 'alive' / 'dead' after a key frame."""
        self.recent_is_associated = is_associated
        # only mortal trackers can time out (short-circuit keeps immortal
        # instances from touching the missing max_age attribute)
        expired = (not self.immortal) and self.time_since_update >= self.max_age
        if self.state == 'birth':
            if self.hits >= self.min_hits or frame_count <= self.min_hits:
                self.state = 'alive'
            elif expired:
                self.state = 'dead'
        elif self.state == 'alive' and expired:
            self.state = 'dead'

    def death(self) -> bool:
        """Whether the tracklet has been declared dead."""
        return self.state == 'dead'

class Tracklet:
    """One tracked object: a Kalman motion model plus life-cycle management."""

    def __init__(self, config: dict, id_: int, bbox: BBox,
                 det_type: ObstacleCategory, frame_count: int, time_stamp: int) -> None:
        self.id = id_
        self.time_stamp = time_stamp
        self.det_type = det_type
        self.motion_model = KalmanFilterMotionModel(
            bbox, time_stamp, covariance=config['kalman_filter_covariance'])
        # decay factor applied to the score on frames without an association
        self.score_multiplier = config.get('score_multiplier', 0.01)
        # birth / alive / dead management
        self.life_manager = HitManager(config['hit_manager_cfg'], frame_count)
        # score of the most recent bbox
        self.latest_score = bbox.s
        self.frame_idx_history = []

    def predict(self, time_stamp: int) -> BBox:
        """Predict the bbox state at `time_stamp`.

        The prediction is also appended to the motion model's history;
        the time stamp only matters when velocities are in use.
        """
        return self.motion_model.get_prediction(time_stamp)

    def update(self, is_matched: bool, bbox: BBox, frame_count: int) -> None:
        """Fold this frame's outcome into the tracklet.

        A matched detection updates the Kalman filter; an unmatched frame
        only decays the score.  The life manager is refreshed either way.
        """
        if is_matched:
            self.latest_score = bbox.s
            self.motion_model.update(bbox)
        else:
            self.latest_score = bbox.s * self.score_multiplier
        self.life_manager.update(is_matched, frame_count)

    def get_state(self) -> BBox:
        """Latest bbox estimate, carrying the latest score."""
        state = self.motion_model.get_state()
        state.s = self.latest_score
        return state

    def death(self) -> bool:
        """Whether the life manager has declared this tracklet dead."""
        return self.life_manager.death()

    def compute_innovation_matrix(self) -> NDArray:
        """Innovation matrix of the underlying Kalman filter (for m_dis association)."""
        return self.motion_model.compute_innovation_matrix()

    def sync_time_stamp(self, time_stamp: int) -> None:
        """Propagate the frame time stamp to the motion model."""
        self.motion_model.sync_time_stamp(time_stamp)

import torch
from mmdet3d.core import LiDARInstance3DBoxes, bbox_overlaps_3d
def calculate_iou_3d_gpu(preds, gts, mode='iou', translation=True):
    """Pairwise 3D IoU between two box sets computed on the GPU.

    Args:
        preds: (N, 7) boxes as numpy array or CUDA tensor.
        gts: (M, 7) boxes as numpy array or CUDA tensor.
        mode: overlap mode forwarded to mmdet3d's bbox_overlaps_3d.
        translation: if True, shift both sets by the first pred's center to
            keep coordinates small (improves numerical behavior).
    Returns:
        (N, M) numpy array of overlaps.
    """
    if isinstance(preds, np.ndarray):
        preds = torch.from_numpy(preds).cuda().float()
    if isinstance(gts, np.ndarray):
        gts = torch.from_numpy(gts).cuda().float()
    assert isinstance(preds, torch.Tensor)
    if translation:
        # work on copies: the in-place subtraction below previously mutated
        # caller-provided tensors when they were already torch.Tensors
        preds = preds.clone()
        gts = gts.clone()
        trans = preds[[0], :3]
        gts[:, :3] -= trans
        preds[:, :3] -= trans
    assert preds.shape[0] > 0 and preds.shape[-1] == 7
    assert gts.shape[0] > 0 and gts.shape[-1] == 7
    preds = LiDARInstance3DBoxes(preds, box_dim=7, with_yaw=True, origin=(0.5, 0.5, 0.5))
    gts = LiDARInstance3DBoxes(gts, box_dim=7, with_yaw=True, origin=(0.5, 0.5, 0.5))
    ious = bbox_overlaps_3d(preds.tensor, gts.tensor, mode=mode, coordinate='lidar') #[num_preds, num_gts]
    assert ious.size(0) == preds.tensor.size(0)
    r = ious.cpu().numpy()
    return r

def compute_iou_distance_gpu(dets: List[BBox], tracks: List[BBox], asso: str) -> NDArray:
    """GPU (1 - IoU) distance matrix between detections and track predictions.

    Only plain 'iou' association is supported on the GPU path.
    """
    assert asso == 'iou', 'Do not support GIoU for now'
    # nothing to compare: return an empty, correctly-shaped matrix
    if not dets or not tracks:
        return np.zeros((len(dets), len(tracks)))
    det_array = BBox.merge_boxes(dets)
    trk_array = BBox.merge_boxes(tracks)
    iou_matrix = calculate_iou_3d_gpu(det_array, trk_array, mode='iou')
    dist_matrix = 1 - iou_matrix
    assert dist_matrix.shape[0] == len(dets)
    assert dist_matrix.shape[1] == len(tracks)
    return dist_matrix

def iou3d(box_a: BBox, box_b: BBox) -> Tuple[float, float]:
    """BEV (2D) and 3D IoU between two boxes.

    Returns:
        (iou_2d, iou_3d): bird's-eye-view IoU and full volumetric IoU.
    """
    # keep only x/y for the BEV footprint — the original missed this slice
    # on box_a (box_b and giou3d both apply it), which could hand 3D corner
    # coordinates to the 2D Polygon
    boxa_corners = np.array(box_a.to_corners2d())[:, :2]
    boxb_corners = np.array(box_b.to_corners2d())[:, :2]
    reca, recb = Polygon(boxa_corners), Polygon(boxb_corners)
    overlap_area = reca.intersection(recb).area
    iou_2d = overlap_area / (reca.area + recb.area - overlap_area)
    ha, hb = box_a.h, box_b.h
    za, zb = box_a.z, box_b.z
    # vertical overlap of the two height intervals
    overlap_height = max(0, min((za+ha/2) - (zb-hb/2), (zb+hb/2) - (za-ha/2)))
    overlap_volume = overlap_area * overlap_height
    union_volume = box_a.w * box_a.l * ha + box_b.w * box_b.l * hb - overlap_volume
    iou_3d = overlap_volume / (union_volume + 1e-5)
    return iou_2d, iou_3d

def PolyArea2D(pts: NDArray) -> float:
    """Area of a simple polygon via the shoelace formula.

    Args:
        pts: (N, 2) array of vertices in order (either winding direction).
    Returns:
        Non-negative polygon area.
    """
    x, y = pts[:, 0], pts[:, 1]
    x_next, y_next = np.roll(x, -1), np.roll(y, -1)
    signed_twice_area = np.sum(x * y_next - y * x_next)
    return np.abs(signed_twice_area) * 0.5

def giou3d(box_a: BBox, box_b: BBox) -> float:
    """Generalized IoU between two 3D boxes.

    giou = I/U - (C - U)/C, where I is the intersection volume, U the union
    volume and C the enclosing convex volume (BEV convex-hull area times the
    vertical extent of the union of the two height intervals).
    """
    boxa_corners = np.array(box_a.to_corners2d())[:, :2]
    boxb_corners = np.array(box_b.to_corners2d())[:, :2]
    reca, recb = Polygon(boxa_corners), Polygon(boxb_corners)
    ha, hb = box_a.h, box_b.h
    za, zb = box_a.z, box_b.z
    # vertical overlap of the two height intervals
    overlap_height = max(0, min((za+ha/2) - (zb-hb/2), (zb+hb/2) - (za-ha/2)))
    # vertical extent of the union: top of the higher box minus bottom of the
    # lower one.  (The previous cross-term formula
    # max(top_a - bot_b, top_b - bot_a) under-estimates this span whenever
    # one box vertically contains the other.)
    union_height = max(za + ha / 2, zb + hb / 2) - min(za - ha / 2, zb - hb / 2)
    # compute intersection and union
    I = reca.intersection(recb).area * overlap_height
    U = box_a.w * box_a.l * ha + box_b.w * box_b.l * hb - I
    # compute the convex area
    all_corners = np.vstack((boxa_corners, boxb_corners))
    C = ConvexHull(all_corners)
    convex_corners = all_corners[C.vertices]
    convex_area = PolyArea2D(convex_corners)
    C = convex_area * union_height
    # compute giou
    giou = I / U - (C - U) / C
    return giou

def compute_iou_distance(dets: List[BBox], tracks: List[BBox], asso: str, gpu: bool) -> NDArray:
    """Pairwise (1 - IoU) distance matrix, computed on CPU or GPU.

    `asso` selects plain 3D IoU ('iou') or generalized IoU ('giou');
    the GPU path only supports 'iou'.
    """
    if gpu:
        return compute_iou_distance_gpu(dets, tracks, asso)
    iou_matrix = np.zeros((len(dets), len(tracks)))
    for det_idx, det in enumerate(dets):
        for trk_idx, trk in enumerate(tracks):
            if asso == 'iou':
                iou_matrix[det_idx, trk_idx] = iou3d(det, trk)[1]   # index 1 = 3d iou (0 is bev)
            elif asso == 'giou':
                iou_matrix[det_idx, trk_idx] = giou3d(det, trk)
    return 1 - iou_matrix

def diff_orientation_correction(diff: float) -> float:
    """Fold a yaw difference (det - trk) towards the range around zero.

    A gap beyond +/- 90 degrees means the track's heading is flipped
    relative to the detection, so one half-turn is removed (or added)
    before the distance is measured.
    """
    half_turn = np.pi
    quarter_turn = np.pi / 2
    if diff > quarter_turn:
        diff = diff - half_turn
    if diff < -quarter_turn:
        diff = diff + half_turn
    return diff

def m_distance(det: BBox, trk: BBox, trk_inv_innovation_matrix: NDArray=None) -> float:
    """Distance between one detection and one track state (first 7 dims).

    With an inverse innovation matrix this is the Mahalanobis distance;
    without one it falls back to the plain Euclidean (L2) distance.  The
    yaw component of the difference is folded towards zero first so flipped
    headings are not penalized.
    """
    det_array = det.to_array()[:7]
    trk_array = trk.to_array()[:7]
    diff = np.expand_dims(det_array - trk_array, axis=1)    # (7, 1) column vector
    corrected_yaw_diff = diff_orientation_correction(diff[3])
    diff[3] = corrected_yaw_diff
    if trk_inv_innovation_matrix is not None:
        result = \
            np.sqrt(np.matmul(np.matmul(diff.T, trk_inv_innovation_matrix), diff)[0][0])
    else:
        # extract the scalar from the (1, 1) product so both branches return
        # a plain float (previously this branch returned a 1x1 ndarray,
        # contradicting the annotated return type)
        result = np.sqrt(np.dot(diff.T, diff)[0][0])
    return result

def compute_m_distance(dets: List[BBox], tracks: List[BBox],
                       trk_innovation_matrix: List[NDArray]) -> NDArray:
    """Pairwise detection-track distance matrix.

    Passing trk_innovation_matrix=None yields plain L2 (euler) distances;
    otherwise each column uses the Mahalanobis distance with that track's
    inverse innovation matrix.

    Returns:
        numpy array of shape [len(dets), len(tracks)].
    """
    use_euler = trk_innovation_matrix is None
    if not use_euler:
        # invert once per track, not once per pair
        inv_matrices = [np.linalg.inv(m) for m in trk_innovation_matrix]
    dist_matrix = np.empty((len(dets), len(tracks)))
    for det_idx, det in enumerate(dets):
        for trk_idx, trk in enumerate(tracks):
            if use_euler:
                dist_matrix[det_idx, trk_idx] = m_distance(det, trk)
            else:
                dist_matrix[det_idx, trk_idx] = m_distance(det, trk, inv_matrices[trk_idx])
    return dist_matrix

def bipartite_matcher(dets: List[BBox], tracks: List[BBox], asso: str,
                      trk_innovation_matrix: List[NDArray], gpu: bool) -> Tuple[NDArray, NDArray]:
    """Globally optimal det-track assignment via the Hungarian algorithm.

    Returns:
        (matched_indices, distance_matrix): matched pairs as an (K, 2) array
        of [det_index, trk_index] rows, plus the full cost matrix.
    """
    # build the cost matrix according to the association metric
    if asso in ('iou', 'giou'):
        cost = compute_iou_distance(dets, tracks, asso, gpu)
    elif asso == 'm_dis':
        cost = compute_m_distance(dets, tracks, trk_innovation_matrix)
    elif asso == 'euler':
        cost = compute_m_distance(dets, tracks, None)
    det_ids, trk_ids = linear_sum_assignment(cost)
    pairs = np.stack([det_ids, trk_ids], axis=1)
    return pairs, cost

def greedy_matcher(dets: List[BBox], tracks: List[BBox], asso: str,
                   trk_innovation_matrix: List[NDArray], gpu: bool) -> Tuple[NDArray, NDArray]:
    """Greedy nearest-first det-track assignment.

    Any metric works with the bipartite matcher, but greedy matching is
    mainly intended for the Mahalanobis distance.
    """
    # build the cost matrix according to the association metric
    if asso in ('iou', 'giou'):
        distance_matrix = compute_iou_distance(dets, tracks, asso, gpu)
    elif asso == 'm_dis':
        distance_matrix = compute_m_distance(dets, tracks, trk_innovation_matrix)
    elif asso == 'euler':
        distance_matrix = compute_m_distance(dets, tracks, None)
    num_dets, num_trks = distance_matrix.shape
    # visit det/trk pairs from smallest to largest distance, claiming each
    # detection and each track at most once
    order = np.argsort(distance_matrix, axis=None)
    det_taken = [False] * num_dets
    trk_taken = [False] * num_trks
    matched_indices = list()
    for flat_idx in order:
        det_idx = int(flat_idx) // num_trks
        trk_idx = int(flat_idx) % num_trks
        if not det_taken[det_idx] and not trk_taken[trk_idx]:
            det_taken[det_idx] = True
            trk_taken[trk_idx] = True
            matched_indices.append([det_idx, trk_idx])
    if matched_indices:
        matched_indices = np.asarray(matched_indices)
    else:
        matched_indices = np.empty((0, 2))
    return matched_indices, distance_matrix

def associate_dets_to_tracks(dets: List[BBox], tracks: List[BBox], mode: str, asso: str,
                             dist_threshold: float, trk_innovation_matrix: List[NDArray],
                             gpu: bool) -> Tuple[List[NDArray], NDArray, NDArray]:
    """Associate detections with track predictions.

    Returns:
        (matches, unmatched_dets, unmatched_tracks): accepted pairs plus
        the indices of detections/tracks left unpaired or gated out.
    """
    if mode == 'bipartite':
        matched_indices, dist_matrix = \
            bipartite_matcher(dets, tracks, asso, trk_innovation_matrix, gpu)
    elif mode == 'greedy':
        matched_indices, dist_matrix = \
            greedy_matcher(dets, tracks, asso, trk_innovation_matrix, gpu)
    matched_det_ids = matched_indices[:, 0]
    matched_trk_ids = matched_indices[:, 1]
    unmatched_dets = [d for d in range(len(dets)) if d not in matched_det_ids]
    unmatched_tracks = [t for t in range(len(tracks)) if t not in matched_trk_ids]
    # gate: pairs farther apart than the threshold are rejected and their
    # members fall back to the unmatched pools
    matches = list()
    for pair in matched_indices:
        if dist_matrix[pair[0], pair[1]] > dist_threshold:
            unmatched_dets.append(pair[0])
            unmatched_tracks.append(pair[1])
        else:
            matches.append(pair.reshape(2))
    return matches, np.array(unmatched_dets), np.array(unmatched_tracks)

class MOTModel:
    """Frame-by-frame multi-object tracker.

    Holds the live set of tracklets and, for every incoming frame, runs
    predict -> associate -> update -> birth/death bookkeeping.
    """

    def __init__(self, config: dict) -> None:
        self.tracklets: List[Tracklet] = [] # tracker for each single tracklet
        self.frame_count = 0                # record for the frames
        self.trk_count = 0                  # count tracked obj number and used as track id
        self.time_stamp = None              # the previous time stamp
        self.tracklet_cfg = config['tracklet_cfg']
        self.match_type = config['match_type']              # 'bipartite' or 'greedy'
        self.score_threshold = config['score_threshold']    # dets below this are not associated
        self.asso = config['asso']                          # 'iou' | 'giou' | 'm_dis' | 'euler'
        self.asso_thres = config['asso_thres'][self.asso]   # gating threshold for that metric
        self.gpu = config.get('gpu', False)

    def frame_mot(self, frame_info: FrameInfo) -> List[TrackObject]:
        """ For each frame input, generate the latest mot results
        Args:
            frame_info (FrameInfo): input data, including detection bboxes and ego information
        Returns:
            tracks on this frame: [(bbox0, id0), (bbox1, id1), ...]
        """
        self.frame_count += 1
        # initialize the time stamp on frame 0
        if self.time_stamp is None:
            self.time_stamp = frame_info.timestamp
        det_bboxes = [BBox.from_detect_obj(obj) for obj in frame_info.detect]
        # predict all tracklets forward and associate them with the detections
        matched, unmatched_dets, unmatched_trks = \
            self.forward_step_trk(frame_info.timestamp, det_bboxes)
        # time_lag = frame_info.timestamp - self.time_stamp
        # update the matched tracks
        for t, trk in enumerate(self.tracklets):
            if t not in unmatched_trks:
                # find the detection paired with track t
                # (every track not in unmatched_trks is expected to appear in
                # `matched`; NOTE(review): `d` would be stale otherwise)
                for k in range(len(matched)):
                    if matched[k][1] == t:
                        d = matched[k][0]
                        break
                # if matched, update motion_model, else update score only
                trk.update(True, det_bboxes[d], self.frame_count)
            else:
                # no detection: the tracklet coasts on its own prediction
                trk.update(False, trk.get_state(), self.frame_count)
        # create new tracks for unmatched detections
        for index in unmatched_dets:
            track = Tracklet(self.tracklet_cfg, self.trk_count,
                             det_bboxes[index],
                             frame_info.detect[index].category,
                             self.frame_count, frame_info.timestamp)
            self.tracklets.append(track)
            self.trk_count += 1
        # remove dead tracks (iterate reversed so pops don't shift pending indices)
        track_num = len(self.tracklets)
        for index, trk in enumerate(reversed(self.tracklets)):
            if trk.death():
                self.tracklets.pop(track_num - 1 - index)
        # wrap up and update the information about the mot trackers
        self.time_stamp = frame_info.timestamp
        for trk in self.tracklets:
            trk.sync_time_stamp(self.time_stamp)
        # output the results
        result: List[TrackObject] = []
        for trk in self.tracklets:
            trk.frame_idx_history.append(frame_info.abs_index)
            result.append(TrackObject(trk.id, trk.det_type,
                                      trk.get_state().to_boundingbox(),
                                      trk.latest_score))
        return result

    def forward_step_trk(self, timestamp: int, det_bboxes: List[BBox]) -> Tuple[List[NDArray], NDArray, NDArray]:
        """Predict every tracklet to `timestamp` and associate the predictions
        with the score-filtered detections.

        Returns:
            (matched, unmatched_dets, unmatched_trks) with detection indices
            already remapped into `det_bboxes` (absolute) indexing.
        """
        # only sufficiently confident detections take part in association
        det_indexes = [i for i, det in enumerate(det_bboxes) if det.s >= self.score_threshold]
        valid_dets = [det_bboxes[i] for i in det_indexes]
        # prediction
        trk_preds = list()
        for trk in self.tracklets:
            trk_preds.append(trk.predict(timestamp))
        # association
        trk_innovation_matrix = None
        if self.asso == 'm_dis':
            trk_innovation_matrix = [trk.compute_innovation_matrix() for trk in self.tracklets]
        matched, unmatched_dets, unmatched_trks = \
            associate_dets_to_tracks(valid_dets, trk_preds, self.match_type,
                                     self.asso, self.asso_thres,
                                     trk_innovation_matrix, self.gpu)
        # update to absolute index (association ran on the filtered subset)
        for k in range(len(matched)):
            matched[k][0] = det_indexes[matched[k][0]]
        for k in range(len(unmatched_dets)):
            unmatched_dets[k] = det_indexes[unmatched_dets[k]]
        return matched, unmatched_dets, unmatched_trks

class ImmortalTracker(ObjTracker):
    """ObjTracker adapter that runs the immortal MOT model over a scene."""

    def __init__(self, mot_config: dict) -> None:
        super().__init__()
        self.mot_config = mot_config

    def process(self, frames_info: List[FrameInfo]) -> Dict[int, List[TrackObject]]:
        """Track every frame in order; results are keyed by frame timestamp."""
        # a fresh model per call so no tracker state leaks between scenes
        model = MOTModel(self.mot_config)
        return {
            frame.timestamp: model.frame_mot(frame)
            for frame in tqdm(frames_info, 'tracking')
        }
