import numpy as np

from utils.bbox_utils import xywh2xyxy

from DeepSORT.ReID.feature_extractor import Extractor
from DeepSORT.baseObject.detection import Detection
from DeepSORT.tracker import Tracker
from DeepSORT.NearestNeighborDistanceMetric import NearestNeighborDistanceMetric

# Staging area: tunable tracker hyper-parameters.
MAX_DISTANCE = 0.2       # max cosine distance for the NN appearance metric
NN_BUDGET = 100          # nn_budget passed to NearestNeighborDistanceMetric
MAX_IOU_DISTANCE = 0.7   # Tracker max_iou_distance (IOU association gate)
MAX_AGE = 70             # Tracker max_age — presumably frames a track survives unmatched; confirm
N_INIT = 3               # Tracker n_init — presumably hits required to confirm a track; confirm


class DeepSORT(object):
    """Multi-object tracker combining ReID appearance features with
    Kalman-filter motion tracking (DeepSORT)."""

    def __init__(self, use_cuda=True):
        """Build the ReID feature extractor and the underlying tracker.

        Args:
            use_cuda: run the feature extractor on GPU when True.
        """
        self.extractor = Extractor(use_cuda=use_cuda)
        metric = NearestNeighborDistanceMetric(
            "cosine", MAX_DISTANCE, NN_BUDGET)
        self.tracker = Tracker(
            metric, max_iou_distance=MAX_IOU_DISTANCE, max_age=MAX_AGE, n_init=N_INIT)

    def update(self, output_tensor, origin_image):
        """Advance the tracker by one frame.

        Args:
            output_tensor: detections for this frame, or None. Each item's
                first four values are used as an (x1, y1, x2, y2) box in
                pixel coordinates to crop the frame (assumed xyxy —
                TODO confirm against the detector's output format).
            origin_image: the HxWxC frame the detections refer to.

        Returns:
            List of (x1, y1, x2, y2, track_id) tuples for confirmed
            tracks that were updated on this frame.
        """
        detections = []
        if output_tensor is not None:
            # Crop each detection and embed it with the ReID network.
            features = self._get_features(output_tensor, origin_image)
            detections = [Detection(output_tensor[i], features[i])
                          for i in range(len(output_tensor))]
        self.tracker.predict()
        self.tracker.update(detections)

        # Emit boxes only for confirmed, freshly-updated tracks.
        outputs = []
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            x1, y1, x2, y2 = xywh2xyxy(track.to_xywh())
            outputs.append((x1, y1, x2, y2, track.track_id))
        return outputs

    def _get_features(self, bbox_xyxy, origin_image):
        """Crop each box from the frame and run the ReID extractor.

        Note: this was previously a @staticmethod that declared `self`
        and was called as `self._get_features(self, ...)`; it is now a
        regular instance method.

        Args:
            bbox_xyxy: iterable of boxes; the first four values of each
                are x1, y1, x2, y2 in pixel coordinates.
            origin_image: HxWxC frame to crop from.

        Returns:
            Extractor output for the crops, or an empty numpy array when
            no boxes were given.
        """
        crops = []
        for box in bbox_xyxy:
            # BUG FIX: the original unpacked four names from box[:3],
            # a ValueError for every non-empty input; slice four values.
            x1, y1, x2, y2 = (int(v) for v in box[:4])
            crops.append(origin_image[y1:y2, x1:x2])
        return self.extractor(crops) if crops else np.array([])
