import cv2
import json
import numpy as np
import os
from mmcv import ProgressBar

from mmdet.core import eval_map
from mmdet.datasets import DATASETS, CustomDataset


@DATASETS.register_module()
class CrowdHumanDataset(CustomDataset):
    """CrowdHuman dataset wrapper in mmdet's ``CustomDataset`` format.

    Annotations are read from an odgt file (one JSON object per line).
    For every ``"person"`` instance two ground-truth boxes are emitted:
    the head box (``hbox``, label 0) and the full-body box (``fbox``,
    label 1), matching the order of ``CLASSES``.
    """

    CLASSES = ("head", "body")

    def load_annotations(self, ann_file_path):
        """Load annotations from an odgt annotation file.

        Args:
            ann_file_path (str): Path to the .odgt file, one JSON-encoded
                image record per line.

        Returns:
            list[dict]: One dict per image with keys ``filename``,
            ``width``, ``height`` and ``ann`` (bboxes/labels plus empty
            ignore arrays).

        Raises:
            FileNotFoundError: If an image referenced by the annotation
                file cannot be read.
        """
        data_infos = []
        print(f"load annotations from {ann_file_path}...")
        progress_bar = ProgressBar()
        # Known CrowdHuman split sizes; only used for progress display.
        if "train" in ann_file_path:
            progress_bar.task_num = 15000
        elif "val" in ann_file_path:
            progress_bar.task_num = 4370
        with open(ann_file_path) as ann_file:
            for line in ann_file:
                ann = json.loads(line.strip())
                filename = ann["ID"] + ".jpg"
                image_path = os.path.join(self.img_prefix, filename)
                # The odgt file carries no image size, so the image must be
                # read to obtain it. cv2.imread returns None on failure —
                # fail loudly instead of crashing later on image.shape.
                image = cv2.imread(image_path)
                if image is None:
                    raise FileNotFoundError(
                        f"Failed to read image: {image_path}")
                height, width = image.shape[:2]
                boxes, labels = [], []
                for gt_box in ann["gtboxes"]:
                    # Skip crowd/ignore regions; only real persons are kept.
                    if gt_box["tag"] != "person":
                        continue
                    # Boxes are stored as [x, y, w, h]; convert to
                    # [x1, y1, x2, y2] with inclusive right/bottom edges.
                    for key, label in (("hbox", 0), ("fbox", 1)):
                        x1, y1, w, h = gt_box[key]
                        boxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
                        labels.append(label)
                # reshape(-1, 4) keeps the expected (0, 4) shape even when
                # an image has no person boxes (np.array([]) would be (0,)).
                boxes = np.array(boxes, dtype=np.float32).reshape(-1, 4)
                labels = np.array(labels, dtype=np.int64)
                data_infos.append({
                    "filename": filename,
                    "width": width,
                    "height": height,
                    "ann": {
                        "bboxes": boxes,
                        "labels": labels,
                        "bboxes_ignore": np.zeros((0, 4), dtype=np.float32),
                        "labels_ignore": np.zeros((0, ), dtype=np.int64)
                    }
                })
                progress_bar.update()
        return data_infos

    def get_ann_info(self, idx):
        """Return the annotation dict of the image at index ``idx``."""
        return self.data_infos[idx]["ann"]

    def _filter_imgs(self, min_size=32):
        """Filter out images that are too small or have no GT boxes.

        Args:
            min_size (int): Minimum allowed image side length. Default: 32.

        Returns:
            list[int]: Indices of the images kept.
        """
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if len(img_info["ann"]["bboxes"]) <= 0:
                continue
            if min(img_info["width"], img_info["height"]) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def evaluate(self,
                 results,
                 metric="mAP",
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. It must be a float
                when evaluating mAP, and can be a list when evaluating recall.
                Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ["mAP"]
        if metric not in allowed_metrics:
            raise KeyError(f"metric {metric} is not supported")
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == "mAP":
            assert isinstance(iou_thr, float)
            ds_name = self.CLASSES
            # Forward the caller-supplied scale_ranges (previously it was
            # hardcoded to None, silently ignoring the parameter).
            mean_ap, _ = eval_map(
                results,
                annotations,
                scale_ranges=scale_ranges,
                iou_thr=iou_thr,
                dataset=ds_name,
                logger=logger)
            eval_results["mAP"] = mean_ap
        return eval_results
