import os.path as osp
import xml.etree.ElementTree as ET

import mmcv
from mmdet.core import eval_map
from mmdet.datasets import DATASETS, VOCDataset
from PIL import Image


@DATASETS.register_module()
class BrainwashDataset(VOCDataset):
    """Brainwash head-detection dataset stored in Pascal-VOC XML layout.

    A single-class ("head") dataset. Reuses the VOC/XML annotation parsing
    machinery from ``VOCDataset`` but bypasses ``VOCDataset.__init__``,
    which would otherwise try to infer a VOC year (2007/2012) from the
    data path.
    """

    CLASSES = ("head", )

    def __init__(self, **kwargs):
        # Deliberately skip VOCDataset.__init__ (it asserts a VOC year in
        # the dataset path) and call the grandparent initializer instead.
        super(VOCDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.

        Args:
            ann_file (str): Path of a text file listing image ids,
                one per line.

        Returns:
            list[dict]: Per-image info dicts with keys ``id``,
                ``filename``, ``width`` and ``height``.
        """
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            xml_path = osp.join(self.img_prefix, "Annotations",
                                f"{img_id}.xml")
            tree = ET.parse(xml_path)
            root = tree.getroot()

            image_name = root.find("filename").text
            filename = f"JPEGImages/{image_name}"

            size = root.find("size")
            if size is not None:
                width = int(size.find("width").text)
                height = int(size.find("height").text)
            else:
                # The XML lacks a <size> element: fall back to reading the
                # image header. Context manager closes the file handle
                # promptly (the original leaked it).
                img_path = osp.join(self.img_prefix, "JPEGImages",
                                    f"{img_id}.jpg")
                with Image.open(img_path) as img:
                    width, height = img.size
            data_infos.append(
                dict(id=img_id, filename=filename, width=width, height=height))

        return data_infos

    def evaluate(self,
                 results,
                 metric="mAP",
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. It must be a float
                when evaluating mAP, and can be a list when evaluating recall.
                Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ["mAP"]
        if metric not in allowed_metrics:
            raise KeyError(f"metric {metric} is not supported")
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == "mAP":
            assert isinstance(iou_thr, float)
            mean_ap, _ = eval_map(results,
                                  annotations,
                                  # Bug fix: forward the caller's
                                  # scale_ranges (was hard-coded to None,
                                  # silently ignoring the argument).
                                  scale_ranges=scale_ranges,
                                  iou_thr=iou_thr,
                                  dataset=self.CLASSES,
                                  logger=logger)
            eval_results["mAP"] = mean_ap
        return eval_results
