# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import logging
import numpy as np
import os
import tempfile
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from functools import lru_cache
import torch
import json
from numba import jit
import time
import math

from detectron2.data import MetadataCatalog
from detectron2.utils import comm

from .evaluator import DatasetEvaluator
from ..data.datasets.crowd_human import CLASS_NAMES


class CrowdHumanDetectionEvaluator(DatasetEvaluator):
    """
    Evaluate Crowd Human AP.
    It contains a synchronization, therefore has to be called from all ranks.

    Note that this is a rewrite of the official Matlab API.
    The results should be similar, but not identical to the one produced by
    the official API.
    """

    def __init__(self, dataset_name):
        """
        Args:
            dataset_name (str): name of the dataset, e.g., "crowd_val"
        """
        self._dataset_name = dataset_name
        meta = MetadataCatalog.get(dataset_name)
        # CrowdHuman ".odgt" files hold one JSON object per line.
        with open(os.path.join(meta.dirname, f"annotation_{meta.split}.odgt")) as f:
            tmp_annotation = f.readlines()
        with open(os.path.join(meta.dirname, "size.json")) as f:
            self._size = json.load(f)
        self._anns = list(map(json.loads, tmp_annotation))
        self._class_names = meta.thing_classes
        self._is_2007 = False
        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

    def reset(self):
        self._predictions = defaultdict(list)  # class id -> list of prediction strings
        self._pred_id = defaultdict(list)  # image id -> list of prediction dicts

    def process(self, inputs, outputs):
        """Accumulate per-image predictions from one batch of model outputs."""
        for input, output in zip(inputs, outputs):
            image_id = input["image_id"]
            instances = output["instances"].to(self._cpu_device)
            boxes = instances.pred_boxes.tensor.numpy()
            scores = instances.scores.tolist()
            classes = instances.pred_classes.tolist()
            for box, score, cls in zip(boxes, scores, classes):
                xmin, ymin, xmax, ymax = box
                # The inverse of data loading logic in `datasets/crowd.py`
                xmin += 1
                ymin += 1
                self._predictions[cls].append(
                    f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
                )
                # Parallel record in (center_x, center_y, width, height) form.
                self._pred_id[image_id].append({
                    "score": round(float(score), 5),
                    "tag": 1,
                    "box": [
                        round(float((xmin + xmax) / 2), 1),
                        round(float((ymin + ymax) / 2), 1),
                        round(float(xmax - xmin), 1),
                        round(float(ymax - ymin), 1),
                    ],
                })

    def evaluate(self):
        """
        Returns:
            dict: has a key "bbox", whose value is a dict mapping
                "<class>[_<scale>]_ap" / "<class>[_<scale>]_lamr" entries
                plus the mean "AP" over classes.
            Only the main process returns the dict; other ranks return None.
        """
        # Area breakdowns (small/medium/large), in pixels^2.
        area_ranges = {
            "s": [0, 32**2],
            "m": [32**2, 96**2],
            "l": [96**2, 1e10],
        }

        all_predictions = comm.gather(self._predictions, dst=0)
        if not comm.is_main_process():
            return
        predictions = defaultdict(list)
        for predictions_per_rank in all_predictions:
            for clsid, lines in predictions_per_rank.items():
                predictions[clsid].extend(lines)
        del all_predictions

        self._logger.info(
            "Evaluating {} using {} metric. "
            "Note that results do not use the official Matlab API.".format(
                self._dataset_name, 2007 if self._is_2007 else 2012
            )
        )

        ret = OrderedDict()
        bbox = {}
        with tempfile.TemporaryDirectory(prefix="crowd_eval_") as dirname:
            res_file_template = os.path.join(dirname, "{}.txt")
            mAp = []
            for cls_id, cls_name in enumerate(self._class_names):
                lines = predictions.get(cls_id, [""])

                # Write and CLOSE the detection file before evaluating:
                # crowd_eval() re-opens it for reading, so the buffered
                # writes must be flushed to disk first.
                with open(res_file_template.format(cls_name), "w") as f:
                    f.write("\n".join(lines))

                for area_name, area_range in area_ranges.items():
                    rec, prec, ap, lamr = crowd_eval(
                        res_file_template,
                        self._anns,
                        cls_name,
                        ovthresh=0.5,
                        use_07_metric=self._is_2007,
                        logger=self._logger,
                        area_range=area_range,
                    )
                    bbox[f"{cls_name}_{area_name}_ap"] = ap
                    bbox[f"{cls_name}_{area_name}_lamr"] = lamr
                # Full-area evaluation for the headline per-class numbers.
                rec, prec, ap, lamr = crowd_eval(
                    res_file_template,
                    self._anns,
                    cls_name,
                    ovthresh=0.5,
                    use_07_metric=self._is_2007,
                    logger=self._logger,
                )
                bbox[f"{cls_name}_ap"] = ap
                bbox[f"{cls_name}_lamr"] = lamr
                mAp.append(ap)
        bbox.update({"AP": np.mean(mAp)})
        ret["bbox"] = bbox
        return ret


##############################################################################
#
# Below code is modified from
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------

"""Python implementation of the crowd AP evaluation code."""


def parse_rec(ann, area_range=(0, 1e10)):
    """Parse one CrowdHuman annotation record into per-class GT objects.

    Boxes tagged as non-person, or marked ignore/unsure (in 'extra', or in
    'head_attr' for the head box), are flagged as difficult. Only boxes
    whose area falls in [area_range[0], area_range[1]) are kept.
    """
    parsed = []
    lo, hi = area_range

    for gt in ann['gtboxes']:
        base_difficult = gt['tag'] != 'person'
        head_attr = gt['head_attr']
        extra = gt.get('extra', {})
        if extra.get('ignore') or extra.get('unsure'):
            base_difficult = True

        for cls in CLASS_NAMES:
            difficult = base_difficult
            # The head box carries its own ignore/unsure flags.
            if cls == 'hbox' and not difficult:
                if head_attr.get('ignore') or head_attr.get('unsure'):
                    difficult = True
            x, y, w, h = gt[cls]
            if lo <= w * h < hi:
                parsed.append({
                    "name": cls,
                    "pose": False,
                    "truncated": False,
                    "difficult": difficult,
                    "bbox": [x, y, x + w, y + h],
                })
    return parsed


def crowd_ap(rec, prec, use_07_metric=False):
    """Compute AP given recall and precision arrays.

    If use_07_metric is true, uses the VOC 2007 11-point interpolation;
    otherwise integrates the area under the interpolated PR curve.
    """
    if use_07_metric:
        # Average the max precision at 11 evenly spaced recall thresholds.
        ap = 0.0
        for threshold in np.arange(0.0, 1.1, 0.1):
            above = rec >= threshold
            p = np.max(prec[above]) if above.any() else 0
            ap = ap + p / 11.0
        return ap

    # Pad with sentinels so the envelope/integration covers recall [0, 1].
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))

    # Monotone non-increasing precision envelope, computed right-to-left.
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]

    # Sum precision over the recall steps (points where recall changes).
    step = np.flatnonzero(mrec[1:] != mrec[:-1])
    return np.sum((mrec[step + 1] - mrec[step]) * mpre[step + 1])

def crowd_mr(miss_rate, fppi, num_of_samples=9):
    """Compute the log-average miss rate (LAMR) of an MR-FPPI curve.

    The curve is sampled at `num_of_samples` points evenly spaced in log
    space over FPPI in [1e-2, 1e0]; the geometric mean of the sampled miss
    rates is returned.

    Args:
        miss_rate (array-like): miss-rate values.
        fppi (array-like): FPPI values.
        num_of_samples (int, optional): number of reference points sampled
            from the curve. Default 9.

    Returns:
        Number: log average miss-rate.
    """
    # Prepend sentinels so every reference point has a predecessor on the
    # curve: min(samples) = 0.01 while the inserted FPPI value is -1.0.
    fppi_ext = np.insert(fppi, 0, -1.0)
    mr_ext = np.insert(miss_rate, 0, 1.0)

    samples = np.logspace(-2.0, 0.0, num=num_of_samples)
    for idx, point in enumerate(samples):
        # Last curve position whose FPPI does not exceed the reference point.
        pos = np.where(fppi_ext <= point)[-1][-1]
        samples[idx] = mr_ext[pos]

    # Clamp at 1e-10 so log(0) never occurs.
    return math.exp(np.log(np.maximum(1e-10, samples)).mean())

@jit  # numba-compiled, roughly 10x faster than plain numpy here
def iou(BBGT, bb):
    """Return (max overlap, argmax index) of box `bb` against rows of `BBGT`.

    Boxes are (x1, y1, x2, y2); the +1 terms follow the VOC convention of
    inclusive pixel coordinates.
    """
    inter_x1 = np.maximum(BBGT[:, 0], bb[0])
    inter_y1 = np.maximum(BBGT[:, 1], bb[1])
    inter_x2 = np.minimum(BBGT[:, 2], bb[2])
    inter_y2 = np.minimum(BBGT[:, 3], bb[3])
    inter_w = np.maximum(inter_x2 - inter_x1 + 1.0, 0.0)
    inter_h = np.maximum(inter_y2 - inter_y1 + 1.0, 0.0)
    inter_area = inter_w * inter_h

    # union = area(bb) + area(gt) - intersection
    bb_area = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
    gt_areas = (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
    overlaps = inter_area / (bb_area + gt_areas - inter_area)

    return np.max(overlaps), np.argmax(overlaps)

def ious(image_ids, class_recs, BB, ovthresh):
    """Greedily match detections (already sorted by confidence) to GT boxes.

    Mutates class_recs[...]["det"] to mark claimed GT boxes. Detections that
    best-match a "difficult" GT count as neither TP nor FP.

    Returns:
        (num detections, tp flag array, fp flag array)
    """
    num_dets = len(image_ids)
    tp = np.zeros(num_dets)
    fp = np.zeros(num_dets)
    for idx, img_id in enumerate(image_ids):
        record = class_recs[img_id]
        det_box = BB[idx, :].astype(float)
        gt_boxes = record["bbox"].astype(float)

        best_ov = -np.inf
        best_j = -1
        if gt_boxes.size > 0:
            best_ov, best_j = iou(gt_boxes, det_box)

        if best_ov <= ovthresh:
            # No GT at all, or insufficient overlap: false positive.
            fp[idx] = 1.0
        elif not record["difficult"][best_j]:
            if record["det"][best_j]:
                # GT already claimed by a higher-confidence detection.
                fp[idx] = 1.0
            else:
                tp[idx] = 1.0
                record["det"][best_j] = 1
    return num_dets, tp, fp

def crowd_eval(detpath, anns, classname, ovthresh=0.5, use_07_metric=False, logger=None, area_range=(0, 1e10)):
    """rec, prec, ap, lamr = crowd_eval(detpath,
                                anns,
                                classname,
                                [ovthresh],
                                [use_07_metric])

    Top level function that does the crowd evaluation.

    detpath: Path template to detections;
        detpath.format(classname) should produce the detection results file
        (one "image_id score x1 y1 x2 y2" line per detection).
    anns: list of parsed .odgt annotation dicts, one per image.
    classname: Category name (e.g. "fbox"/"hbox")
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use crowd07's 11 point AP computation
        (default False)
    logger: optional logger for a per-class summary line.
    area_range: [lo, hi) box-area interval applied to both GT and detections
        (used for small/medium/large breakdowns).

    Returns:
        (recall array, precision array, AP, log-average miss rate)
    """
    # NOTE: the original timing code used time.clock(), which was removed in
    # Python 3.8, and the measured values were never used — dropped entirely.

    # first load gt
    imagenames = [ann["ID"] for ann in anns]

    # load annots
    recs = {}
    for ann in anns:
        recs[ann["ID"]] = parse_rec(ann, area_range)

    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj["name"] == classname]
        bbox = np.array([x["bbox"] for x in R])
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        difficult = np.array([x["difficult"] for x in R]).astype(bool)
        det = [False] * len(R)
        # "difficult" boxes are excluded from the positive count.
        npos = npos + sum(~difficult)
        class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}

    # read dets
    detfile = detpath.format(classname)
    with open(detfile, "r") as f:
        lines = f.readlines()

    splitlines = [x.strip().split(" ") for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)

    # Filter detections to the same area range applied to the GT.
    areas = BB[..., [2, 3]] - BB[..., [0, 1]]
    areas = areas[..., 0] * areas[..., 1]
    areas_idx = (areas >= area_range[0]) & (areas < area_range[1])

    confidence = confidence[areas_idx]
    BB = BB[areas_idx]
    image_ids = [image_ids[i] for i in range(len(image_ids)) if areas_idx[i]]

    # sort by confidence (descending) before greedy matching
    sorted_ind = np.argsort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]
    nd, tp, fp = ious(image_ids, class_recs, BB, ovthresh)

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = crowd_ap(rec, prec, use_07_metric)

    # Miss-rate / false-positives-per-image curve for the LAMR metric.
    # If npos is 0, the corresponding elements of mr are nan/inf.
    mr = 1 - tp / npos
    fppi = fp / len(anns)
    lamr = crowd_mr(mr, fppi)

    if logger:
        logger.info("{}: Thresh {:.2f}: Scale = {:d} AP = {:.6f} LAMR={:.6f}".format(classname, ovthresh, int(np.sqrt(area_range[0])/32), ap, lamr))

    return rec, prec, ap, lamr
