import os
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from collections.abc import Sequence
from itertools import chain
from multiprocessing import Pool

import numpy as np
import torch


def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
    """Compute pairwise overlaps between two sets of boxes.

    Args:
        bboxes1(ndarray): shape (n, 4)
        bboxes2(ndarray): shape (k, 4)
        mode(str): iou (intersection over union) or iof (intersection
            over foreground)
        eps(float): lower bound for the denominator to avoid div-by-zero
    Returns:
        ious(ndarray): shape (n, k)
    """
    assert mode in ['iou', 'iof']

    bboxes1 = bboxes1.astype(np.float32)
    bboxes2 = bboxes2.astype(np.float32)
    n, k = bboxes1.shape[0], bboxes2.shape[0]
    if n * k == 0:
        return np.zeros((n, k), dtype=np.float32)

    # Loop over the smaller set so the python loop is as short as possible.
    swapped = n > k
    if swapped:
        bboxes1, bboxes2 = bboxes2, bboxes1

    areas1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
    areas2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
    out = np.zeros((bboxes1.shape[0], bboxes2.shape[0]), dtype=np.float32)
    for idx, box in enumerate(bboxes1):
        ix0 = np.maximum(box[0], bboxes2[:, 0])
        iy0 = np.maximum(box[1], bboxes2[:, 1])
        ix1 = np.minimum(box[2], bboxes2[:, 2])
        iy1 = np.minimum(box[3], bboxes2[:, 3])
        inter = np.maximum(ix1 - ix0, 0) * np.maximum(iy1 - iy0, 0)
        if mode == 'iou':
            denom = areas1[idx] + areas2 - inter
        else:
            # 'iof': divide by the foreground (bboxes1) area; after the swap
            # above, the original bboxes1 areas live in areas2.
            denom = areas2 if swapped else areas1[idx]
        out[idx, :] = inter / np.maximum(denom, eps)
    if swapped:
        out = out.T
    return out


def average_precision(recalls, precisions, mode='area'):
    """Calculate average precision (for single or multiple scales).

    Args:
        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
        mode (str): 'area' or '11points'; 'area' integrates the area under
            the precision-recall curve, '11points' averages the precision
            at recalls [0, 0.1, ..., 1]
    Returns:
        float or ndarray: calculated average precision
    """
    squeeze_output = recalls.ndim == 1
    if squeeze_output:
        recalls = recalls[np.newaxis, :]
        precisions = precisions[np.newaxis, :]
    assert recalls.shape == precisions.shape and recalls.ndim == 2
    scales = recalls.shape[0]
    ap = np.zeros(scales, dtype=np.float32)
    if mode == 'area':
        pad_lo = np.zeros((scales, 1), dtype=recalls.dtype)
        pad_hi = np.ones((scales, 1), dtype=recalls.dtype)
        mrec = np.hstack((pad_lo, recalls, pad_hi))
        mpre = np.hstack((pad_lo, precisions, pad_lo))
        # make the precision envelope monotonically non-increasing
        for col in range(mpre.shape[1] - 1, 0, -1):
            np.maximum(mpre[:, col - 1], mpre[:, col], out=mpre[:, col - 1])
        for row in range(scales):
            # integrate only over recall steps that actually change
            changed = np.where(mrec[row, 1:] != mrec[row, :-1])[0]
            ap[row] = np.sum(
                (mrec[row, changed + 1] - mrec[row, changed])
                * mpre[row, changed + 1])
    elif mode == '11points':
        for row in range(scales):
            for thr in np.arange(0, 1 + 1e-3, 0.1):
                candidates = precisions[row, recalls[row, :] >= thr]
                ap[row] += candidates.max() if candidates.size > 0 else 0
        ap /= 11
    else:
        raise ValueError(
            'Unrecognized mode, only "area" and "11points" are supported')
    return ap[0] if squeeze_output else ap


def get_cls_results(det_results, annotations, class_id):
    """Collect detections and gt boxes belonging to one class.

    Args:
        det_results (list[list]): Same as `eval_map()`.
        annotations (list[dict]): Same as `eval_map()`.
        class_id (int): ID of a specific class.
    Returns:
        tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
    """
    cls_dets = [per_img[class_id] for per_img in det_results]

    cls_gts = []
    cls_gts_ignore = []
    for ann in annotations:
        keep = ann['labels'] == class_id
        cls_gts.append(ann['bboxes'][keep, :])

        labels_ignore = ann.get('labels_ignore', None)
        if labels_ignore is None:
            # no ignore annotations for this image
            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
        else:
            cls_gts_ignore.append(
                ann['bboxes_ignore'][labels_ignore == class_id, :])

    return cls_dets, cls_gts, cls_gts_ignore


def tpfp_default(det_bboxes,
                 gt_bboxes,
                 gt_bboxes_ignore=None,
                 iou_thr=0.5,
                 area_ranges=None,
                 img_idx=None):
    """Check if detected bboxes are true positive or false positive.

    Args:
        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
        gt_bboxes_ignore (ndarray | None): Ignored gt bboxes of this image,
            of shape (k, 4). Default: None.
        iou_thr (float): IoU threshold to be considered as matched.
            Default: 0.5.
        area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
            in the format [(min1, max1), (min2, max2), ...]. Default: None.
        img_idx (int | None): Index of this image; echoed back in the result
            when the image contains at least one false positive.
            Default: None.

    Returns:
        tuple: (tp, fp, fn, gt_save, img_idx) where
            - tp/fp (ndarray): 0/1 arrays of shape (num_scales, m);
            - fn (ndarray): per-scale 0/1 indicator of missed gts,
              shape (num_scales, n + k);
            - gt_save (ndarray): boxes of the missed gts, shape
              (num_scales, n + k, 4), zero rows for matched gts;
            - img_idx (int): the given ``img_idx`` if any fp exists, else -1.
    """
    if gt_bboxes_ignore is None:
        # Robustness fix: the docstring allows None but the original code
        # crashed on it; treat None as "no ignored boxes".
        gt_bboxes_ignore = np.empty((0, 4), dtype=np.float32)
    # an indicator of ignored gts
    # NOTE: np.bool was removed in NumPy 1.24; use the builtin bool instead.
    gt_ignore_inds = np.concatenate(
        (np.zeros(gt_bboxes.shape[0], dtype=bool),
         np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
    # stack gt_bboxes and gt_bboxes_ignore for convenience
    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))

    num_dets = det_bboxes.shape[0]
    num_gts = gt_bboxes.shape[0]
    if area_ranges is None:
        area_ranges = [(None, None)]
    num_scales = len(area_ranges)
    # tp and fp are of shape (num_scales, num_dets); each row is the tp or
    # fp of one area-range scale
    tp = np.zeros((num_scales, num_dets), dtype=np.float32)
    fp = np.zeros((num_scales, num_dets), dtype=np.float32)

    # if there is no gt bbox in this image, then every det bbox
    # within the area range is a false positive
    if gt_bboxes.shape[0] == 0:
        fn = np.zeros((num_scales, 0), dtype=np.float32)
        gt_save = np.zeros((num_scales, 0, 4), dtype=np.float32)
        if area_ranges == [(None, None)]:
            fp[...] = 1
        else:
            det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
                det_bboxes[:, 3] - det_bboxes[:, 1])
            for i, (min_area, max_area) in enumerate(area_ranges):
                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
        return tp, fp, fn, gt_save, -1

    ious = bbox_overlaps(det_bboxes, gt_bboxes)
    # for each det, the max iou over all gts
    ious_max = ious.max(axis=1)
    # for each det, the gt that overlaps most with it
    ious_argmax = ious.argmax(axis=1)

    # record the missed gts (false negatives) and their boxes so that bad
    # cases can be inspected later
    fn = np.zeros((num_scales, num_gts), dtype=np.bool_)
    gt_save = np.zeros((num_scales, num_gts, 4), dtype=np.float32)
    # sort all dets in descending order by score
    sort_inds = np.argsort(-det_bboxes[:, -1])
    for k, (min_area, max_area) in enumerate(area_ranges):
        gt_covered = np.zeros(num_gts, dtype=bool)
        # if no area range is specified, gt_area_ignore is all False
        if min_area is None:
            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
        else:
            gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
                gt_bboxes[:, 3] - gt_bboxes[:, 1])
            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
        # greedily match dets to gts in descending score order
        for i in sort_inds:
            # this det overlaps some gt enough to count as a match
            if ious_max[i] >= iou_thr:
                matched_gt = ious_argmax[i]
                if not (gt_ignore_inds[matched_gt]
                        or gt_area_ignore[matched_gt]):
                    if not gt_covered[matched_gt]:
                        gt_covered[matched_gt] = True
                        tp[k, i] = 1
                    else:
                        # gt already claimed by a higher-scoring det
                        fp[k, i] = 1
                # otherwise ignore this detected bbox, tp = 0, fp = 0
            elif min_area is None:
                fp[k, i] = 1
            else:
                bbox = det_bboxes[i, :4]
                area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
                if area >= min_area and area < max_area:
                    fp[k, i] = 1

        # every gt never covered at this scale is a false negative
        fn[k] = (~gt_covered)
        gt_save[k, fn[k]] = gt_bboxes[fn[k]]

    if fp.sum() > 0:
        return tp, fp, fn.astype(np.float32), gt_save, img_idx
    else:
        return tp, fp, fn.astype(np.float32), gt_save, -1


def eval_map(det_results,
             annotations,
             scale_ranges=None,
             iou_thr=0.5,
             dataset=None,
             logger=None,
             tpfp_fn=None,
             nproc=4):
    """Evaluate mAP of a dataset.

    Args:
        det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
            The outer list indicates images, and the inner list indicates
            per-class detected bboxes.
        annotations (list[dict]): Ground truth annotations where each item of
            the list indicates an image. Keys of annotations are:
            - `bboxes`: numpy array of shape (n, 4)
            - `labels`: numpy array of shape (n, )
            - `bboxes_ignore` (optional): numpy array of shape (k, 4)
            - `labels_ignore` (optional): numpy array of shape (k, )
        scale_ranges (list[tuple] | None): Range of scales to be evaluated,
            in the format [(min1, max1), (min2, max2), ...]. A range of
            (32, 64) means the area range between (32**2, 64**2).
            Default: None.
        iou_thr (float): IoU threshold to be considered as matched.
            Default: 0.5.
        dataset (list[str] | str | None): Dataset name or dataset classes;
            the 11-point AP is used when it equals 'voc07', the area AP
            otherwise. Default: None.
        logger (logging.Logger | str | None): The way to print the mAP
            summary. Currently unused (summary printing is disabled).
            Default: None.
        tpfp_fn (callable | None): The function used to determine true/
            false positives. If None, :func:`tpfp_default` is used.
            Default: None.
        nproc (int): Processes used for computing TP and FP.
            Default: 4.

    Returns:
        tuple: (mAP, [dict, dict, ...]) -- the second element holds one
            result dict per class (num_gts, num_dets, recall, precision,
            ap, fp/fn image indices, bad-case gt boxes, ...).
    """
    assert len(det_results) == len(annotations)

    num_imgs = len(det_results)
    num_scales = len(scale_ranges) if scale_ranges is not None else 1
    num_classes = len(det_results[0])  # positive class num
    area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
                   if scale_ranges is not None else None)

    # BUGFIX: tpfp_fn used to be unconditionally overwritten with
    # tpfp_default inside the per-class loop; honor a user-supplied
    # callable as the docstring promises.
    if tpfp_fn is None:
        tpfp_fn = tpfp_default
    if not callable(tpfp_fn):
        raise ValueError(
            f'tpfp_fn has to be a function or None, but got {tpfp_fn}')

    pool = Pool(nproc)
    eval_results = []
    try:
        # evaluate each class independently
        for cls_idx in range(num_classes):
            # get gt and det bboxes of this class in all imgs
            cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
                det_results, annotations, cls_idx)

            # compute tp and fp for each image with multiple processes
            tpfp = pool.starmap(
                tpfp_fn,
                zip(cls_dets, cls_gts, cls_gts_ignore,
                    [iou_thr] * num_imgs,
                    [area_ranges] * num_imgs,
                    range(num_imgs)))
            tp, fp, fn, gt_save, fp_img_idx = tuple(zip(*tpfp))

            # calculate gt number of each scale;
            # gts beyond the specified scale are not counted
            num_gts = np.zeros(num_scales, dtype=int)
            for bbox in cls_gts:
                if area_ranges is None:
                    num_gts[0] += bbox.shape[0]
                else:
                    gt_areas = (bbox[:, 2] - bbox[:, 0]) * (
                        bbox[:, 3] - bbox[:, 1])
                    for k, (min_area, max_area) in enumerate(area_ranges):
                        num_gts[k] += np.sum((gt_areas >= min_area)
                                             & (gt_areas < max_area))

            # sort all det bboxes by score, also sort tp and fp
            cls_dets = np.vstack(cls_dets)
            num_dets = cls_dets.shape[0]

            # image indices that contain at least one missed gt (fn); each
            # per-image fn array has shape (num_scales, num_gts_in_img)
            fn = np.where(
                np.squeeze(
                    np.hstack([arr.sum(axis=-1, keepdims=True)
                               for arr in fn])) > 0)[0].tolist()

            sort_inds = np.argsort(-cls_dets[:, -1])
            tp = np.hstack(tp)[:, sort_inds]
            fp = np.hstack(fp)[:, sort_inds]
            # calculate recall and precision with cumulative tp and fp
            tp = np.cumsum(tp, axis=1)
            fp = np.cumsum(fp, axis=1)
            eps = np.finfo(np.float32).eps
            recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
            precisions = tp / np.maximum((tp + fp), eps)
            # calculate AP
            if scale_ranges is None:
                recalls = recalls[0, :]
                precisions = precisions[0, :]
                num_gts = num_gts.item()
            mode = 'area' if dataset != 'voc07' else '11points'
            ap = average_precision(recalls, precisions, mode)

            # collect the gt boxes of every image that produced a fp or fn,
            # keyed by image index, for later bad-case inspection
            fp_img_idx = [idx for idx in fp_img_idx if idx >= 0]
            gt_save_ = defaultdict(list)
            for img_i in chain(fp_img_idx, fn):
                for scale_i in range(num_scales):
                    boxes = gt_save[img_i][scale_i]
                    # drop the all-zero rows that stand for matched gts
                    gt_save_[img_i].append(boxes[boxes.sum(axis=1) > 0])
            for key, parts in gt_save_.items():
                gt_save_[key] = np.concatenate(parts, axis=0)

            # final cumulative tp/fp counts and recall; with zero detections
            # the squeezed arrays are empty/0-d and [-1] raises IndexError
            try:
                tp = np.squeeze(tp)[-1]
            except IndexError:
                tp = 0.
            try:
                fp = np.squeeze(fp)[-1]
            except IndexError:
                fp = 0.
            try:
                simple_recall = np.squeeze(recalls)[-1]
            except IndexError:
                simple_recall = 1.

            eval_results.append({
                'num_gts': num_gts,
                'num_dets': num_dets,
                'recall': recalls,
                'precision': precisions,
                'ap': ap,
                'fp_img_idx': fp_img_idx,
                'fn_img_idx': fn,
                'tp_n': tp,
                'fp_n': fp,
                # all gt boxes involved in an error, keyed by image index
                'gt_save': gt_save_,
                'simple_recall': simple_recall
            })
    finally:
        # make sure worker processes are released even if a class fails
        pool.close()
        pool.join()

    if scale_ranges is not None:
        # shape (num_classes, num_scales)
        all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
        all_num_gts = np.vstack(
            [cls_result['num_gts'] for cls_result in eval_results])
        mean_ap = []
        for i in range(num_scales):
            if np.any(all_num_gts[:, i] > 0):
                mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
            else:
                mean_ap.append(0.0)
    else:
        # classes with no gt are excluded from the mean
        aps = [cls_result['ap'] for cls_result in eval_results
               if cls_result['num_gts'] > 0]
        mean_ap = np.array(aps).mean().item() if aps else 0.0

    return mean_ap, eval_results

def parse_rec(filename):
    """Parse a PASCAL VOC xml annotation file.

    Args:
        filename (str): path to the VOC-style XML file.

    Returns:
        list[dict]: one dict per <object> element with keys 'name', 'pose',
            'truncated', 'difficult' and 'bbox' ([xmin, ymin, xmax, ymax]
            as ints).
    """
    root = ET.parse(filename)
    parsed = []
    for node in root.findall("object"):
        box_node = node.find("bndbox")
        record = {
            "name": node.find("name").text,
            "pose": node.find("pose").text,
            "truncated": int(node.find("truncated").text),
            "difficult": int(node.find("difficult").text),
            "bbox": [int(box_node.find(tag).text)
                     for tag in ("xmin", "ymin", "xmax", "ymax")],
        }
        parsed.append(record)

    return parsed

def bbox2result(bboxes, labels, num_classes):
    """Convert detection results to a list of per-class numpy arrays.

    Args:
        bboxes (torch.Tensor | np.ndarray): shape (n, 5)
        labels (torch.Tensor | np.ndarray): shape (n, )
        num_classes (int): class number, including background class
    Returns:
        list(ndarray): bbox results of each class
    """
    if bboxes.shape[0] == 0:
        # no detections at all: one empty (0, 5) array per class
        return [np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes)]
    if isinstance(bboxes, torch.Tensor):
        bboxes = bboxes.detach().cpu().numpy()
        labels = labels.detach().cpu().numpy()
    return [bboxes[labels == cls, :] for cls in range(num_classes)]


def _recalls(all_ious, proposal_nums, thrs):

    img_num = all_ious.shape[0]
    total_gt_num = sum([ious.shape[0] for ious in all_ious])

    _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
    for k, proposal_num in enumerate(proposal_nums):
        tmp_ious = np.zeros(0)
        for i in range(img_num):
            ious = all_ious[i][:, :proposal_num].copy()
            gt_ious = np.zeros((ious.shape[0]))
            if ious.size == 0:
                tmp_ious = np.hstack((tmp_ious, gt_ious))
                continue
            for j in range(ious.shape[0]):
                gt_max_overlaps = ious.argmax(axis=1)
                max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
                gt_idx = max_ious.argmax()
                gt_ious[j] = max_ious[gt_idx]
                box_idx = gt_max_overlaps[gt_idx]
                ious[gt_idx, :] = -1
                ious[:, box_idx] = -1
            tmp_ious = np.hstack((tmp_ious, gt_ious))
        _ious[k, :] = tmp_ious

    _ious = np.fliplr(np.sort(_ious, axis=1))
    recalls = np.zeros((proposal_nums.size, thrs.size))
    for i, thr in enumerate(thrs):
        recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)

    return recalls


def set_recall_param(proposal_nums, iou_thrs):
    """Normalize proposal_nums and iou_thrs into numpy arrays.

    Sequences become arrays, bare scalars become length-1 arrays, a None
    iou_thrs falls back to [0.5]; anything else is passed through as-is.
    """
    if isinstance(proposal_nums, Sequence):
        nums = np.array(proposal_nums)
    else:
        nums = (np.array([proposal_nums])
                if isinstance(proposal_nums, int) else proposal_nums)

    if iou_thrs is None:
        thrs = np.array([0.5])
    elif isinstance(iou_thrs, Sequence):
        thrs = np.array(iou_thrs)
    else:
        thrs = (np.array([iou_thrs])
                if isinstance(iou_thrs, float) else iou_thrs)

    return nums, thrs


def eval_recalls(gts,
                 proposals,
                 proposal_nums=None,
                 iou_thrs=0.5,
                 logger=None):
    """Calculate recalls.

    Args:
        gts (list[ndarray]): a list of arrays of shape (n, 4)
        proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)
        proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.
        iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.
        logger (logging.Logger | str | None): The way to print the recall
            summary. Currently unused (summary printing is disabled).
            Default: None.
    Returns:
        ndarray: recalls of different ious and proposal nums
    """
    img_num = len(gts)
    assert img_num == len(proposals)

    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)

    all_ious = []
    for i in range(img_num):
        if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
            # proposals carry a score column: sort them by score first
            scores = proposals[i][:, 4]
            sort_idx = np.argsort(scores)[::-1]
            img_proposal = proposals[i][sort_idx, :]
        else:
            img_proposal = proposals[i]
        prop_num = min(img_proposal.shape[0], proposal_nums[-1])
        if gts[i] is None or gts[i].shape[0] == 0:
            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
        all_ious.append(ious)
    # BUGFIX: the per-image IoU matrices are ragged, and np.array() on a
    # ragged list raises ValueError on NumPy >= 1.24. Build the object
    # array explicitly instead.
    ious_arr = np.empty(img_num, dtype=object)
    for i, ious in enumerate(all_ious):
        ious_arr[i] = ious
    recalls = _recalls(ious_arr, proposal_nums, iou_thrs)

    return recalls


def evaluate(results,
             annotations,
             # ann_parse_func,
             metric='mAP',
             logger=None,
             proposal_nums=(1, 10, 36),
             iou_thr=0.5,
             scale_ranges=None,
             CLASSES=None,
             nproc=4):
    """Evaluate in VOC protocol.

    Args:
        results (list[list | tuple]): Testing results of the dataset.
        annotations (list[dict]): Ground truth, same format as `eval_map()`.
        metric (str | list[str]): Metrics to be evaluated. Options are
            'mAP', 'recall'. A single-element list is also accepted.
        logger (logging.Logger | str, optional): Logger used for printing
            related information during evaluation. Default: None.
        proposal_nums (Sequence[int]): Proposal numbers used for evaluating
            recalls, such as recall@100, recall@1000.
            Default: (1, 10, 36).
        iou_thr (float | list[float]): IoU threshold(s). Default: 0.5.
        scale_ranges (list[tuple], optional): Scale ranges for evaluating
            mAP. If not specified, all bounding boxes would be included in
            evaluation. Default: None.
        CLASSES (list[str] | None): Dataset class names; when None the
            'voc07' 11-point AP is used. Default: None.
        nproc (int): Processes used for computing TP and FP. Default: 4.

    Returns:
        tuple: (OrderedDict of AP/recall metrics, per-threshold class
            results for 'mAP', or None for 'recall').
    """
    if not isinstance(metric, str):
        assert len(metric) == 1
        metric = metric[0]
    allowed_metrics = ['mAP', 'recall']
    if metric not in allowed_metrics:
        raise KeyError(f'metric {metric} is not supported')
    eval_results = OrderedDict()
    # always work on a list of thresholds so both branches can iterate it
    iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
    if metric == 'mAP':
        assert isinstance(iou_thrs, list)
        ds_name = 'voc07' if CLASSES is None else CLASSES
        mean_aps = []
        others = []
        for thr in iou_thrs:
            mean_ap, other = eval_map(
                results,
                annotations,
                # BUGFIX: forward the scale_ranges argument instead of
                # hard-coding None (backward compatible: default is None).
                scale_ranges=scale_ranges,
                iou_thr=thr,
                dataset=ds_name,
                logger=logger,
                nproc=nproc)
            mean_aps.append(mean_ap)
            others.append(other)
            eval_results[f'AP{int(thr * 100):02d}'] = round(mean_ap, 3)
        eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        return eval_results, others
    elif metric == 'recall':
        gt_bboxes = [ann['bboxes'] for ann in annotations]
        # BUGFIX: iterate iou_thrs (always a list) rather than iou_thr,
        # which is a bare non-iterable float under the default argument.
        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        for i, num in enumerate(proposal_nums):
            for j, iou in enumerate(iou_thrs):
                eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
        if recalls.shape[1] > 1:
            ar = recalls.mean(axis=1)
            for i, num in enumerate(proposal_nums):
                eval_results[f'AR@{num}'] = ar[i]
        return eval_results, None


class AnalyzeEval(object):
    """Aggregate per-class results of ``eval_map`` and dump bad cases."""

    def __init__(self, eval_res):
        """Build flat attributes from a list of per-class result dicts.

        Each key of the per-class dicts becomes an attribute on this
        object, holding the values of all classes merged into one flat
        list by :meth:`decode_format`. Expected keys (see ``eval_map``):
        'num_gts', 'num_dets', 'recall', 'precision', 'ap',
        'fp_img_idx', 'fn_img_idx', 'tp_n', 'fp_n', 'gt_save',
        'simple_recall'
        """
        # gather each key's values across all classes
        var = defaultdict(list)
        for k in list(eval_res[0].keys()):
            for i in eval_res:
                var[k].append(i[k])
        for k, v in var.items():
            # flatten into a plain python list where the payload allows it
            v = self.decode_format(v)
            self.__setattr__(k, v)

    def per_cls(self):
        # placeholder: per-class analysis not implemented yet
        pass

    def show_save(self, logger, img_idx=None, save_path='result/result.txt'):
        """Log precision/recall and write fp/fn bad cases to a text file.

        Args:
            logger (logging.Logger): logger for the summary lines.
            img_idx (Sequence | None): maps an image index to an identifier
                (presumably a filename -- verify against the caller); when
                None, no per-image lists are written to the file.
            save_path (str): output path; parent directories are created.

        Returns:
            tuple[list, list]: ([identifier, index] pairs for fp images,
                same for fn images).
        """
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        # de-duplicate image indices collected across classes
        self.fp_img_idx = np.unique(self.fp_img_idx)
        self.fn_img_idx = np.unique(self.fn_img_idx)
        # logger.info('fp_img_idx: ', self.fp_img_idx)
        # logger.info('fn_img_idx: ', self.fn_img_idx)
        tp_n = sum(self.tp_n)
        fp_n = sum(self.fp_n)
        # overall precision in percent; max(...) guards against zero dets
        precision = tp_n / max(sum(self.num_dets), 1) * 100
        # mean of the per-class final recalls, in percent
        recall = sum(self.simple_recall) / len(self.simple_recall) * 100
        logger.info('precision: {}'.format(precision))
        logger.info('recall: {}'.format(recall))

        res_fp = []
        if img_idx is not None:
            for i in self.fp_img_idx:
                res_fp.append([img_idx[i], i])

        res_fn = []
        if img_idx is not None:
            for i in self.fn_img_idx:
                res_fn.append([img_idx[i], i])

        # collect the missed gt boxes per fn image across all classes.
        # NOTE: pop() mutates self.gt_save, so this method is one-shot.
        gt_error_show = defaultdict(list)
        for i in self.fn_img_idx:
            for cls in self.gt_save:
                gt_error_show[i].append(cls.pop(i, np.zeros((0, 4))))
            gt_error_show[i] = np.unique(np.concatenate(gt_error_show[i], axis=0), axis=0).tolist()

        with open(save_path, 'w') as f:
            f.write("precision: {}\n".format(precision))
            f.write("recall: {}\n".format(recall))

            # false-positive section: one "<identifier>     <index>" per line
            f.write('fp:\n')
            ids = "["
            for ff, idx in res_fp:
                f.write(ff + '     ' + str(idx) + '\n')
                ids += "{},".format(idx)

            # compact bracketed index list (strip the trailing comma)
            f.write('img_idx:\n')
            if len(ids) > 1:
                f.write(ids[:-1] + "]\n")
            else:
                f.write("]\n")

            f.write('*' * 150 + ':\n')

            # false-negative section, with the missed gt boxes per image
            f.write('fn:\n')
            ids = "["
            for ff, idx in res_fn:
                f.write(ff + '     ' + str(idx) + '\n')
                f.write(str(gt_error_show[idx]) + '\n')
                ids += "{},".format(idx)

            f.write('img_idx:\n')
            if len(ids) > 1:
                f.write(ids[:-1] + "]\n")
            else:
                f.write("]\n")
        return res_fp, res_fn

    @staticmethod
    def decode_format(data):
        """Normalize heterogeneous per-class values into one flat list.

        Lists/tuples are chained; numpy arrays are concatenated (higher
        ranks are squeezed first and assumed to reach 1-D); plain and
        numpy scalars pass through; any other payload (e.g. the gt_save
        dicts) is returned unchanged.
        """
        if not isinstance(data, (list, tuple)):
            data = [data]
        if isinstance(data[0], (list, tuple)):
            data = list(chain(*data))
        elif isinstance(data[0], np.ndarray):
            if data[0].ndim != 1:
                data = [np.squeeze(d) for d in data]
            assert data[0].ndim == 1
            data = list(chain(*[d.tolist() for d in data]))
        elif isinstance(data[0], (int, float)):
            data = data
        elif isinstance(data[0], (np.float32, np.float64)):
            data = [d.tolist()
                    if not isinstance(d, (int, float)) else d
                    for d in data]
        else:
            # raise NotImplementedError
            return data
        return data


# if __name__ == '__main__':
    # iou_thr = [0.5]
    # mAP_eval, others = evaluate(res_all, ann_all,
    #                                   metric='mAP',  # recall
    #                                   iou_thr=iou_thr, nproc=1)

    # a_res = AnalyzeEval(others[0])
    # a_res.show_save(ann_idx, 'result.txt')
