# -*- coding:utf-8 -*-

# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------

import xml.etree.ElementTree as ET
import os
import cPickle
import numpy as np

from collections import OrderedDict

def parse_rec(filename):
    """Parse a PASCAL VOC annotation xml file into a list of object dicts.

    Each dict carries the object's class name, pose, truncated/difficult
    flags (as ints) and its bounding box as [xmin, ymin, xmax, ymax].
    """
    objects = []
    root = ET.parse(filename)
    for node in root.findall('object'):
        box_node = node.find('bndbox')
        objects.append({
            'name': node.find('name').text,
            'pose': node.find('pose').text,
            'truncated': int(node.find('truncated').text),
            'difficult': int(node.find('difficult').text),
            'bbox': [int(box_node.find(tag).text)
                     for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
        })

    return objects

def voc_ap(rec, prec, use_voc_metric=False, use_07_metric=False):
    """ap = voc_ap(rec, prec, [use_voc_metric], [use_07_metric])

    Compute VOC AP given precision and recall arrays.

    When use_voc_metric is False (the default), returns the harmonic mean
    of the final precision/recall point instead of an area-under-curve AP.
    When use_voc_metric is True, use_07_metric selects between the VOC07
    11-point interpolation and the all-point (area under the envelope)
    computation.
    """
    if not use_voc_metric:
        # Harmonic mean (F1-style) of the last precision/recall values.
        return 2 / (1 / prec[-1] + 1 / rec[-1])

    if use_07_metric:
        # VOC07: average the max precision at 11 evenly spaced recall levels.
        ap = 0.
        for thresh in np.arange(0., 1.1, 0.1):
            mask = rec >= thresh
            p = np.max(prec[mask]) if np.any(mask) else 0
            ap += p / 11.
        return ap

    # All-point interpolation: append sentinels so the curve is closed.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # Make precision monotonically non-increasing (the precision envelope).
    for idx in range(mpre.size - 1, 0, -1):
        mpre[idx - 1] = np.maximum(mpre[idx - 1], mpre[idx])

    # Area under the PR curve: sum (delta recall) * precision at the points
    # where the recall value actually changes.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])

def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             dict_list,
             conf_thresh=0.01,
             ovthresh=0.5,
             use_voc_metric=False,
             use_07_metric=False,
             vis_eval=False):
    """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname,
                                cachedir, dict_list, [conf_thresh], [ovthresh],
                                [use_voc_metric], [use_07_metric], [vis_eval])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections;
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations;
        annopath.format(imagename) should be the xml annotations file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name being evaluated.
    cachedir: Directory for caching the parsed annotations.
    dict_list: Output list collecting per-box evaluation records
        (predicted boxes for TP/FP cases, ground-truth boxes for FN cases).
    conf_thresh: Confidence threshold for keeping detections (default 0.01).
    ovthresh: IoU overlap threshold for a true positive (default 0.5).
    use_voc_metric: Whether to use VOC's AP computation (default False).
    use_07_metric: Whether to use VOC07's 11-point AP (default False).
    vis_eval: When True, append per-box records to dict_list.

    Returns (recall array, precision array, ap).
    Assumes at least one detection survives the confidence filter;
    otherwise the trailing-element reporting below would fail (as in the
    original implementation).
    """
    # assumes detections are in detpath.format(classname)
    # assumes annotations are in annopath.format(imagename)
    # assumes imagesetfile is a text file with each line an image name
    # cachedir caches the annotations in a pickle file

    # first load the ground truth
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read the list of image names
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    if not os.path.isfile(cachefile):
        # parse every annotation xml and cache the result
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        # FIX: pickle data must be written/read in binary mode ('wb'/'rb');
        # text mode corrupts the stream on some platforms and fails on py3
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            cPickle.dump(recs, f)
    else:
        # load the cached ground-truth boxes for the evaluation set
        with open(cachefile, 'rb') as f:
            recs = cPickle.load(f)

    # extract the gt objects of this class for every image
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # FIX: np.bool was removed in NumPy >= 1.24; the builtin bool is
        # equivalent here and works on every NumPy version
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # ~difficult filters out boxes annotated <difficult>1</difficult>,
        # so npos counts only the non-difficult gt boxes (recall denominator)
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # read the detections: one line per predicted object,
    # "image_id confidence xmin ymin xmax ymax"; the same image id may
    # appear several times (several predicted boxes per image)
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # drop predicted boxes below the confidence threshold
    filter_inds = np.where(confidence >= conf_thresh)[0]
    # FIX: index directly by the kept indices instead of the original
    # O(n^2) "if i in list(filter_inds)" membership scan (same order,
    # since np.where returns indices in ascending order)
    image_ids = [image_ids[i] for i in filter_inds]
    confidence = confidence[filter_inds]
    BB = BB[filter_inds, :]

    # sort detections by descending confidence
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)  # negated scores; negate when reporting
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]

    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]  # gt record for this detection's image
        bb = BB[d, :].astype(float)   # predicted box
        ovmax = -np.inf
        jmax = -1                     # index of the best-overlapping gt box
        BBGT = R['bbox'].astype(float)  # all gt boxes of this class (may be empty)

        if BBGT.size > 0:
            # IoU between this detection and every gt box (intersection)
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)    # best IoU against any gt box
            jmax = np.argmax(overlaps)  # row index of that gt box
        else:
            # image has no gt box of this class: any detection here is a FP
            # (state=0); overlaps recorded as 0.0
            fp[d] = 1.
            # FIX: guard with vis_eval for consistency — every other record
            # row (TP/FP/FN) was already conditional on vis_eval
            if vis_eval:
                row5 = OrderedDict(
                    [('ImgName', str(image_ids[d])), ('ClsName', classname), ('score', - sorted_scores[d]),
                     ('xmin', bb[0] - 1), ('ymin', bb[1] - 1), ('xmax', bb[2] - 1),
                     ('ymax', bb[3] - 1), ('overlaps', float(0.0)), ('state', int(0))])
                dict_list.append(row5)

        if ovmax > ovthresh:
            # detections matching a difficult gt box are ignored (neither TP nor FP)
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    # correct detection: right class, IoU above threshold
                    tp[d] = 1.
                    # claim this gt box so lower-scoring detections cannot reuse it
                    R['det'][jmax] = 1
                    # coordinates were stored off-by-one when the detection
                    # file was written (see pascal_voc._write_voc_results_file),
                    # hence the -1 when reporting; state=1 marks a TP
                    if vis_eval:
                        row1 = OrderedDict(
                            [('ImgName', str(image_ids[d])), ('ClsName', classname), ('score', - sorted_scores[d]),
                             ('xmin', bb[0] - 1.), ('ymin', bb[1] - 1.), ('xmax', bb[2] - 1.), ('ymax', bb[3] - 1.),
                             ('overlaps', float(ovmax)), ('state', int(1))])
                        dict_list.append(row1)
                else:
                    # gt box already claimed by a higher-scoring detection:
                    # this duplicate detection is a FP (state=0). NMS keeps
                    # duplicates rare, but several surviving boxes can still
                    # overlap the same gt box.
                    fp[d] = 1.
                    if vis_eval:
                        row2 = OrderedDict(
                            [('ImgName', str(image_ids[d])), ('ClsName', classname), ('score', - sorted_scores[d]),
                             ('xmin', bb[0] - 1.), ('ymin', bb[1] - 1.), ('xmax', bb[2] - 1.), ('ymax', bb[3] - 1.),
                             ('overlaps', float(ovmax)), ('state', int(0))])
                        dict_list.append(row2)
        elif BBGT.size > 0:
            # FIX: the original tested `0 < ovmax < ovthresh`, which left
            # detections with ovmax == 0 or ovmax == ovthresh unmarked
            # (neither TP nor FP). Per the reference VOC protocol, every
            # detection not matching a gt box above the threshold is a FP.
            # The BBGT.size == 0 case was already marked FP above.
            fp[d] = 1.
            if vis_eval:
                row3 = OrderedDict(
                    [('ImgName', str(image_ids[d])), ('ClsName', str(classname)), ('score', - sorted_scores[d]),
                     ('xmin', bb[0] - 1.), ('ymin', bb[1] - 1.), ('xmax', bb[2] - 1.), ('ymax', bb[3] - 1.),
                     ('overlaps', float(ovmax)), ('state', int(0))])
                dict_list.append(row3)

    # Any gt box still unclaimed ('det' is False) after all detections were
    # scanned is a missed detection (FN). Note npos == #TP + #FN, since
    # 'det' is True exactly for the TP matches.
    if vis_eval:
        for imagename in imagenames:
            class_obj = class_recs[imagename]
            for i in range(len(class_obj['det'])):
                if not class_obj['det'][i]:
                    bbox = np.array(class_obj['bbox'][i])
                    # state=-1 marks a FN; score/overlaps carry the sentinel 2.0
                    row4 = OrderedDict(
                        [('ImgName', str(imagename)), ('ClsName', classname), ('score', float(2.0)),
                         ('xmin', bbox[0]), ('ymin', bbox[1]), ('xmax', bbox[2]),
                         ('ymax', bbox[3]), ('overlaps', float(2.0)), ('state', int(-1))])
                    dict_list.append(row4)

    # cumulative TP / FP counts (same length as the detection list)
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)

    # print a small TP/FP/FN confusion matrix
    teams_list = [1, 0]
    data = np.array([['TP={}'.format(tp[-1]), 'FP={}'.format(fp[-1])],
                     ['FN={}'.format(npos - tp[-1]), None]])
    row_format = "{:>15}" * (len(teams_list) + 1)
    print('')
    print('----------Confusion Matrix for {}--------------'.format(classname))
    print(row_format.format("", *teams_list))
    for team, row in zip(teams_list, data):
        print(row_format.format(team, *row))

    # npos (non-difficult gt boxes) is the recall denominator
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)

    ap = voc_ap(rec, prec, use_voc_metric, use_07_metric)

    print('Precision for {} = {:.4f}'.format(classname, prec[-1]))
    print('Recall for {} = {:.4f}'.format(classname, rec[-1]))
    print('')

    return rec, prec, ap
