import os
import shutil
import numpy as np
import json
import pandas as pd
from copy import deepcopy
from .cvio import cvio
from .mean_ap import eval_map

"""
计算GT和预测框的准确率，准确率 = 正确识别个数 / （错误识别个数+漏识别个数），
其中错误识别个数包括错误识别和多识别两种情况；
得到每张图片的识别率之后，取所有图片识别率的平均即为整体识别率。

目前仅支持labelme格式数据的评估。
"""


def load_json(src):
    """Read a UTF-8 encoded JSON file and return the parsed object."""
    with open(src, encoding='utf-8') as handle:
        return json.load(handle)


def matrix_iou(bboxes1, bboxes2):
    """Pairwise IoU between two sets of axis-aligned boxes.

    Boxes are ``[x1, y1, x2, y2]``. Replaces the previous per-box Python
    loop with a single broadcast computation, and returns 0 (instead of
    NaN from 0/0) when both boxes of a pair are degenerate (zero area).

    Args:
        bboxes1: (N, 4) array-like of reference boxes.
        bboxes2: (M, 4) array-like of boxes to compare against.

    Returns:
        (N, M) float64 ndarray where ``result[i, j]`` is the IoU of
        ``bboxes1[i]`` and ``bboxes2[j]`` — same orientation as the
        original implementation (it built an (M, N) matrix and returned
        its transpose).
    """
    bboxes1 = np.asarray(bboxes1, dtype=np.float64)
    bboxes2 = np.asarray(bboxes2, dtype=np.float64)
    if len(bboxes1) == 0 or len(bboxes2) == 0:
        return np.zeros((len(bboxes1), len(bboxes2)))

    # Intersection corners via broadcasting: shape (N, M).
    x1 = np.maximum(bboxes1[:, None, 0], bboxes2[None, :, 0])
    y1 = np.maximum(bboxes1[:, None, 1], bboxes2[None, :, 1])
    x2 = np.minimum(bboxes1[:, None, 2], bboxes2[None, :, 2])
    y2 = np.minimum(bboxes1[:, None, 3], bboxes2[None, :, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)

    areas1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
    areas2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
    union = areas1[:, None] + areas2[None, :] - inter

    # Guarded division: pairs with zero union get IoU 0 instead of NaN.
    iou = np.zeros_like(union)
    np.divide(inter, union, out=iou, where=union > 0)
    return iou


def ply2box(ply):
    """Axis-aligned bounding box ``[xmin, ymin, xmax, ymax]`` of a polygon.

    Args:
        ply: iterable of ``[x, y]`` points.
    """
    pts = np.array(ply)
    x_lo, y_lo = pts[:, 0].min(), pts[:, 1].min()
    x_hi, y_hi = pts[:, 0].max(), pts[:, 1].max()
    return [x_lo, y_lo, x_hi, y_hi]


def get_bboxes(ann_info):
    """Extract boxes and labels from a labelme annotation dict.

    Args:
        ann_info: parsed labelme json with a 'shapes' list; each shape
            carries 'points' (polygon) and 'label'.

    Returns:
        dict with 'bboxes' (ndarray, one [x1,y1,x2,y2] row per shape)
        and 'labels' (list of label strings, same order).
    """
    boxes = []
    labels = []
    for shp in ann_info['shapes']:
        boxes.append(ply2box(shp['points']))
        labels.append(shp['label'])
    return dict(bboxes=np.array(boxes), labels=labels)


def accuracy(gts, pts, iouthr=0.5):
    """Score one image's predictions against its ground truth.

    Every gt box is paired with its highest-IoU prediction: overlap below
    ``iouthr`` counts as a miss, a label mismatch as an error, otherwise a
    true detection.  Predictions whose best gt overlap is below the
    threshold count as extras (``n_rest``).  Misses, errors and extras all
    enter the accuracy denominator.

    Args:
        gts: dict with 'bboxes' (N,4 ndarray) and 'labels' (list).
        pts: dict with 'bboxes' (M,4 ndarray) and 'labels' (list).
        iouthr: minimum IoU for a gt/pt pair to count as matched.

    Returns:
        dict with accuracy / precision / recall and the four raw counts
        (n_true, n_error, n_miss, n_rest).
    """
    gt_labels = gts['labels']
    pt_labels = pts['labels']
    ioum = matrix_iou(gts['bboxes'], pts['bboxes'])

    n_true = n_error = n_miss = 0
    # rows: one per gt box -> missed, correct, or wrongly labeled
    for gi, row in enumerate(ioum):
        pj = int(np.argmax(row))
        if row[pj] < iouthr:
            n_miss += 1
        elif gt_labels[gi] == pt_labels[pj]:
            n_true += 1
        else:
            n_error += 1

    # columns: predictions no gt box overlaps sufficiently are extras
    n_rest = int(np.count_nonzero(ioum.max(axis=0) < iouthr))

    acc = n_true / (n_true + n_error + n_rest + n_miss)
    precision = n_true / (n_true + n_error + n_rest)
    recall = n_true / (n_true + n_error + n_miss)
    return dict(accuracy=acc, precision=precision, recall=recall,
                n_miss=n_miss, n_rest=n_rest,
                n_true=n_true, n_error=n_error)


class MeanAP:
    """Accumulator that buffers gt/prediction pairs for a mAP evaluation.

    Boxes and labels are collected per image via ``update_gt_pt_pair`` and
    converted into the structure expected by the project-level ``eval_map``
    when ``eval_map`` is invoked.
    """

    def __init__(self):
        self.classes = []    # every label string seen, gt and pt alike
        self.gt_bboxes = []  # one (N,4) array per image
        self.gt_labels = []  # one label list per image (ids after eval_map)
        self.gt_ignore = []  # per-image ignore flags (always all False)
        self.pt_bboxes = []  # one (M,4) array per image
        self.pt_labels = []  # one label list per image

    def update_gt_pt_pair(self, gtinfo, ptinfo):
        """Buffer one image's ground truth and predictions.

        Args:
            gtinfo: dict with 'bboxes' and 'labels' (ground truth).
            ptinfo: dict with 'bboxes' and 'labels' (predictions).
        """
        for info, bbox_store, label_store in (
                (gtinfo, self.gt_bboxes, self.gt_labels),
                (ptinfo, self.pt_bboxes, self.pt_labels)):
            self.classes.extend(info['labels'])
            bbox_store.append(info['bboxes'])
            label_store.append(info['labels'])
        # nothing is ignored in this evaluation
        self.gt_ignore.append(np.array([False] * len(gtinfo['labels'])))

    def eval_map(self):
        """Convert the buffers into eval_map's format and run it.

        Returns whatever ``eval_map`` returns (a summary table), computed
        at IoU 0.5 with all detections given a fixed score of 1.0.
        """
        classes = sorted(set(self.classes))
        cls2ids = {name: idx for idx, name in enumerate(classes)}

        # gt label strings become 1-based integer ids, as eval_map expects
        self.gt_labels = [
            np.array([cls2ids[name] + 1 for name in names], dtype=np.int64)
            for names in self.gt_labels
        ]

        results = []
        for bboxes, labels in zip(self.pt_bboxes, self.pt_labels):
            # one detection array per class: rows of [x1, y1, x2, y2, score]
            per_cls = [[] for _ in classes]
            for box, name in zip(bboxes, labels):
                per_cls[cls2ids[name]].append(list(box) + [1.0])
            results.append([
                np.array(dets, dtype=np.float64) if dets
                else np.zeros((0, 5), dtype=np.float64)
                for dets in per_cls
            ])

        return eval_map(results, self.gt_bboxes, self.gt_labels, self.gt_ignore,
                        scale_ranges=None, iou_thr=0.5, dataset=classes,
                        print_summary=True)

def accuracy_per_image(gt_imgs, pt_imgs, log_acc=None):
    """Evaluate paired labelme gt/prediction files image by image.

    For each valid pair, per-image accuracy/precision/recall are printed
    and the raw counts are accumulated into the overall metrics and the
    mAP evaluation.  Pairs with empty annotations or predictions are
    skipped with a message.

    Args:
        gt_imgs: list of ground-truth labelme json paths.
        pt_imgs: list of prediction labelme json paths, parallel to gt_imgs.
        log_acc: optional xlsx path; when truthy, the per-image table and
            the mAP table are written there as two sheets.

    Returns:
        Tuple ``(recall, precision, accuracy, mAP)`` over all valid pairs.

    Raises:
        ValueError: when not a single pair could be evaluated (previously
            this surfaced as a bare ZeroDivisionError).
    """
    if log_acc:
        # per-image rows for the xlsx report (column names are user-facing)
        fp = {'图片': [], '正确': [], '错误': [], '漏检': [],
              '多检': [], '查全率': [], '查准率': [], '准确率': []}

    n_true = 0
    n_miss = 0
    n_rest = 0
    n_error = 0
    mean_ap = MeanAP()

    ngt = len(gt_imgs)
    npt = len(pt_imgs)
    nef = 0  # number of pairs that actually contributed to the evaluation

    for i, (gt_img, pt_img) in enumerate(zip(gt_imgs, pt_imgs), 1):
        gtann = load_json(gt_img)
        if not len(gtann['shapes']):
            print('空标注（%s）.' % os.path.basename(gt_img))
            continue
        ptann = load_json(pt_img)
        if not len(ptann['shapes']):
            print('空预测（%s）.' % os.path.basename(pt_img))
            continue
        gtinfo = get_bboxes(gtann)
        if not len(gtinfo['bboxes']):
            # bug fix: this message previously named pt_img although the
            # empty annotation belongs to the ground-truth file
            print('空标注（%s）.' % os.path.basename(gt_img))
            continue
        ptinfo = get_bboxes(ptann)
        if not len(ptinfo['bboxes']):
            print('空预测（%s）.' % os.path.basename(pt_img))
            continue
        nef += 1
        mean_ap.update_gt_pt_pair(gtinfo, ptinfo)

        result = accuracy(gtinfo, ptinfo)
        acc = result['accuracy']
        recall = result['recall']
        precision = result['precision']
        n_true += result['n_true']
        n_miss += result['n_miss']
        n_rest += result['n_rest']
        n_error += result['n_error']
        print('[%d/%d] %s\n正确 %d 错误 %d 漏检 %d 多检 %d 查全率 %.2f 查准率 %.2f 准确率 %.2f' % (i, len(pt_imgs),
              os.path.basename(gt_img), result['n_true'], result['n_error'], result['n_miss'], result['n_rest'], recall, precision, acc))
        if log_acc:
            fp['准确率'].append(result['accuracy'])
            fp['查全率'].append(result['recall'])
            fp['查准率'].append(result['precision'])
            fp['正确'].append(result['n_true'])
            fp['错误'].append(result['n_error'])
            fp['漏检'].append(result['n_miss'])
            fp['多检'].append(result['n_rest'])
            fp['图片'].append(os.path.basename(gt_img))

    if not nef:
        # previously this fell through to a ZeroDivisionError below
        raise ValueError('no valid gt/pt pair to evaluate')

    acc = n_true / (n_true + n_error + n_rest + n_miss)
    precision = n_true / (n_true + n_error + n_rest)
    recall = n_true / (n_true + n_error + n_miss)

    print('\n评估mAP')
    map_table = mean_ap.eval_map()

    if log_acc:
        # aggregate row appended after the per-image rows
        fp['图片'].append('合计')
        fp['正确'].append(sum(fp['正确']))
        fp['多检'].append(sum(fp['多检']))
        fp['漏检'].append(sum(fp['漏检']))
        fp['错误'].append(sum(fp['错误']))
        fp['查全率'].append(recall)
        fp['查准率'].append(precision)
        fp['准确率'].append(acc)

        with pd.ExcelWriter(log_acc) as writer:
            fpacc = pd.DataFrame(fp)
            fpmap = pd.DataFrame(map_table)
            fpacc.to_excel(writer, sheet_name='评估结果', index=False)
            fpmap.to_excel(writer, sheet_name='mean AP', index=False)

    # last entry of the 'ap' column is the overall mAP — presumably the
    # summary row produced by eval_map; TODO confirm against mean_ap module
    mAP = map_table['ap'][-1]
    print('\n人工标注 %d 模型预测 %d 有效评估 %d' % (ngt, npt, nef))
    print('平均 查全率 %.2f 查准率 %.2f 准确率 %.2f mAP %.2f' % (recall, precision, acc, mAP))

    return recall, precision, acc, mAP

# 加载gt和pt标注
def load_gtpt(gtsrc, ptsrc):
    gt_ann_list = []
    pt_ann_list = []
    for f in os.listdir(gtsrc):
        if not f.endswith('.json'):
            continue
        gtf = os.path.join(gtsrc, f)
        ptf = os.path.join(ptsrc, f)
        if not os.path.exists(ptf):
            print(ptf, '不存在!')
            continue
        gt_ann_list.append(gtf)
        pt_ann_list.append(ptf)
    return gt_ann_list, pt_ann_list

def accuracy_per_image_with_mask(gtsrc, ptsrc, outtxt=""):
    """Run the full evaluation over two annotation directories.

    Args:
        gtsrc: directory with ground-truth labelme json files.
        ptsrc: directory with prediction json files.
        outtxt: xlsx report path; empty/None defaults to
            ``<gtsrc>/../evaluation.xlsx``.
    """
    gt_anns, pt_anns = load_gtpt(gtsrc, ptsrc)
    if not outtxt:
        outtxt = os.path.abspath(os.path.join(gtsrc, '..', 'evaluation.xlsx'))
    accuracy_per_image(gt_anns, pt_anns, outtxt)
    print('评估结果保存至%s.' % outtxt)

def vision_wrong_instances(gtsrc, ptsrc, dst=''):
    """Export labelme files that visualize wrong, missed and extra detections.

    For every gt/pt json pair, shapes are collected whose label encodes the
    error type: ``[gt][]`` missed gt box, ``[gt][pt]`` wrong label,
    ``[][pt]`` extra detection.  Images with at least one such instance are
    written (annotation plus the source image, when found) to ``dst``.

    Fixes vs. the original: the prediction file was loaded and empty-checked
    twice in a row, and the empty-annotation message named the prediction
    file instead of the ground-truth file.

    Args:
        gtsrc: directory of ground-truth labelme json files (and images).
        ptsrc: directory of prediction labelme json files.
        dst: output directory; defaults to ``<gtsrc>/../wrong``.
    """
    iouthr = 0.5
    gt_anns, pt_anns = load_gtpt(gtsrc, ptsrc)
    if not len(gt_anns) or not len(pt_anns):
        print('未发现标注数据或预测数据!')
        return
    if dst is None or dst == '':
        dst = os.path.abspath(os.path.join(gtsrc, '..', 'wrong'))
    if not os.path.exists(dst):
        os.makedirs(dst)
    c = 0
    for i, (gt_img, pt_img) in enumerate(zip(gt_anns, pt_anns), 1):
        shapes = []
        gtann = load_json(gt_img)
        if not len(gtann['shapes']):
            print(gt_img, '空标注!')
            continue
        # bug fix: ptann was previously loaded and checked twice
        ptann = load_json(pt_img)
        if not len(ptann['shapes']):
            print(gt_img, '空预测!')
            continue
        gtinfo = get_bboxes(gtann)
        if not len(gtinfo['bboxes']):
            # bug fix: message previously named pt_img for an empty gt file
            print('空标注（%s）.' % os.path.basename(gt_img))
            continue
        ptinfo = get_bboxes(ptann)
        if not len(ptinfo['bboxes']):
            print('空预测（%s）.' % os.path.basename(pt_img))
            continue
        gt_bboxes = gtinfo['bboxes']
        pt_bboxes = ptinfo['bboxes']
        gt_labels = gtinfo['labels']
        pt_labels = ptinfo['labels']

        ioum = matrix_iou(gt_bboxes, pt_bboxes)
        # missed gt boxes and label errors (one pass over gt rows)
        for gt_idx, iou_row in enumerate(ioum):
            pt_idx = np.argmax(iou_row)
            iou_max = iou_row[pt_idx]
            gt_label = gt_labels[gt_idx]
            pt_label = pt_labels[pt_idx]
            if iou_max < iouthr:
                shape = deepcopy(gtann['shapes'][gt_idx])
                shape['label'] = '[%s][]' % (gt_label)
                shapes.append(shape)
                continue

            if gt_label != pt_label:
                shape = deepcopy(gtann['shapes'][gt_idx])
                shape['label'] = '[%s][%s]' % (gt_label, pt_label)
                shapes.append(shape)

        # extra detections: predictions no gt box overlaps sufficiently
        for pt_idx, iou_row in enumerate(ioum.T):
            gt_idx = np.argmax(iou_row)
            iou_max = iou_row[gt_idx]
            pt_label = pt_labels[pt_idx]
            if iou_max < iouthr:
                shape = deepcopy(ptann['shapes'][pt_idx])
                shape['label'] = '[][%s]' % (pt_label)
                shapes.append(shape)

        if not len(shapes):
            continue
        c += 1
        newann = deepcopy(gtann)
        newann['shapes'] = shapes
        dstann = os.path.join(dst, os.path.basename(gt_img))
        cvio.write_ann(newann, dstann)
        # copy the source image next to the exported annotation when present
        imgname = gtann['imagePath']
        srcimg = os.path.join(gtsrc, imgname)
        if os.path.exists(srcimg):
            shutil.copy(srcimg, os.path.join(dst, imgname))
        print('[%d/%d] Save %s.' % (c, i, dstann))


if __name__ == '__main__':
    # Example run: evaluate predictions in test0723/pt against ground truth
    # in test0723/gt and write the report to evaluation.xlsx.
    gt_src = 'test0723/gt'
    pt_src = 'test0723/pt'
    accuracy_per_image_with_mask(gt_src, pt_src, 'evaluation.xlsx')
