import math
import warnings
from pathlib import Path
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from dataset.dataloader import CocoDataset, SimpleDetDataset
from tqdm import tqdm
import torchvision.ops as ops

def box_iou(box1, box2, eps=1e-7):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    # Broadcast box1 over rows and box2 over columns, splitting each box
    # into its top-left (lt) and bottom-right (rb) corners.
    lt1, rb1 = box1.unsqueeze(1).chunk(2, 2)   # (N,1,2) each
    lt2, rb2 = box2.unsqueeze(0).chunk(2, 2)   # (1,M,2) each

    # Intersection: overlap extents clamped at 0, multiplied over x/y.
    wh = (torch.min(rb1, rb2) - torch.max(lt1, lt2)).clamp(0)
    inter = wh.prod(2)

    # Union = area1 + area2 - intersection; eps guards against /0.
    area1 = (rb1 - lt1).prod(2)
    area2 = (rb2 - lt2).prod(2)
    return inter / (area1 + area2 - inter + eps)


def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    """Return the IoU of box1 (1,4) against box2 (n,4).

    With one of the flags set, returns GIoU, DIoU or CIoU instead
    (CIoU takes precedence over DIoU, both over GIoU).
    """
    if xywh:
        # Convert center/size representation to corner coordinates.
        (cx1, cy1, w1, h1), (cx2, cy2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
        half_w1, half_h1, half_w2, half_h2 = w1 / 2, h1 / 2, w2 / 2, h2 / 2
        b1_x1, b1_x2, b1_y1, b1_y2 = cx1 - half_w1, cx1 + half_w1, cy1 - half_h1, cy1 + half_h1
        b2_x1, b2_x2, b2_y1, b2_y2 = cx2 - half_w2, cx2 + half_w2, cy2 - half_h2, cy2 + half_h2
    else:
        # Already corner coordinates; derive width/height (eps keeps h non-zero).
        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps

    # Intersection area (clamped at zero when the boxes do not overlap).
    inter = (torch.minimum(b1_x2, b2_x2) - torch.maximum(b1_x1, b2_x1)).clamp(0) * \
            (torch.minimum(b1_y2, b2_y2) - torch.maximum(b1_y1, b2_y1)).clamp(0)

    union = w1 * h1 + w2 * h2 - inter + eps
    iou = inter / union

    if not (CIoU or DIoU or GIoU):
        return iou  # plain IoU

    # Smallest enclosing (convex) box dimensions.
    cw = torch.maximum(b1_x2, b2_x2) - torch.minimum(b1_x1, b2_x1)
    ch = torch.maximum(b1_y2, b2_y2) - torch.minimum(b1_y1, b2_y1)
    if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
        c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
        # Squared distance between box centers.
        rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4
        if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
            # Aspect-ratio consistency term; alpha is its trade-off weight,
            # detached from the graph as in the reference implementation.
            v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
            with torch.no_grad():
                alpha = v / (v - iou + (1 + eps))
            return iou - (rho2 / c2 + v * alpha)  # CIoU
        return iou - rho2 / c2  # DIoU
    c_area = cw * ch + eps  # convex area
    return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf

# iou = overlap_area / union_area
# input boxes shape = [box_num, 4]
# box representation = xyxy (corner order may be swapped; it is normalized here)
def boxes_iou(boxes, device=torch.device('cpu')):
    """Return the [num, num] pairwise IoU matrix for a set of xyxy boxes.

    Vectorized replacement for the original O(n^2) pure-Python double loop;
    the formula is unchanged: the diagonal is fixed to 1.0 and the constant
    1e-4 keeps the denominator non-zero for degenerate (zero-area) boxes.

    Arguments:
        boxes (Tensor[num, 4]): (x1, y1, x2, y2) boxes; the two corners may
            be given in either order.
        device: device of the returned tensor.
    Returns:
        Tensor[num, num] float32 IoU matrix on `device`.
    """
    num = boxes.shape[0]
    if num == 0:
        return torch.zeros((0, 0)).float().to(device)
    b = boxes.float()
    # Normalize corner order so (min, max) are well defined on each axis.
    x_min = torch.minimum(b[:, 0], b[:, 2])
    x_max = torch.maximum(b[:, 0], b[:, 2])
    y_min = torch.minimum(b[:, 1], b[:, 3])
    y_max = torch.maximum(b[:, 1], b[:, 3])
    areas = (x_max - x_min) * (y_max - y_min)
    # Pairwise intersection extents via broadcasting (rows: box i, cols: box j).
    iw = (torch.minimum(x_max[:, None], x_max[None, :]) -
          torch.maximum(x_min[:, None], x_min[None, :])).clamp(min=0)
    ih = (torch.minimum(y_max[:, None], y_max[None, :]) -
          torch.maximum(y_min[:, None], y_min[None, :])).clamp(min=0)
    inter = iw * ih
    result = inter / (areas[:, None] + areas[None, :] - inter + 0.0001)
    # A box always fully overlaps itself.
    result.fill_diagonal_(1.0)
    return result.to(device)

# Intersection over the SMALLER box's area: lets callers filter out boxes
# that are fully contained in one another (containment gives a ratio of 1).
def min_boxes_iou(boxes, device=torch.device('cpu')):
    """Return the [num, num] pairwise intersection-over-min-area matrix.

    Vectorized replacement for the original O(n^2) pure-Python double loop;
    the formula is unchanged: diagonal fixed to 1.0, constant 1e-4 keeps the
    denominator non-zero for degenerate boxes.

    Arguments:
        boxes (Tensor[num, 4]): (x1, y1, x2, y2) boxes; the two corners may
            be given in either order.
        device: device of the returned tensor.
    Returns:
        Tensor[num, num] float32 matrix on `device`.
    """
    num = boxes.shape[0]
    if num == 0:
        return torch.zeros((0, 0)).float().to(device)
    b = boxes.float()
    # Normalize corner order so (min, max) are well defined on each axis.
    x_min = torch.minimum(b[:, 0], b[:, 2])
    x_max = torch.maximum(b[:, 0], b[:, 2])
    y_min = torch.minimum(b[:, 1], b[:, 3])
    y_max = torch.maximum(b[:, 1], b[:, 3])
    areas = (x_max - x_min) * (y_max - y_min)
    # Pairwise intersection extents via broadcasting (rows: box i, cols: box j).
    iw = (torch.minimum(x_max[:, None], x_max[None, :]) -
          torch.maximum(x_min[:, None], x_min[None, :])).clamp(min=0)
    ih = (torch.minimum(y_max[:, None], y_max[None, :]) -
          torch.maximum(y_min[:, None], y_min[None, :])).clamp(min=0)
    inter = iw * ih
    # Divide by the smaller of the two areas instead of the union.
    result = inter / (torch.minimum(areas[:, None], areas[None, :]) + 0.0001)
    result.fill_diagonal_(1.0)
    return result.to(device)

def boxes_giou(boxes, device=torch.device('cpu')):
    """Return the [num, num] pairwise GIoU matrix for a set of xyxy boxes.

    GIoU = IoU - |enclosing_area - union| / enclosing_area.
    Vectorized replacement for the original O(n^2) pure-Python double loop.
    Fix: the original divided by the raw union/enclosing area and raised
    ZeroDivisionError for degenerate (zero-area) boxes; a 1e-4 constant is
    added to both denominators, consistent with boxes_iou / min_boxes_iou.

    Arguments:
        boxes (Tensor[num, 4]): (x1, y1, x2, y2) boxes; the two corners may
            be given in either order.
        device: device of the returned tensor.
    Returns:
        Tensor[num, num] float32 GIoU matrix on `device`.
    """
    num = boxes.shape[0]
    if num == 0:
        return torch.zeros((0, 0), device=device).float()
    b = boxes.float()
    # Normalize corner order so (min, max) are well defined on each axis.
    x_min = torch.minimum(b[:, 0], b[:, 2])
    x_max = torch.maximum(b[:, 0], b[:, 2])
    y_min = torch.minimum(b[:, 1], b[:, 3])
    y_max = torch.maximum(b[:, 1], b[:, 3])
    areas = (x_max - x_min) * (y_max - y_min)
    # Pairwise intersection extents via broadcasting (rows: box i, cols: box j).
    iw = (torch.minimum(x_max[:, None], x_max[None, :]) -
          torch.maximum(x_min[:, None], x_min[None, :])).clamp(min=0)
    ih = (torch.minimum(y_max[:, None], y_max[None, :]) -
          torch.maximum(y_min[:, None], y_min[None, :])).clamp(min=0)
    inter = iw * ih
    union = areas[:, None] + areas[None, :] - inter
    # Smallest enclosing box of each pair.
    cw = torch.maximum(x_max[:, None], x_max[None, :]) - torch.minimum(x_min[:, None], x_min[None, :])
    ch = torch.maximum(y_max[:, None], y_max[None, :]) - torch.minimum(y_min[:, None], y_min[None, :])
    c_area = cw * ch
    result = inter / (union + 0.0001) - (c_area - union).abs() / (c_area + 0.0001)
    # NOTE(review): the diagonal is kept at 0.0 to match the original code
    # (it never filled it), although GIoU of a box with itself is 1.0 —
    # confirm this is what callers expect.
    result.fill_diagonal_(0.0)
    return result.to(device)


def resize_image(image, size, letterbox_image):
    """Resize a PIL image to `size` (w, h).

    With `letterbox_image` the aspect ratio is preserved: the image is scaled
    to fit inside `size` and centered on a gray (128, 128, 128) canvas.
    Otherwise it is stretched directly to `size`.
    """
    target_w, target_h = size
    if not letterbox_image:
        return image.resize((target_w, target_h), Image.BICUBIC)
    src_w, src_h = image.size
    # Scale factor that fits the image inside the target without cropping.
    ratio = min(target_w / src_w, target_h / src_h)
    fit_w = int(src_w * ratio)
    fit_h = int(src_h * ratio)
    scaled = image.resize((fit_w, fit_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    # Center the scaled image; the remaining border stays gray.
    canvas.paste(scaled, ((target_w - fit_w) // 2, (target_h - fit_h) // 2))
    return canvas


# Simple NMS implementation.
# input boxes shape = [box_num, 6]  (the original comment said 5, but each
# row carries 6 values)
# boxes[i] = [x1, y1, x2, y2, score, cls]
# max_objs: maximum number of objects kept per image

def NMS(boxes, iou_threshold=0.5, scores_threshold=0.5, max_objs=50, device=torch.device('cpu')):  # running NMS on cpu is faster than cuda here
    """Greedy non-maximum suppression over one image's detections.

    Arguments:
        boxes (Tensor[num, 6]): rows of [x1, y1, x2, y2, score, cls].
        iou_threshold: boxes with IoU >= this against a kept box are dropped.
        scores_threshold: minimum score for a box to be considered.
        max_objs: cap on the number of candidate boxes entering NMS.
        device: device passed through to boxes_iou.
    Returns:
        (boxes Tensor[K, 4], scores Tensor[K], labels Tensor[K, 1]-derived);
        empty tensors when nothing survives.
    """
    num, _ = boxes.shape
    pos, scores, cls = boxes.split((4, 1, 1), -1)
    # squeeze(-1), not squeeze(): with a single box, squeeze() collapsed the
    # scores to a 0-d tensor and broke the masking below.
    scores = scores.squeeze(-1)
    mask = scores.ge(scores_threshold)
    if mask.sum() > max_objs:
        # Keep the top-scoring `max_objs` boxes.  Bug fix: the original used
        # an ascending sort and took the first entries, which kept the
        # LOWEST-scoring boxes instead of the highest.
        _, indices = scores.sort(descending=True)
        mask = torch.zeros(scores.shape).bool()
        mask[indices[0:max_objs]] = True
    pos = pos[mask, :]
    select_scores = scores[mask]
    select_cls = cls[mask]
    mask = mask[mask]
    iou_table = boxes_iou(pos, device)

    resault_box = []
    score_resault_box = []
    cls_resault_box = []
    while mask.sum():
        # Pick the highest-scoring box still alive.
        fetch_scores = select_scores * mask
        index = torch.argmax(fetch_scores)
        iou_relate = iou_table[index]
        # Drop every remaining box overlapping the winner too much; the
        # winner removes itself via its diagonal IoU of 1.0.
        mask = mask * iou_relate.lt(iou_threshold)
        resault_box.append(pos[index])
        score_resault_box.append(select_scores[index])
        cls_resault_box.append(select_cls[index])
    return torch.stack(resault_box, 0) if len(resault_box) else torch.tensor(resault_box), torch.tensor(score_resault_box), torch.tensor(cls_resault_box)


# Can be used to filter out boxes that are contained in one another
# (uses intersection over the smaller box's area instead of the union).
def min_NMS(boxes, iou_threshold=0.5, scores_threshold=0.5, device=torch.device('cpu')):  # running NMS on cpu is faster than cuda here
    """Greedy NMS variant using min_boxes_iou, which scores containment as 1.

    Arguments:
        boxes (Tensor[num, 6]): rows of [x1, y1, x2, y2, score, cls].
        iou_threshold: boxes with ratio >= this against a kept box are dropped.
        scores_threshold: minimum score for a box to be considered.
        device: device passed through to min_boxes_iou.
    Returns:
        (boxes Tensor[K, 4], scores Tensor[K], labels); empty tensors when
        nothing survives.
    """
    num, _ = boxes.shape
    pos, scores, cls = boxes.split((4, 1, 1), -1)
    # squeeze(-1), not squeeze(): with a single box, squeeze() collapsed the
    # scores to a 0-d tensor and broke the masking below (same fix as NMS).
    scores = scores.squeeze(-1)
    mask = scores.ge(scores_threshold)

    pos = pos[mask, :]
    select_scores = scores[mask]
    select_cls = cls[mask]
    mask = mask[mask]
    iou_table = min_boxes_iou(pos, device)

    resault_box = []
    score_resault_box = []
    cls_resault_box = []
    while mask.sum():
        # Pick the highest-scoring box still alive.
        fetch_scores = select_scores * mask
        index = torch.argmax(fetch_scores)
        iou_relate = iou_table[index]
        # Drop contained/overlapping boxes; the winner removes itself via
        # its diagonal ratio of 1.0.
        mask = mask * iou_relate.lt(iou_threshold)
        resault_box.append(pos[index])
        score_resault_box.append(select_scores[index])
        cls_resault_box.append(select_cls[index])
    return torch.stack(resault_box, 0) if len(resault_box) else torch.tensor(resault_box), torch.tensor(score_resault_box), torch.tensor(cls_resault_box)

# Collect the prediction results for a SimpleDetDataset.
def collect_simpledetdataset_resault(dataset_val, model):
    """Run `model` over every sample of `dataset_val` and gather NMS-filtered
    detections.

    Returns:
        (resault, images_ids): a list of per-image dicts with keys
        'image_id', 'boxes', 'scores', 'labels', and the list of image ids.
    """
    model.eval()
    with torch.no_grad():
        detections = []
        id_list = []
        for idx in range(len(dataset_val)):
            sample_img, _ = dataset_val[idx]
            # Model output: (1, C, N) -> (N, C) with C = 4 coords + class scores.
            raw = model(sample_img.unsqueeze(0)).detach().cpu().squeeze(0).permute(1, 0)
            coords, class_scores = raw.split((4, dataset_val.num_classes()), 1)
            conf, labels = torch.max(class_scores, 1, keepdim=True)
            merged = torch.concat((coords, conf, labels), dim=1)
            keep_boxes, keep_conf, keep_labels = NMS(merged, scores_threshold=0.5, device=torch.device('cpu'))
            img_id = dataset_val.images_names[idx]
            detections.append({
                'image_id': img_id,
                'boxes': keep_boxes,
                'scores': keep_conf,
                'labels': keep_labels
            })
            id_list.append(img_id)
        return detections, id_list

def collect_simpledetdataloader_resault(dataloader_val, num_class, model, device=torch.device('cuda')):
    """Run `model` over a validation dataloader and collect NMS-filtered
    predictions together with the ground truth.

    Arguments:
        dataloader_val: yields (img batch, annot) where annot rows are
            [batch_index, label, x1, y1, x2, y2].
        num_class: number of classes in the model head.
        model: detection model.
        device: device the model runs on.
    Returns:
        (resault, gt): per-image prediction dicts and ground-truth dicts,
        each with 'boxes'/'scores'('labels') keys.
    """
    model.eval()
    with torch.no_grad():
        predictions = []
        ground_truth = []
        raw_pre = []      # kept from the original; currently unused
        raw_annot = []    # kept from the original; currently unused
        for img, annot in dataloader_val:
            # (B, C, N) -> (B, N, C) with C = 4 coords + class scores.
            outputs = model(img.to(device).float()).detach().cpu().permute(0, 2, 1)
            coords, class_scores = outputs.split((4, num_class), 2)
            conf, labels = torch.max(class_scores, 2, keepdim=True)
            merged = torch.concat((coords, conf, labels), dim=2)
            # Only consumed by the disabled batched_nms path below.
            sq_cls_conf = conf.squeeze(dim=-1)
            sq_cls_pre = labels.squeeze(dim=-1)
            for i, det in enumerate(merged):
                rows = torch.where(annot[:, 0] == i)[0]
                ground_truth.append({'boxes': annot[rows, 2:], 'labels': annot[rows, 1].int()})
                keep_boxes, keep_conf, keep_labels = NMS(det, scores_threshold=0.5, device=torch.device('cpu'))

                # nms_selected = ops.batched_nms(keep_boxes[i, :], sq_cls_conf[i, :], sq_cls_pre[i, :], 0.5)

                predictions.append({'boxes': keep_boxes, 'scores': keep_conf, 'labels': keep_labels.int()})

        return predictions, ground_truth
# Compute a PR curve per class:
# P = TP / (TP + FP)
# R = TP / (TP + FN)
# For a fixed IoU threshold, collect each image's per-class TP / FP / FN,
# accumulate them over the whole validation set, then sweep a score
# threshold over the matched-prediction scores to trace the PR curve.

# For each image:
def get_mAP(dataset_val, resualt_dict, iou_threshold=0.5):
    """Compute mean Average Precision over `dataset_val`.

    Arguments:
        dataset_val: dataset with `classes`, `images_names` and indexing that
            returns (img, label_boxes).
        resualt_dict: list of dicts with keys 'image_id', 'boxes', 'scores',
            'labels' (as produced by collect_simpledetdataset_resault).
        iou_threshold: IoU needed for a prediction to match a GT box.
    Returns:
        float mAP over classes with a positive AP (0.0 when none).

    Fixes vs. the original: list-vs-float comparison in the IoU matching,
    invalid torch.max(..., keepdim=True) call, PRPoint.append(a, b) TypeError,
    wrongly-shaped float `total_mask`, base_fn read from base_fps, float /
    out-of-range np.linspace indices, and a Python list indexed with np.where.
    """
    with torch.no_grad():
        # Index the predictions by image id for O(1) lookup per image.
        resualt_dict_ImgIdx = {}
        for d in resualt_dict:
            resualt_dict_ImgIdx[d['image_id']] = {k: v for k, v in d.items() if k != 'image_id'}

        num_classes = len(dataset_val.classes)
        tps = [[] for _ in range(num_classes)]    # True Positive scores per class (matched predictions)
        base_fps = [0] * num_classes              # False Positives: predictions matching no GT
        base_fns = [0] * num_classes              # False Negatives: GT boxes matching no prediction
        total_cls_list = []                       # classes that occur in any ground truth

        for index in range(len(dataset_val)):
            img, label_boxes = dataset_val[index]
            name = dataset_val.images_names[index]
            pre_dict = resualt_dict_ImgIdx[name]
            pre_boxes = pre_dict['boxes']
            pre_scores = pre_dict['scores']
            pre_cls = pre_dict['labels']
            _cls_list = []
            for box in label_boxes:
                c = int(box[1])  # assumes column 1 holds the class id — TODO confirm against dataset
                if c not in _cls_list:
                    _cls_list.append(c)
                if c not in total_cls_list:
                    total_cls_list.append(c)

            # NOTE(review): predictions of classes absent from this image's
            # GT are never counted as FP (original behavior preserved) —
            # confirm this is intended.
            for cls in _cls_list:
                select_mask = (pre_cls == cls).reshape(-1)
                _pre_boxes = pre_boxes[select_mask]
                _pre_scores = pre_scores[select_mask]
                _label_boxes = label_boxes[label_boxes[:, 1] == cls]
                # Bool vector over this class's predictions (was a wrongly
                # shaped float tensor in the original).
                matched = torch.zeros(_pre_boxes.shape[0], dtype=torch.bool)
                for box in _label_boxes:
                    # Match one prediction (the highest-scoring one above the
                    # IoU threshold) to each GT box; count an FN otherwise.
                    if _pre_boxes.shape[0] == 0:
                        base_fns[cls] += 1
                        continue
                    # assumes GT box coordinates live in columns 2:6 — TODO confirm
                    ious = box_iou(box[2:6].unsqueeze(0), _pre_boxes).squeeze(0)
                    iou_mask = ious >= iou_threshold
                    if iou_mask.sum() == 0:
                        base_fns[cls] += 1
                        continue
                    matched |= iou_mask
                    score = torch.max(_pre_scores[iou_mask])
                    tps[cls].append(float(score))
                base_fps[cls] += int((~matched).sum())

        APs = [0.0] * num_classes
        for cls in total_cls_list:
            tp_scores = torch.tensor(tps[cls])
            base_fp = base_fps[cls]
            base_fn = base_fns[cls]  # was base_fps[cls] (copy/paste bug)
            sorted_tp, _ = torch.sort(tp_scores)
            PRPoint = []
            for score_threshold in sorted_tp:
                tp = int(tp_scores.gt(score_threshold).sum())
                fp = base_fp
                fn = base_fn + int(tp_scores.lt(score_threshold).sum())
                if tp + fp == 0 or tp + fn == 0:
                    continue  # avoid 0/0 at the extreme thresholds
                PRPoint.append((tp / (tp + fp), tp / (tp + fn)))
            if not PRPoint:
                continue
            PRPoint = np.array(PRPoint)
            if PRPoint.shape[0] > 11:  # take 11 sample points (VOC style)
                sample_idx = np.linspace(0, PRPoint.shape[0] - 1, num=11).astype(int)
                PRPoint = PRPoint[sample_idx, :]
            APs[cls] = float(np.mean(PRPoint[:, 0]))
        APs = np.array(APs)
        positive = APs[APs > 0]
        return float(np.mean(positive)) if positive.size else 0.0
