from torch.utils.data import DataLoader
from data.DataSet import VOC2012
import torch
import torchvision.transforms as transforms
import cv2
import data.DataSet
import matplotlib.pyplot as plt
from PIL import Image,ImageDraw,ImageFont


def NMS(bbox, conf_thresh=0.1, iou_thresh=0.3):
    """Class-wise non-maximum suppression over a set of candidate boxes.

    Args:
        bbox: (N, 5 + C) tensor. Columns 0:4 are the box corners
            (x1, y1, x2, y2), column 4 is the objectness confidence and
            columns 5: are the C class conditional probabilities.
        conf_thresh: class-specific confidence scores <= this are zeroed
            before suppression.
        iou_thresh: a box overlapping a higher-scored same-class box by
            more than this IoU is suppressed.

    Returns:
        (M, 6) tensor, one row per kept box:
        [class_id, x1, y1, x2, y2, class_specific_confidence].
    """
    def calculate_iou(bbox1, bbox2):
        """IoU of bbox1=(x1,y1,x2,y2) and bbox2=(x3,y3,x4,y4)."""
        intersect_bbox = [0., 0., 0., 0.]  # intersection of bbox1 and bbox2
        if bbox1[2] < bbox2[0] or bbox1[0] > bbox2[2] or bbox1[3] < bbox2[1] or bbox1[1] > bbox2[3]:
            pass  # no overlap: leave the degenerate zero-area box
        else:
            intersect_bbox[0] = max(bbox1[0], bbox2[0])
            intersect_bbox[1] = max(bbox1[1], bbox2[1])
            intersect_bbox[2] = min(bbox1[2], bbox2[2])
            intersect_bbox[3] = min(bbox1[3], bbox2[3])

        area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])  # area of bbox1
        area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])  # area of bbox2
        area_intersect = (intersect_bbox[2] - intersect_bbox[0]) * (intersect_bbox[3] - intersect_bbox[1])

        if area_intersect > 0:
            return area_intersect / (area1 + area2 - area_intersect)
        else:
            return 0

    # Generalized: derive box count and class count from the input instead of
    # hard-coding 98 boxes / 20 classes (backward-compatible with (98, 25)).
    n = bbox.size(0)
    num_classes = bbox.size(1) - 5

    bbox_prob = bbox[:, 5:].clone()  # class conditional probabilities
    bbox_confi = bbox[:, 4].clone().unsqueeze(1).expand_as(bbox_prob)  # objectness
    # confidence * class probability = class-specific confidence score,
    # combining "is there an object" with "which object".
    bbox_cls_spec_conf = bbox_confi * bbox_prob
    bbox_cls_spec_conf[bbox_cls_spec_conf <= conf_thresh] = 0  # drop low scores

    for c in range(num_classes):
        rank = torch.sort(bbox_cls_spec_conf[:, c], descending=True).indices
        for i in range(n):
            if bbox_cls_spec_conf[rank[i], c] != 0:
                for j in range(i + 1, n):
                    if bbox_cls_spec_conf[rank[j], c] != 0:
                        iou = calculate_iou(bbox[rank[i], 0:4], bbox[rank[j], 0:4])
                        if iou > iou_thresh:  # suppress the lower-scored overlap
                            bbox_cls_spec_conf[rank[j], c] = 0

    # Keep only boxes whose best class-specific score survived suppression.
    keep = torch.max(bbox_cls_spec_conf, dim=1).values > 0
    bbox = bbox[keep]
    bbox_cls_spec_conf = bbox_cls_spec_conf[keep]

    res = torch.ones((bbox.size(0), 6))
    res[:, 1:5] = bbox[:, 0:4]  # final box coordinates
    # BUG FIX: the class id must come from the surviving class-specific score,
    # not from the raw probabilities — a box kept only for its second-best
    # class would otherwise be labeled with an NMS-suppressed class.
    res[:, 0] = torch.argmax(bbox_cls_spec_conf, dim=1).int()
    res[:, 5] = torch.max(bbox_cls_spec_conf, dim=1).values  # final scores
    return res

def labels2bbox(matrix):
    """Convert a (7, 7, 30) YOLO grid into 98 corner-format boxes, then NMS.

    Each cell predicts two boxes (5 values each: cx, cy, w, h, conf,
    relative to the cell) followed by 20 shared class probabilities.
    Returns the (M, 6) result of NMS over all 98 candidates.
    """
    if matrix.size()[0:2] != (7, 7):
        raise ValueError("Error: Wrong labels size:", matrix.size())
    bbox = torch.zeros((98, 25))  # 98 boxes, 4 coords + conf + 20 classes
    for row in range(7):
        for col in range(7):
            cell = matrix[row, col]
            for b in range(2):  # the two box predictors of this cell
                off = 5 * b  # predictor b occupies cell[off:off+5]
                idx = 2 * (row * 7 + col) + b
                cx = (cell[off + 0] + col) / 7  # cell-relative -> image-relative
                cy = (cell[off + 1] + row) / 7
                half_w = cell[off + 2] / 2
                half_h = cell[off + 3] / 2
                bbox[idx, 0:4] = torch.tensor([cx - half_w, cy - half_h,
                                               cx + half_w, cy + half_h])
                bbox[idx, 4] = cell[off + 4]   # objectness confidence
                bbox[idx, 5:] = cell[10:]      # class probabilities (shared)
    return NMS(bbox)

COLOR = [(255,0,0),(255,125,0),(255,255,0),(255,0,125),(255,0,250),
         (255,125,125),(255,125,250),(125,125,0),(0,255,125),(255,0,0),
         (0,0,255),(125,0,255),(0,125,255),(0,255,255),(125,125,255),
         (0,255,0),(125,255,125),(255,255,255),(100,100,100),(0,0,0),]  # RGB colors marking the 20 class bboxes; customizable (note: entries 0 and 9 are identical)


def draw_bbox(img, bbox, name):
    """Render detections onto an image tensor and save it as a PNG.

    Args:
        img: (3, H, W) tensor (channel order assumed displayable as RGB —
            the caller is expected to have reordered channels; confirm).
        bbox: (N, 6) tensor of [class_id, x1, y1, x2, y2, score] with
            coordinates normalized to [0, 1], as produced by NMS.
        name: output file stem; the image is written to ./imageSave/<name>.png.
    """
    # BUG FIX: shape[-2:] of a CHW tensor is (H, W); the original unpacked it
    # as (w, h), swapping the axes for any non-square image.
    h, w = img.shape[-2:]
    img = transforms.ToPILImage()(img).convert("RGB")
    draw = ImageDraw.Draw(img)  # hoisted: one Draw object serves every box
    n = bbox.size(0)
    for i in range(n):
        # Scale normalized coordinates back to pixels.
        x1 = float(w * bbox[i, 1])
        y1 = float(h * bbox[i, 2])
        x2 = float(w * bbox[i, 3])
        y2 = float(h * bbox[i, 4])
        clas_id = int(bbox[i, 0])
        cls_name = data.DataSet.CLASSES[clas_id]
        confidence = float(bbox[i, 5])
        # BUG FIX: format the score directly instead of slicing
        # str(tensor.data)[7:-1], which depends on torch's repr layout.
        draw.text((x1, y1), cls_name + " con: " + f"{confidence:.4f}", fill=COLOR[clas_id])
        draw.rectangle((x1, y1, x2, y2), outline=COLOR[clas_id])
    img.save("./imageSave/" + name + ".png")


if __name__ == "__main__":
    # Run the trained model over the validation set and save annotated images.
    val_dataloader = DataLoader(VOC2012(is_train=False), batch_size=1, shuffle=False)
    model = torch.load("./checkpoint/epoch25.pkl")
    for i, (inputs, labels) in enumerate(val_dataloader):
        inputs = inputs.cuda()
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            pred = model(inputs)       # pred has shape (1, 30, 7, 7)
        pred = pred.squeeze(dim=0)     # squeeze to (30, 7, 7)
        pred = pred.permute((1, 2, 0))  # rearrange to (7, 7, 30)
        try:
            bbox = labels2bbox(pred)
        except Exception:  # narrowed from bare except: keep best-effort, but
            # no longer swallow KeyboardInterrupt/SystemExit
            bbox = torch.tensor([[0, 0, 0, 0, 0, 0]])
        img = inputs.cpu().squeeze(0)
        # BUG FIX: the original swapped channels through aliased views
        # (b = img[0], ...), so after `img[0] = r` every later assignment
        # copied the same data and all three channels ended up identical.
        # Reverse the channel order out-of-place instead (BGR -> RGB;
        # presumably the dataset loads images with cv2 — confirm).
        img = img[[2, 1, 0], :, :]
        draw_bbox(img, bbox, str(i))

