import cv2
import numpy as np
import torch
import torchvision
from PIL import Image


def label_img_use_rect_with_label(img: torch.Tensor, boxes, labels):
    """Draw red, labelled bounding boxes on a CHW image tensor.

    Returns the annotated image converted to an HWC numpy array.
    """
    annotated = torchvision.utils.draw_bounding_boxes(
        img, boxes, width=3, labels=labels, colors="red", font_size=100)
    # CHW -> HWC for downstream display/saving.
    return annotated.permute(1, 2, 0).numpy()


def label_img_use_rect(img: torch.Tensor, boxes):
    """Draw plain red bounding boxes (no text labels) on a CHW image tensor.

    Returns the annotated image converted to an HWC numpy array.
    """
    annotated = torchvision.utils.draw_bounding_boxes(
        img, boxes, width=3, colors="red", font_size=100)
    # CHW -> HWC for downstream display/saving.
    return annotated.permute(1, 2, 0).numpy()


def label_img_use_point(img, boxes):
    """Mark the center of each box with a green circle on a CHW image tensor.

    img: CHW image tensor; boxes: iterable of (x1, y1, x2, y2) boxes.
    Returns the annotated image as an HWC numpy array.
    """
    # cv2 drawing functions require a contiguous array; the CHW->HWC
    # permutation produces a strided view, so copy into contiguous memory.
    img = np.ascontiguousarray(torch.einsum("c h w -> h w c", img).numpy())
    for box in boxes:
        center_x = int((box[0] + box[2]) / 2)
        center_y = int((box[1] + box[3]) / 2)
        # Circle thickness scales with box width. Clamp to >= 1: cv2.circle
        # rejects thickness 0, which the old code produced for boxes < 2 px
        # wide. (Also fixes the "thinkness" typo.)
        thickness = max(1, int((box[2] - box[0]) / 2))
        img = cv2.circle(img, (center_x, center_y), radius=5,
                         color=(0, 255, 0), thickness=thickness)
    return img


class SingleDetect(torch.nn.Module):
    """Single-image object detector wrapping a YOLOv5/YOLOv8 checkpoint.

    Loads a serialized model, runs one image at a time through it, applies
    confidence filtering plus greedy NMS, and returns the image annotated
    according to ``label_type`` ('rect', 'point', or anything else for
    rectangles with class-name labels).
    """

    def __init__(self, parent, path, device, dtype, imgsz) -> None:
        super().__init__()
        self.parent = parent
        # imgsz is stored as (height, width) — the convention that
        # torchvision.transforms.Resize expects for a tuple size.
        self.imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
        assert isinstance(self.imgsz, tuple)
        self.tf = torchvision.transforms.Compose([
            torchvision.transforms.Resize(self.imgsz)
        ])
        # SECURITY NOTE: torch.load unpickles arbitrary objects — only load
        # trusted checkpoints (weights_only=True would reject this
        # full-model pickle format, so it cannot be enabled here).
        self.model = torch.load(path)['model']
        self.model.eval()
        self.model.to(device=device, dtype=dtype)
        self.device = device
        self.dtype = dtype
        self.conf_thres = 0.29  # minimum objectness confidence to keep a box
        self.iou_thres = 0.29   # IoU above which overlapping boxes are suppressed
        self.idx_to_class = self.parent.config.get('classes')
        self.show_message = None
        self.label_type = self.parent.config.get('label_type', 'rect')

    def warmup(self):
        """Run one dummy forward pass so first-call setup costs are paid upfront."""
        img = torch.zeros((1, 3, 480, 640),
                          device=self.device, dtype=self.dtype)
        with torch.no_grad():  # inference only; no autograd bookkeeping
            self.model(img)

    def process_input(self, img):
        """Normalize input (path / PIL image / numpy array / tensor) to a CHW tensor."""
        if isinstance(img, str):  # image path -> PIL
            img = Image.open(img)
        if isinstance(img, Image.Image):  # PIL -> numpy (H, W, C)
            img = np.array(img)
        if isinstance(img, np.ndarray):  # numpy -> tensor, HWC -> CHW
            img = torch.from_numpy(img)
            img = torch.einsum("h w c -> c h w", img)
        assert img.dim() == 3, "只支持单张图片推理"
        return img

    def forward(self, img):
        """Detect objects in one image.

        Returns (annotated HWC numpy image, original CHW image tensor).
        """
        input_img = self.process_input(img)
        origin_img = input_img.clone()
        # Tensor layout is (C, H, W).
        origin_image_height, origin_image_width = origin_img.shape[-2:]
        input_img = self.tf(input_img[None])
        input_img = input_img.to(device=self.device, dtype=self.dtype) / 255
        with torch.no_grad():  # pure inference; skip gradient tracking
            pred = self.model(input_img)
        #################################################
        # Adapt YOLOv8 output (B, 4+nc, N) to the YOLOv5 layout (B, N, 5+nc)
        # by transposing and inserting an objectness column equal to the
        # maximum class score.
        if pred[0].shape[1] == 4 + len(self.idx_to_class):
            pred = list(pred)
            pred[0] = torch.permute(pred[0], (0, 2, 1))
            conf = torch.max(pred[0][:, :, 4:],
                             dim=-1).values.reshape(1, -1, 1)
            pred[0] = torch.cat(
                (pred[0][:, :, :4], conf, pred[0][:, :, 4:]), dim=-1)

        dets = pred[0][0]
        boxes, confidences, class_ids = self.filter_box(
            dets, origin_image_width, origin_image_height)
        keep = self.nms(boxes, confidences)
        boxes = boxes[keep]
        confidences = confidences[keep]
        class_ids = class_ids[keep]
        ################################################

        if self.label_type == 'rect':
            img = label_img_use_rect(origin_img, boxes)
        elif self.label_type == 'point':
            img = label_img_use_point(origin_img, boxes)
        else:
            labels = [self.idx_to_class[i] for i in class_ids]
            img = label_img_use_rect_with_label(origin_img, boxes, labels)
        return img, origin_img

    def filter_box(self, dets, image_width, image_height):
        """Threshold raw detections and map (cx, cy, w, h) in model-input space
        to integer (x1, y1, x2, y2) boxes in original-image pixels.

        dets: (N, 5+nc) rows of [cx, cy, w, h, obj_conf, class scores...].
        Returns (boxes int32 (M, 4), confidences (M,), class_ids (M,)).
        """
        # self.imgsz is (height, width) — see Resize in __init__ — so the
        # horizontal scale divides by imgsz[1] and the vertical by imgsz[0].
        # FIX: the previous code had these swapped, which was only correct
        # for square input sizes.
        x_factor = image_width / self.imgsz[1]
        y_factor = image_height / self.imgsz[0]
        dets = dets[dets[:, 4] > self.conf_thres]  # drop low-confidence rows
        class_ids = dets[:, 5:].argmax(axis=1)
        confidences = dets[:, 4]
        boxes = dets[:, :4]
        # Center/size -> top-left corner, scaled to the original image.
        boxes[:, 0] = (boxes[:, 0] - 0.5 * boxes[:, 2]) * x_factor  # x1
        boxes[:, 1] = (boxes[:, 1] - 0.5 * boxes[:, 3]) * y_factor  # y1
        # NOTE: columns 0/1 already hold x1/y1 here, so x2 = x1 + w*factor.
        boxes[:, 2] = boxes[:, 0] + boxes[:, 2] * x_factor          # x2
        boxes[:, 3] = boxes[:, 1] + boxes[:, 3] * y_factor          # y2
        boxes = boxes.type(torch.int32)
        return boxes, confidences, class_ids

    def nms(self, boxes, scores):
        """Greedy non-maximum suppression.

        boxes: (N, 4) corner boxes; scores: (N,) confidences.
        Returns a Python list of indices of the boxes to keep.
        """
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)  # per-box areas
        assert isinstance(scores, torch.Tensor)
        order = scores.argsort(descending=True)  # highest confidence first
        keep = []  # indices of boxes that survive suppression
        zero = torch.tensor(0.)  # hoisted: was re-allocated every iteration
        while order.numel() > 0:
            i = order[0]
            keep.append(i.item())  # keep the best remaining box
            # Intersection of box i with every other remaining box.
            xx1 = torch.maximum(x1[i], x1[order[1:]])
            yy1 = torch.maximum(y1[i], y1[order[1:]])
            xx2 = torch.minimum(x2[i], x2[order[1:]])
            yy2 = torch.minimum(y2[i], y2[order[1:]])
            w = torch.maximum(zero, xx2 - xx1 + 1)
            h = torch.maximum(zero, yy2 - yy1 + 1)
            inter = w * h
            # IoU of box i against the remaining candidates.
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            # Discard boxes overlapping box i above the threshold.
            inds = torch.where(ovr <= self.iou_thres)[0]
            order = order[inds + 1]  # +1 because inds index into order[1:]
        return keep
