import torch
import utils.object_detection as object_detection


class YOLOLoss(torch.nn.Module):
    """YOLOv3-style multi-scale detection loss.

    Raw network outputs on each scale are compared against ground-truth
    boxes; per-cell losses are weighted elementwise by objectness masks.

    Args:
        anchors: per-scale anchor sizes in input-image pixels; ``anchors[i]``
            is an ``(m, 2)`` tensor of (w, h) pairs for scale ``i``.
        c: number of object classes.
        strides: per-scale grid sizes (cells per side).  NOTE(review): despite
            the name these are feature-map sizes, not pixel strides — the
            pixel stride is derived from ``image_size`` in ``forward``.
        image_size: network input size as (width, height) in pixels.
        device: torch device the target tensors are created on.
    """

    def __init__(self, anchors, c, strides, image_size, device):
        super(YOLOLoss, self).__init__()

        self.anchors = anchors
        self.c = c
        self.n = 5 + c  # per-anchor channels: x, y, w, h, confidence, classes
        self.m = 3  # number of anchor boxes (priors) per scale
        self.image_size = image_size
        self.threshold = 0.5  # IoU above which an unassigned prediction is ignored
        self.strides = strides

        self.device = device

        self.offsets = self.grids(strides)

        # reduction='none' keeps per-element losses so they can be weighted
        # elementwise by the objectness / no-object masks before summation.
        # With the default 'mean' reduction each term would collapse to a
        # scalar and the masks would only rescale it, never select cells.
        self.BCELoss = torch.nn.BCELoss(reduction='none')
        self.MSELoss = torch.nn.MSELoss(reduction='none')

    def grids(self, strides):
        """Precompute per-scale cell-offset grids.

        For each grid size ``s`` returns a tensor of shape ``(m, s, s, 2)``
        where entry ``[a, i, j]`` is ``[j, i]`` — the (x, y) index of the
        cell — repeated for every anchor.
        """
        offsets = []
        for s in strides:
            # column index (x offset), constant along rows
            cell_offsets_w = torch.arange(0, s)
            cell_offsets_w = cell_offsets_w.repeat(s, 1)
            # row index (y offset), constant along columns
            cell_offsets_h = torch.arange(0, s)
            cell_offsets_h = cell_offsets_h.reshape(s, 1).repeat(1, s)
            cell_offsets = torch.stack([cell_offsets_w, cell_offsets_h], dim=2). \
                view(1, s, s, 2).repeat((self.m, 1, 1, 1)).to(self.device)

            offsets.append(cell_offsets)
        return offsets

    def forward(self, X, labels):
        """Compute the loss averaged over scales.

        Args:
            X: list of per-scale raw outputs, each ``(batch, m * n, H, W)``.
            labels: list (length batch) of ``(k, 5)`` tensors of normalized
                boxes ``(cx, cy, w, h, class_index)`` in ``[0, 1]``.

        Returns:
            Scalar loss tensor (mean over scales of the summed terms).
        """
        loss_sum = 0.
        for i, x in enumerate(X):
            anchor = self.anchors[i]
            offset = self.offsets[i]
            stride = self.strides[i]
            cn = x.size(0)
            # NCHW layout: height comes before width.
            h, w = x.shape[2:4]
            stride_h = self.image_size[1] // h
            stride_w = self.image_size[0] // w
            stride_wh = torch.tensor([stride_w, stride_h], device=self.device)
            # anchors expressed in grid-cell units
            scaled_anchor = anchor / stride_wh

            # (batch, m, h, w, n): last dim is [x, y, w, h, conf, classes...]
            p = x.view(cn, self.m, self.n, h, w).permute(0, 1, 3, 4, 2).contiguous()

            xy = torch.sigmoid(p[..., 0:2])
            wh = p[..., 2:4]
            confidence = torch.sigmoid(p[..., 4])
            # kept for the (currently disabled) class loss below
            classes = torch.sigmoid(p[..., 5:])

            # NOTE(review): assumes strides[i] == h == w — confirm upstream.
            truths, no_obj_masks, loss_scale, y = self.transform_labels(labels=labels, anchors=scaled_anchor, s=stride)

            # drop the no-object penalty where predictions already overlap a
            # ground-truth box well (detached: mask building is not trained)
            no_obj_masks = self.second_update_masks(p.detach(), y, scaled_anchor, stride, no_obj_masks, offset)

            # small boxes get a larger weight: 2 - relative area
            loss_scale = 2. - loss_scale

            obj_mask = truths[..., 4]
            # unsqueeze so the (batch, m, s, s) weight broadcasts over the
            # trailing 2-channel xy / wh dimension
            box_weight = (loss_scale * obj_mask).unsqueeze(-1)

            loss_xy = self.BCELoss(xy, truths[..., 0:2]) * box_weight

            loss_wh = self.MSELoss(wh, truths[..., 2:4]) * box_weight

            # positive cells push confidence toward 1, unassigned cells
            # (minus the ignored high-IoU ones) push it toward 0
            loss_confidence = self.BCELoss(confidence, truths[..., 4]) * obj_mask \
                + self.BCELoss(confidence, truths[..., 4]) * no_obj_masks

            # loss_classes = self.BCELoss(classes[truths[..., 4] == 1], truths[..., 5:][truths[..., 4] == 1])

            loss = loss_xy.sum() + loss_wh.sum() + loss_confidence.sum()
            # + loss_classes.sum()

            loss_sum += loss

        return loss_sum / len(X)

    def transform_labels(self, labels, anchors, s):
        """Build the training targets for one scale on an s x s grid.

        Each ground-truth box is assigned to the anchor with the highest
        shape-only IoU, at the cell containing the box center.

        Args:
            labels: list (length batch) of ``(k, 5)`` normalized boxes.
            anchors: ``(m, 2)`` anchors already scaled to grid-cell units.
            s: grid size (cells per side).

        Returns:
            truths: ``(batch, m, s, s, n)`` regression / objectness / class
                targets.
            no_obj_masks: ``(batch, m, s, s)`` — 1 where no object assigned.
            loss_scale: ``(batch, m, s, s)`` relative box area, used for the
                small-box weighting in ``forward``.
            y: list of per-image boxes rescaled to grid-cell units.
        """
        cn = len(labels)

        no_obj_masks = torch.ones(cn, self.m, s, s, requires_grad=False, device=self.device)

        truths = torch.zeros(cn, self.m, s, s, self.n, requires_grad=False, device=self.device)

        loss_scale = torch.zeros(cn, self.m, s, s, requires_grad=False, device=self.device)

        y = []
        for b in range(cn):
            label = labels[b]
            if label.size(0) == 0:
                continue

            # rescale normalized (cx, cy, w, h) into grid-cell units
            cn_y = torch.zeros(label.shape, device=label.device)

            cn_y[:, 0] = label[:, 0] * s
            cn_y[:, 2] = label[:, 2] * s

            cn_y[:, 1] = label[:, 1] * s
            cn_y[:, 3] = label[:, 3] * s

            cn_y[:, 4] = label[:, 4]

            y.append(cn_y)

            # shape-only IoU: zero out the centers so only (w, h) matter
            no_xy_box = object_detection.bbox_position_cast(
                torch.cat([torch.zeros(label.size(0), 2, device=cn_y.device), cn_y[:, 2:4]], dim=1), 0)

            no_xy_anchor = object_detection.bbox_position_cast(
                torch.cat([torch.zeros(anchors.size(0), 2, device=anchors.device), anchors], dim=1), 0)
            best_anchors_idx = torch.argmax(object_detection.iou(no_xy_box, no_xy_anchor, mode='joint'), dim=1)
            for t, best_idx in enumerate(best_anchors_idx):
                j = torch.floor(cn_y[t, 0]).long()  # cell column (x)
                i = torch.floor(cn_y[t, 1]).long()  # cell row (y)

                no_obj_masks[b, best_idx, i, j] = 0

                # center offset within the cell, log-space size vs anchor
                truths[b, best_idx, i, j, 0] = cn_y[t, 0] - j.float()
                truths[b, best_idx, i, j, 1] = cn_y[t, 1] - i.float()
                truths[b, best_idx, i, j, 2] = torch.log(cn_y[t, 2] / anchors[best_idx][0] + 1e-16)
                truths[b, best_idx, i, j, 3] = torch.log(cn_y[t, 3] / anchors[best_idx][1] + 1e-16)
                truths[b, best_idx, i, j, 4] = 1.
                truths[b, best_idx, i, j, cn_y[t, 4].long() + 5] = 1.
                loss_scale[b, best_idx, i, j] = cn_y[t, 2] * cn_y[t, 3] / (s * s)

        # targets, no-object mask, loss weights, grid-space boxes
        return truths, no_obj_masks, loss_scale, y

    def second_update_masks(self, p, y, anchor: torch.Tensor, s, no_obj_masks, offset):
        """Clear the no-object mask where predictions already fit a truth box.

        Decodes the raw predictions into grid-space boxes and, for every
        prediction whose best IoU with any ground-truth box exceeds
        ``self.threshold``, removes the no-object penalty (the standard
        YOLOv3 "ignore" region).
        """
        cn = p.size(0)
        cn_offset = offset.view(1, self.m, s, s, 2).repeat(cn, 1, 1, 1, 1)

        anchor_wh = anchor.view(1, anchor.size(0), 1, 1, anchor.size(1)).repeat(cn, 1, s, s, 1)
        decoded_p_xy = torch.sigmoid(p[..., 0:2]) + cn_offset
        decoded_p_wh = torch.exp(p[..., 2:4]) * anchor_wh
        decoded_p_boxes = torch.cat([decoded_p_xy, decoded_p_wh], dim=-1)

        for b in range(cn):

            if y[b].size(0) == 0:
                continue
            y_b = y[b]

            iou = object_detection.iou(object_detection.bbox_position_cast(decoded_p_boxes[b], 0),
                                       object_detection.bbox_position_cast(y_b[:, 0:4], 0))

            # best ground-truth match per prediction
            iou_max, index = torch.max(iou, dim=1)

            iou_max = iou_max.view(decoded_p_boxes[b].shape[0:3])
            no_obj_masks[b][iou_max > self.threshold] = 0

        return no_obj_masks


class YOLOInfer(YOLOLoss):
    """Decodes raw multi-scale YOLO outputs into boxes, scores and classes.

    Reuses the parent's anchors / grids / sizing; ``forward`` performs no
    loss computation and runs under ``torch.no_grad()``.
    """

    def __init__(self, anchors, c, strides, image_size, device):
        super().__init__(anchors, c, strides, image_size, device)

    @torch.no_grad()
    def forward(self, X, labels=None):
        """Decode every prediction on every scale.

        Args:
            X: list of per-scale raw outputs, each ``(batch, m * n, H, W)``.
            labels: unused; kept for signature compatibility with the parent.

        Returns:
            Tuple of ``(boxes, confidences, classes)`` concatenated over all
            scales: ``(N, 4)`` center-size boxes in input-image pixels,
            ``(N, 1)`` objectness scores, ``(N, 1)`` argmax class indices.
        """
        completed_boxes = []

        completed_confidences = []

        completed_classes = []

        for i, x in enumerate(X):
            anchor = self.anchors[i]
            offset = self.offsets[i]
            stride = self.strides[i]

            cn = x.size(0)

            # NCHW layout: height comes before width.
            h, w = x.shape[2:4]
            stride_h = self.image_size[1] // h
            stride_w = self.image_size[0] // w

            stride_wh = torch.tensor([stride_w, stride_h], device=self.device)
            scaled_anchor = (anchor / stride_wh).to(self.device)

            cn_offset = offset.view(1, self.m, stride, stride, 2).repeat(cn, 1, 1, 1, 1)
            # (batch, m, h, w, n): last dim is [x, y, w, h, conf, classes...]
            p = x.view(cn, self.m, self.n, h, w).permute(0, 1, 3, 4, 2).contiguous()

            # cell-relative center -> input-image pixels
            xy = torch.sigmoid(p[..., 0:2])
            decoded_xy = (xy + cn_offset) * stride_wh

            # log-space size -> input-image pixels; broadcast over all m
            # anchors at once instead of hard-coding three per-anchor slices
            pixel_anchor = (stride_wh * scaled_anchor).view(1, self.m, 1, 1, 2)
            decoded_wh = pixel_anchor * torch.exp(p[..., 2:4])

            boxes = torch.cat([decoded_xy.view(-1, 2), decoded_wh.view(-1, 2)], dim=1)

            confidence = torch.sigmoid(p[..., 4])

            classes = torch.sigmoid(p[..., 5:])

            class_res = torch.argmax(classes.view(-1, self.c), dim=1)

            completed_boxes.append(boxes.view(-1, 4))
            completed_confidences.append(confidence.view(-1, 1))
            completed_classes.append(class_res.view(-1, 1))

        return torch.cat(completed_boxes, dim=0), torch.cat(completed_confidences, dim=0), torch.cat(completed_classes,
                                                                                                     dim=0)