"""
YoloV3 Head Definition
"""

import torch
import torch.nn as nn
import numpy as np
import math
from vortex.utils.bbox import find_jaccard_overlap, non_max_suppression

# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class ConvBnLeaky(nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.1), the basic YoloV3 building block.

    Stride is fixed at 1 and "same" padding is derived from the kernel size,
    so spatial dimensions are preserved.
    """

    def __init__(self, inplanes, outplanes, kernel_size):
        super(ConvBnLeaky, self).__init__()
        same_pad = (kernel_size - 1) // 2  # keeps H and W unchanged at stride 1
        # bias is disabled because the following BatchNorm has its own shift
        self.conv = nn.Conv2d(inplanes, outplanes, kernel_size=kernel_size,
                              stride=1, padding=same_pad, bias=False)
        self.bn = nn.BatchNorm2d(outplanes)
        self.relu = nn.LeakyReLU(0.1)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))


class ConvBnLeakyEmbedding(nn.Module):
    """
    Detection embedding for one scale: five ConvBnLeaky layers, then one more
    ConvBnLeaky and a plain 1x1 conv producing the raw detection map.

    forward returns (detection_map, branch_feature); the branch feature is the
    fifth layer's output, reused to feed the next (finer) scale of the head.
    """

    def __init__(self, filters, inplanes, outplanes):
        super(ConvBnLeakyEmbedding, self).__init__()

        # Alternating 1x1 bottleneck / 3x3 expansion layers.
        self.conv1 = ConvBnLeaky(inplanes, filters[0], 1)
        self.conv2 = ConvBnLeaky(filters[0], filters[1], 3)
        self.conv3 = ConvBnLeaky(filters[1], filters[0], 1)
        self.conv4 = ConvBnLeaky(filters[0], filters[1], 3)
        self.conv5 = ConvBnLeaky(filters[1], filters[0], 1)
        self.conv6 = ConvBnLeaky(filters[0], filters[1], 3)
        # Final projection to the raw detection channels; keeps its bias since
        # no BatchNorm follows.
        self.conv7 = nn.Conv2d(filters[1], outplanes, kernel_size=1,
                               stride=1, padding=0, bias=True)

    def forward(self, x):
        # First five layers produce the branch feature shared with the next scale.
        branch = self.conv5(self.conv4(self.conv3(self.conv2(self.conv1(x)))))
        # Last two layers turn the branch feature into the detection map.
        detection = self.conv7(self.conv6(branch))
        return detection, branch


class YoloV3Head(nn.Module):
    """
    YoloV3 detection head over three backbone feature maps.

    Index 0 refers to the deepest (coarsest) scale. Each coarser scale's
    branch feature is reduced with a 1x1 conv, upsampled 2x, and concatenated
    with the next finer backbone feature before that scale's embedding.
    """

    def __init__(self, num_classes, num_anchors=[3, 3, 3], input_channels=[1024, 512, 256]):
        super(YoloV3Head, self).__init__()
        self.num_classes = num_classes

        # Per scale: one (tx, ty, tw, th, obj) plus class scores for each anchor.
        out_channels = [n * (5 + num_classes) for n in num_anchors]

        self.embedding0 = ConvBnLeakyEmbedding([512, 1024], input_channels[0], out_channels[0])
        self.cbl1 = ConvBnLeaky(512, 256, 1)
        self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
        self.embedding1 = ConvBnLeakyEmbedding([256, 512], input_channels[1] + 256, out_channels[1])
        self.cbl2 = ConvBnLeaky(256, 128, 1)
        self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
        self.embedding2 = ConvBnLeakyEmbedding([128, 256], input_channels[2] + 128, out_channels[2])

    def forward(self, inputs):
        """inputs is (finest, middle, deepest); returns raw maps deepest-first."""
        fine, mid, deep = inputs

        # Deepest scale.
        out0, branch0 = self.embedding0(deep)
        # Route the deepest branch into the middle scale.
        routed0 = self.upsample1(self.cbl1(branch0))
        out1, branch1 = self.embedding1(torch.cat([routed0, mid], 1))
        # Route the middle branch into the finest scale.
        routed1 = self.upsample2(self.cbl2(branch1))
        out2, _ = self.embedding2(torch.cat([routed1, fine], 1))

        return out0, out1, out2


class YoloDetect(nn.Module):
    """
    Decode raw YoloV3 head output for a single feature-map scale into
    absolute-pixel box predictions.

    Output shape is (bs, num_anchors * in_h * in_w, 5 + num_classes) where
    each row is (cx, cy, w, h, objectness, class scores...) in input-image
    pixel units.
    """
    def __init__(self, anchors, num_classes, img_size):
        """
        anchors: list of (w, h) anchor sizes in input-image pixels for this scale.
        num_classes: number of object classes.
        img_size: (height, width) of the network input image.
        """
        super(YoloDetect, self).__init__()
        self.anchors = anchors
        self.num_anchors = len(anchors)
        self.num_classes = num_classes
        self.bbox_attrs = 5 + num_classes
        self.img_size = img_size

    def forward(self, inputs):
        bs = inputs.size(0)
        in_h = inputs.size(2)
        in_w = inputs.size(3)

        # Stride = number of input-image pixels one feature-map cell covers.
        stride_h = self.img_size[0] / in_h
        stride_w = self.img_size[1] / in_w

        # Anchors expressed in feature-map cell units instead of image pixels.
        scaled_anchors = torch.FloatTensor(
            [(a_w / stride_w, a_h / stride_h) for a_w, a_h in self.anchors])
        predictions = inputs.view(bs, self.num_anchors, self.bbox_attrs,
                                  in_h, in_w).permute(0, 1, 3, 4, 2).contiguous()

        x = torch.sigmoid(predictions[..., 0])          # cell-relative center x
        y = torch.sigmoid(predictions[..., 1])          # cell-relative center y
        w = predictions[..., 2]                         # log-space width offset
        h = predictions[..., 3]                         # log-space height offset
        conf = torch.sigmoid(predictions[..., 4])       # objectness
        pred_cls = torch.sigmoid(predictions[..., 5:])  # per-class scores (multi-label, hence sigmoid not softmax)

        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
        # Per-cell grid offsets. BUGFIX: the row of x-offsets must be repeated
        # in_h times (and the column of y-offsets in_w times); the previous
        # repeat counts were swapped, so any non-square feature map crashed in
        # the .view() below.
        grid_x = torch.linspace(0, in_w - 1, in_w).repeat(in_h, 1).repeat(
            bs * self.num_anchors, 1, 1).view(x.shape).type(FloatTensor)
        grid_y = torch.linspace(0, in_h - 1, in_h).repeat(in_w, 1).t().repeat(
            bs * self.num_anchors, 1, 1).view(y.shape).type(FloatTensor)
        # Broadcast anchor sizes over the batch and spatial dimensions.
        anchor_w = scaled_anchors.type(FloatTensor).index_select(1, LongTensor([0]))
        anchor_h = scaled_anchors.type(FloatTensor).index_select(1, LongTensor([1]))
        anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(w.shape)
        anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(h.shape)
        # Decode: center = sigmoid offset + cell index, size = anchor * exp(raw).
        pred_boxes = FloatTensor(predictions[..., :4].shape)
        pred_boxes[..., 0] = x.data + grid_x
        pred_boxes[..., 1] = y.data + grid_y
        pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
        pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
        # Scale boxes back to input-image pixels and flatten per image.
        _scale = torch.Tensor([stride_w, stride_h] * 2).type(FloatTensor)
        output = torch.cat((pred_boxes.view(bs, -1, 4) * _scale,
                            conf.view(bs, -1, 1),
                            pred_cls.view(bs, -1, self.num_classes)), -1)
        return output.data


class YoloLoss(nn.Module):
    """
    YoloV3 loss for a single scale of feature map.

    Ground-truth boxes are matched to anchors by IoU: the best-matching
    anchor at the ground-truth cell regresses the box, and anchors whose IoU
    exceeds ``ignore_threshold`` are excluded from the no-object penalty.
    """
    def __init__(self, anchors, num_classes, img_size):
        """
        anchors: list of (w, h) anchor sizes in input-image pixels for this scale.
        num_classes: number of object classes.
        img_size: (height, width) of the network input image.
        """
        super(YoloLoss, self).__init__()
        self.anchors = anchors
        self.num_anchors = len(anchors)
        self.num_classes = num_classes
        self.bbox_attrs = 5 + num_classes
        self.img_size = img_size

        # loss hyper-parameters
        self.ignore_threshold = 0.5  # anchors above this gt IoU skip the noobj term
        self.lambda_xy = 2.5
        self.lambda_wh = 2.5
        self.lambda_conf = 1.0
        self.lambda_cls = 1.0

        self.mse_loss = nn.MSELoss()
        self.bce_loss = nn.BCELoss()

    def forward(self, inputs, boxes, labels):
        """
        inputs: raw head output of shape (bs, num_anchors * bbox_attrs, in_h, in_w).
        boxes: per-image (T, 4) tensors of (x1, y1, x2, y2) in percent coordinates.
        labels: per-image (T,) tensors of class indices aligned with boxes.
        Returns the scalar loss.
        """
        device = inputs.device

        bs = inputs.size(0)
        in_h = inputs.size(2)
        in_w = inputs.size(3)

        # Stride = number of input-image pixels one feature-map cell covers.
        stride_h = self.img_size[0] / in_h
        stride_w = self.img_size[1] / in_w

        # Anchors expressed in feature-map cell units.
        scaled_anchors = torch.FloatTensor(
            [(a_w / stride_w, a_h / stride_h) for a_w, a_h in self.anchors])
        predictions = inputs.view(bs, self.num_anchors, self.bbox_attrs, in_h, in_w).permute(0, 1, 3, 4, 2).contiguous()

        x = torch.sigmoid(predictions[..., 0])          # cell-relative center x
        y = torch.sigmoid(predictions[..., 1])          # cell-relative center y
        w = predictions[..., 2]                         # log-space width offset
        h = predictions[..., 3]                         # log-space height offset
        conf = torch.sigmoid(predictions[..., 4])       # objectness
        pred_cls = torch.sigmoid(predictions[..., 5:])  # per-class scores (multi-label, hence sigmoid not softmax)

        # Build per-cell targets, then accumulate the weighted loss terms.
        mask, noobj_mask, tx, ty, tw, th, tconf, tcls = \
            self.get_targets(boxes, labels, scaled_anchors, in_w, in_h, self.ignore_threshold, device)
        loss_x = self.bce_loss(x * mask, tx * mask)
        loss_y = self.bce_loss(y * mask, ty * mask)
        loss_w = self.mse_loss(w * mask, tw * mask)
        loss_h = self.mse_loss(h * mask, th * mask)
        loss_conf = self.bce_loss(conf * mask, mask) + 0.5 * self.bce_loss(conf * noobj_mask, noobj_mask)
        obj = mask == 1
        if obj.any():
            loss_cls = self.bce_loss(pred_cls[obj], tcls[obj])
        else:
            # BCE over an empty selection yields NaN; contribute zero instead
            # when no ground-truth object falls on this scale.
            loss_cls = torch.zeros((), device=device)
        loss = (loss_x + loss_y) * self.lambda_xy + (loss_w + loss_h) * self.lambda_wh + \
            loss_conf * self.lambda_conf + loss_cls * self.lambda_cls
        return loss

    def anchor_to_xy(self, anchors):
        """
        Convert anchor sizes of shape (num_anchors, 2) as (w, h) into
        zero-centered corner boxes (num_anchors, 4) of (x1, y1, x2, y2).
        """
        hanchor = anchors / 2
        return torch.cat([-hanchor, hanchor], dim=1)

    def get_targets(self, all_boxes, all_labels, anchors, in_w, in_h, ignore_threshold, device='cpu'):
        """
        Match ground-truth boxes with anchors and build per-cell training targets.

        all_boxes: per-image (T, 4) tensors of (x1, y1, x2, y2) in percent coordinates.
        all_labels: per-image (T,) tensors of class indices.
        anchors: (num_anchors, 2) anchor sizes in feature-map cell units.
        Returns (mask, noobj_mask, tx, ty, tw, th, tconf, tcls).
        """
        bs = len(all_boxes)

        mask = torch.zeros(bs, self.num_anchors, in_h, in_w, requires_grad=False).to(device)
        noobj_mask = torch.ones(bs, self.num_anchors, in_h, in_w, requires_grad=False).to(device)
        tx = torch.zeros(bs, self.num_anchors, in_h, in_w, requires_grad=False).to(device)
        ty = torch.zeros(bs, self.num_anchors, in_h, in_w, requires_grad=False).to(device)
        tw = torch.zeros(bs, self.num_anchors, in_h, in_w, requires_grad=False).to(device)
        th = torch.zeros(bs, self.num_anchors, in_h, in_w, requires_grad=False).to(device)
        tconf = torch.zeros(bs, self.num_anchors, in_h, in_w, requires_grad=False).to(device)
        tcls = torch.zeros(bs, self.num_anchors, in_h, in_w, self.num_classes, requires_grad=False).to(device)

        # Anchor corner boxes are loop-invariant; compute them once.
        anchor_box = self.anchor_to_xy(anchors)

        for b in range(bs):
            boxes = all_boxes[b]
            labels = all_labels[b]

            for t in range(boxes.size(0)):
                # Ground-truth center and size in feature-map cell units.
                gx = (boxes[t, 0].item() + boxes[t, 2].item()) / 2 * in_w
                gy = (boxes[t, 1].item() + boxes[t, 3].item()) / 2 * in_h
                gw = (boxes[t, 2].item() - boxes[t, 0].item()) * in_w
                gh = (boxes[t, 3].item() - boxes[t, 1].item()) * in_h
                # Responsible cell, clamped so boxes touching the right/bottom
                # image edge (gx == in_w) still index a valid cell.
                gi = min(max(int(gx), 0), in_w - 1)
                gj = min(max(int(gy), 0), in_h - 1)
                # Zero-centered gt corner box in the same cell units as the
                # anchor boxes. BUGFIX: the old code subtracted cell
                # coordinates (gx, gy) from percent coordinates (boxes[t, i]),
                # mixing units and corrupting every anchor IoU.
                gt_box = torch.FloatTensor([-gw / 2, -gh / 2, gw / 2, gh / 2]).unsqueeze(0)
                anchor_ious = find_jaccard_overlap(gt_box, anchor_box).squeeze(0).detach().cpu().numpy()

                # Anchors overlapping the gt strongly are not penalized as background.
                noobj_mask[b, anchor_ious > ignore_threshold, gj, gi] = 0
                # The best-matching anchor becomes responsible for this gt box.
                best_n = np.argmax(anchor_ious)
                mask[b, best_n, gj, gi] = 1
                tx[b, best_n, gj, gi] = gx - gi
                ty[b, best_n, gj, gi] = gy - gj
                tw[b, best_n, gj, gi] = math.log(gw / anchors[best_n][0] + 1.0e-16)
                th[b, best_n, gj, gi] = math.log(gh / anchors[best_n][1] + 1.0e-16)
                tconf[b, best_n, gj, gi] = 1
                tcls[b, best_n, gj, gi, labels[t]] = 1
        return mask, noobj_mask, tx, ty, tw, th, tconf, tcls
