import torch
import torch.nn as nn
import torch.nn.functional as F
from vortex.utils.bbox import find_jaccard_overlap
from vortex.engine.misc import init_weights


def gcxgcy_to_cxcy(gcxgcy, priors_cxcy):
    """Decode offset-encoded boxes back to center-size form.

    :param gcxgcy: (n, 4) encoded offsets (g_cx, g_cy, g_w, g_h)
    :param priors_cxcy: (n, 4) prior boxes in center-size form (cx, cy, w, h)
    :return: (n, 4) decoded boxes in center-size form

    Uses the standard SSD variances: 10 for centers, 5 for sizes.
    """
    centers = gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy[:, :2]
    sizes = torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]
    return torch.cat([centers, sizes], dim=1)


def cxcy_to_gcxgcy(cxcy, priors_cxcy):
    """Encode center-size boxes as offsets w.r.t. their prior boxes.

    :param cxcy: (n, 4) boxes in center-size form (cx, cy, w, h)
    :param priors_cxcy: (n, 4) prior boxes in center-size form
    :return: (n, 4) encoded offsets (g_cx, g_cy, g_w, g_h)

    Inverse of :func:`gcxgcy_to_cxcy`; same SSD variances (10, 5).
    """
    scaled_centers = (cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10)
    scaled_sizes = torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5
    return torch.cat([scaled_centers, scaled_sizes], dim=1)


def cxcy_to_xy(cxcy):
    """Convert center-size boxes (cx, cy, w, h) to corner form (x_min, y_min, x_max, y_max).

    :param cxcy: (n, 4) boxes in center-size form
    :return: (n, 4) boxes in corner form
    """
    half_sizes = cxcy[:, 2:] / 2
    return torch.cat([cxcy[:, :2] - half_sizes, cxcy[:, :2] + half_sizes], dim=1)


def xy_to_cxcy(xy):
    """Convert corner boxes (x_min, y_min, x_max, y_max) to center-size form (cx, cy, w, h).

    :param xy: (n, 4) boxes in corner form
    :return: (n, 4) boxes in center-size form
    """
    mins, maxs = xy[:, :2], xy[:, 2:]
    return torch.cat([(mins + maxs) / 2, maxs - mins], dim=1)


class SSDHead(nn.Module):
    """
    SSD prediction head, replacing AuxiliaryConvolutions.

    Consumes three backbone feature maps, derives three extra maps with
    auxiliary convolutions, L2-normalizes and rescales the first map, then
    predicts per-prior box offsets and class scores over all six maps.
    TODO: add BN layers.
    """
    def __init__(self, num_classes, inplanes=(512, 1024, 512, 256, 256, 256), nboxes=(4, 6, 6, 6, 4, 4)):
        super(SSDHead, self).__init__()
        self.num_classes = num_classes

        # auxiliary convolutions: a 1x1 channel squeeze followed by a
        # 3x3 conv that reduces spatial resolution
        self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
        self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)  # stride 2 halves resolution
        self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
        self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # padding 0 shrinks resolution
        self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
        self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # padding 0 shrinks resolution

        # learnable per-channel rescale applied to the L2-normalized first map
        self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1))
        nn.init.constant_(self.rescale_factors, 20)

        # localization convs: 4 offsets per box at every spatial position
        for idx, (planes, boxes) in enumerate(zip(inplanes, nboxes)):
            setattr(self, 'loc_conv{}'.format(idx),
                    nn.Conv2d(planes, boxes * 4, kernel_size=3, padding=1))

        # classification convs: num_classes scores per box at every position
        for idx, (planes, boxes) in enumerate(zip(inplanes, nboxes)):
            setattr(self, 'cl_conv{}'.format(idx),
                    nn.Conv2d(planes, boxes * num_classes, kernel_size=3, padding=1))

        init_weights(self)

    def forward(self, inputs):
        """
        :param inputs: tuple of three backbone feature maps (x0, x1, x2)
        :return: (locs, scores) — box offsets (N, n_priors, 4) and raw class
                 scores (N, n_priors, num_classes), priors concatenated across
                 all six feature maps in order
        """
        x0, x1, x2 = inputs
        batch_size = x0.size(0)

        # derive the three extra feature maps from x2
        out = F.relu(self.conv9_1(x2))
        x3 = F.relu(self.conv9_2(out))
        out = F.relu(self.conv10_1(x3))
        x4 = F.relu(self.conv10_2(out))
        out = F.relu(self.conv11_1(x4))
        x5 = F.relu(self.conv11_2(out))

        # L2-normalize x0 along channels, then apply the learned rescale
        norm = x0.pow(2).sum(dim=1, keepdim=True).sqrt()
        x0 = x0 / norm
        x0 = x0 * self.rescale_factors

        features = (x0, x1, x2, x3, x4, x5)

        def _predict(conv, feat, last_dim):
            # channels-last then flatten so priors from each cell stay contiguous
            pred = conv(feat).permute(0, 2, 3, 1).contiguous()
            return pred.view(batch_size, -1, last_dim)

        locs = torch.cat(
            [_predict(getattr(self, 'loc_conv{}'.format(i)), feat, 4)
             for i, feat in enumerate(features)], dim=1)
        scores = torch.cat(
            [_predict(getattr(self, 'cl_conv{}'.format(i)), feat, self.num_classes)
             for i, feat in enumerate(features)], dim=1)
        return locs, scores


class SSDDetect(nn.Module):
    """
    Decode SSD predictions into detected boxes, labels and scores.

    For each image: decode prior offsets into corner-form boxes, threshold
    per-class scores at ``min_score``, run per-class greedy NMS with IoU
    threshold ``max_overlap``, and keep at most ``top_k`` detections overall.
    """
    def __init__(self, n_classes, priors, min_score=0.2, max_overlap=0.5, top_k=200):
        super(SSDDetect, self).__init__()
        self.n_classes = n_classes
        self.priors = priors  # (n_priors, 4) prior boxes in center-size form
        self.min_score = min_score  # minimum class score for a candidate box
        self.max_overlap = max_overlap  # NMS IoU threshold
        self.top_k = top_k  # maximum number of detections kept per image

    def detect(self, predicted_locs, predicted_scores):
        """
        :param predicted_locs: (N, n_priors, 4) encoded box offsets
        :param predicted_scores: (N, n_priors, n_classes) raw class scores
        :return: three per-image lists — boxes (n, 4), labels (n,), scores (n,)
        """
        batch_size = predicted_locs.size(0)
        device = predicted_locs.device

        n_priors = self.priors.size(0)
        assert n_priors == predicted_locs.size(1)
        assert n_priors == predicted_scores.size(1)

        predicted_scores = F.softmax(predicted_scores, dim=2)

        all_images_boxes = []
        all_images_labels = []
        all_images_scores = []

        for i in range(batch_size):
            # decode offsets w.r.t. priors into corner-form boxes
            decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i], self.priors))

            image_boxes = []
            image_labels = []
            image_scores = []

            # class 0 is background, so start from 1
            for c in range(1, self.n_classes):
                class_scores = predicted_scores[i][:, c]
                score_above_min_score = class_scores > self.min_score
                n_above_min_score = score_above_min_score.sum().item()
                if n_above_min_score == 0:
                    continue

                class_scores = class_scores[score_above_min_score]
                class_decoded_locs = decoded_locs[score_above_min_score]

                # sort predicted boxes and scores by score value
                class_scores, sort_ind = class_scores.sort(dim=0, descending=True)
                class_decoded_locs = class_decoded_locs[sort_ind]

                overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs)

                # NMS: greedily suppress boxes that overlap an unsuppressed,
                # higher-scoring box by more than max_overlap.
                # Fix: use a bool mask created on the prediction device — the
                # previous CPU uint8 tensor broke GPU runs and relied on
                # deprecated byte-tensor arithmetic indexing (`1 - suppress`).
                suppress = torch.zeros((n_above_min_score,), dtype=torch.bool, device=device)
                for box in range(class_decoded_locs.size(0)):
                    if suppress[box]:
                        continue
                    suppress = suppress | (overlap[box] > self.max_overlap)
                    suppress[box] = False  # never suppress the box under consideration

                keep = ~suppress
                n_kept = keep.sum().item()
                image_boxes.append(class_decoded_locs[keep])  # (n_kept, 4)
                image_scores.append(class_scores[keep])  # (n_kept,)
                # Fix: build labels on the same device as boxes/scores so the
                # torch.cat below does not mix CPU and GPU tensors.
                image_labels.append(torch.full((n_kept,), c, dtype=torch.long, device=device))

            # if nothing survived for any class, store a 'background' placeholder
            if len(image_boxes) == 0:
                image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
                image_labels.append(torch.LongTensor([0]).to(device))
                image_scores.append(torch.FloatTensor([0.]).to(device))

            image_boxes = torch.cat(image_boxes, dim=0)
            image_labels = torch.cat(image_labels, dim=0)
            image_scores = torch.cat(image_scores, dim=0)

            n_objects = image_scores.size(0)
            if n_objects > self.top_k:
                # keep only the top_k highest-scoring detections
                image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
                image_scores = image_scores[:self.top_k]
                image_boxes = image_boxes[sort_ind][:self.top_k]
                image_labels = image_labels[sort_ind][:self.top_k]
            all_images_boxes.append(image_boxes)
            all_images_labels.append(image_labels)
            all_images_scores.append(image_scores)

        return all_images_boxes, all_images_labels, all_images_scores


class MultiBoxLoss(nn.Module):
    """
    Multibox loss for single shot detector.

    A weighted sum of a localization loss over positive (matched) priors and
    a confidence loss over positive priors plus hard-mined negative priors.
    """
    def __init__(self, priors, min_score=0.5, neg_pos_ratio=3, alpha=1.0):
        super(MultiBoxLoss, self).__init__()
        self.priors_cxcy = priors  # (n_priors, 4) priors in center-size form
        self.priors_xy = cxcy_to_xy(priors)  # same priors in corner form
        self.min_score = min_score  # IoU threshold below which a prior is background
        self.neg_pos_ratio = neg_pos_ratio  # hard negatives kept per positive prior
        self.alpha = alpha  # weight of the localization loss
        # NOTE(review): named smooth_l1 but this is plain L1Loss; the SSD paper
        # uses SmoothL1Loss — left unchanged to preserve training behavior.
        self.smooth_l1 = nn.L1Loss()
        # Fix: `reduce=False` is deprecated (and rejected by recent PyTorch);
        # `reduction='none'` is the supported equivalent.
        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')

    def forward(self, predicted_locs, predicted_scores, boxes, labels):
        """
        :param predicted_locs: (N, n_priors, 4) encoded box offsets
        :param predicted_scores: (N, n_priors, n_classes) raw class scores
        :param boxes: list of N tensors (n_objects_i, 4), ground truth in corner form
        :param labels: list of N tensors (n_objects_i,), ground-truth class indices
        :return: scalar loss = confidence loss + alpha * localization loss
        """
        batch_size = predicted_locs.size(0)
        device = predicted_locs.device

        n_priors = self.priors_cxcy.size(0)
        n_classes = predicted_scores.size(2)

        assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)

        true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(device)  # (N, 8732, 4)
        true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(device)  # (N, 8732)

        # For each image
        for i in range(batch_size):
            n_objects = boxes[i].size(0)

            # Fix: guard against images with no ground-truth objects — the
            # unguarded overlap.max(dim=0) would raise on an empty tensor.
            # All priors stay background (true_classes is pre-zeroed).
            if n_objects == 0:
                continue

            overlap = find_jaccard_overlap(boxes[i], self.priors_xy)  # (n_objects, 8732)

            # For each prior, find the object that has the maximum overlap
            overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0)  # (8732)

            # We don't want a situation where an object is not represented in our positive (non-background) priors -
            # 1. An object might not be the best object for all priors, and is therefore not in object_for_each_prior.
            # 2. All priors with the object may be assigned as background based on the threshold (0.5).

            # To remedy this -
            # First, find the prior that has the maximum overlap for each object.
            _, prior_for_each_object = overlap.max(dim=1)  # (N_o)

            # Then, assign each object to the corresponding maximum-overlap-prior. (This fixes 1.)
            object_for_each_prior[prior_for_each_object] = torch.arange(n_objects, dtype=torch.long, device=device)

            # To ensure these priors qualify, artificially give them an overlap of greater than 0.5. (This fixes 2.)
            overlap_for_each_prior[prior_for_each_object] = 1.

            # Labels for each prior
            label_for_each_prior = labels[i][object_for_each_prior]  # (8732)
            # Set priors whose overlaps with objects are less than the threshold to be background (no object)
            label_for_each_prior[overlap_for_each_prior < self.min_score] = 0  # (8732)

            # Store
            true_classes[i] = label_for_each_prior

            # Encode center-size object coordinates into the form we regressed predicted boxes to
            true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy)  # (8732, 4)

        # Identify priors that are positive (object/non-background)
        positive_priors = true_classes != 0  # (N, 8732)

        # LOCALIZATION LOSS

        # Localization loss is computed only over positive (non-background) priors
        loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors])  # (), scalar

        # Note: indexing with a boolean mask flattens the tensor when indexing is across multiple dimensions (N & 8732)
        # So, if predicted_locs has the shape (N, 8732, 4), predicted_locs[positive_priors] will have (total positives, 4)

        # CONFIDENCE LOSS

        # Confidence loss is computed over positive priors and the most difficult (hardest) negative priors in each image
        # That is, FOR EACH IMAGE,
        # we will take the hardest (neg_pos_ratio * n_positives) negative priors, i.e where there is maximum loss
        # This is called Hard Negative Mining - it concentrates on hardest negatives in each image, and also minimizes pos/neg imbalance

        # Number of positive and hard-negative priors per image
        n_positives = positive_priors.sum(dim=1)  # (N)
        n_hard_negatives = self.neg_pos_ratio * n_positives  # (N)

        # First, find the loss for all priors
        conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1))  # (N * 8732)
        conf_loss_all = conf_loss_all.view(batch_size, n_priors)  # (N, 8732)

        # We already know which priors are positive
        conf_loss_pos = conf_loss_all[positive_priors]  # (sum(n_positives))

        # Next, find which priors are hard-negative
        # To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives
        conf_loss_neg = conf_loss_all.clone()  # (N, 8732)
        conf_loss_neg[positive_priors] = 0.  # (N, 8732), positive priors are ignored (never in top n_hard_negatives)
        conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True)  # (N, 8732), sorted by decreasing hardness
        hardness_ranks = torch.arange(n_priors, dtype=torch.long, device=device).unsqueeze(0).expand_as(conf_loss_neg)  # (N, 8732)
        hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1)  # (N, 8732)
        conf_loss_hard_neg = conf_loss_neg[hard_negatives]  # (sum(n_hard_negatives))

        # As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors
        conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float()  # (), scalar

        # TOTAL LOSS
        return conf_loss + self.alpha * loc_loss
